/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in the order and at the
   times determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from the outside.
   - "schedulers", which split all packets into "traffic classes",
     using "packet classifiers" (see cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate the
   information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to perform some
   sanity checks and the parts of the work common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.

   Every discipline has two major routines: enqueue and dequeue.
   (An illustrative ops skeleton follows this comment block.)

   ---dequeue

   dequeue usually returns an skb to send. It is allowed to return NULL,
   but that does not mean the queue is empty; it just means that the
   discipline does not want to send anything at this time.
   The queue is really empty only if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not the
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN		- this packet was probably enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue, but without removing the packet from the queue

   ---reset

   returns the qdisc to its initial state: purges all buffers and clears all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
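/*
 * A minimal sketch of how the routines described above plug into a
 * scheduler module.  The "example_*" names are hypothetical (real
 * implementations live in sch_*.c); it is kept under #if 0 because it is
 * illustrative only, not part of this file.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	/* Tail-drop FIFO: accept until sch->limit, else NET_XMIT_DROP. */
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);
	return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* May return NULL without the queue being empty (see above). */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
#endif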
/* Protects list of registered TC modules. It is a pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;

/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_block && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);
int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);
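/*
 * Hedged usage sketch: a qdisc module normally registers its ops on load
 * and unregisters them on unload (built-in schedulers are registered in
 * pktsched_init() at the bottom of this file).  "example_qdisc_ops" is the
 * hypothetical object sketched earlier.
 */
#if 0
static int __init example_module_init(void)
{
	return register_qdisc(&example_qdisc_ops);
}

static void __exit example_module_exit(void)
{
	unregister_qdisc(&example_qdisc_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");
#endif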
/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}
static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}
/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}
#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif
/* We know handle. Find qdisc among all qdiscs attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */
static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}
void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
		if (invisible)
			q->flags |= TCQ_F_INVISIBLE;
	}
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}
/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}
/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utilities, we detect the linklayer setting by checking whether the
 * rate table was modified.
 *
 * For linklayer ATM, the rate table entries are aligned to 48-byte
 * cells, so some table entries will contain the same value. The mpu
 * (min packet unit) is also encoded into the old rate table, so
 * starting from the mpu we find the low and high table entries that
 * map this cell. If those entries contain the same value, then the
 * rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu up to the nearest 48-byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing the two entries.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
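/* Worked example (assumed values, for illustration): with r->mpu = 100 and
 * r->cell_log = 3, low = roundup(100, 48) = 144 and high = roundup(145, 48)
 * = 192, so cell_low = 144 >> 3 = 18 and cell_high = (192 >> 3) - 1 = 23.
 * If rtab[18] == rtab[23], the table was quantized to 48-byte ATM cells;
 * otherwise the entries differ and we assume plain Ethernet.
 */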
static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);
void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);
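/*
 * Hedged usage sketch: rate-table consumers (tbf- or cbq-style qdiscs)
 * fetch a shared table while parsing their netlink options and release it
 * on destroy.  TCA_EXAMPLE_RTAB and qopt are hypothetical names.
 */
#if 0
	struct qdisc_rate_table *rtab;

	rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_EXAMPLE_RTAB]);
	if (!rtab)
		return -EINVAL;
	/* ... index rtab->data[] to map packet sizes to transmit times ... */
	qdisc_put_rtab(rtab);
#endif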
static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};
static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}
static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}
void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);
static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
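/* Worked example (assumed size table, for illustration): with overhead = 24,
 * cell_align = -1, cell_log = 6 and size_log = 6, a 1000-byte skb gives
 * pkt_len = 1024, slot = (1024 - 1) >> 6 = 15, and the accounted length
 * becomes stab->data[15] << 6 -- i.e. a table-driven "cells times cell size"
 * figure instead of the raw byte count.
 */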
void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);
static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);
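/*
 * Hedged usage sketch: a shaping qdisc embeds a qdisc_watchdog in its
 * private data (set up with qdisc_watchdog_init() from ->init() and
 * cancelled from ->reset()/->destroy()), and re-arms it from ->dequeue()
 * when the next packet may not leave yet.  All names are hypothetical.
 */
#if 0
struct example_sched_data {
	struct qdisc_watchdog	watchdog;
	u64			next_send_ns;
};

static struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
	struct example_sched_data *q = qdisc_priv(sch);

	if (ktime_get_ns() < q->next_send_ns) {
		/* Returning NULL while armed: the watchdog will call
		 * __netif_schedule() for us when the gate opens.
		 */
		qdisc_watchdog_schedule_ns(&q->watchdog, q->next_send_ns);
		return NULL;
	}
	return qdisc_dequeue_head(sch);
}
#endif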
static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}
static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}
void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);
int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);
void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);
void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);
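/*
 * Hedged usage sketch: a classful qdisc embeds struct Qdisc_class_common at
 * the start of each class and keeps the classes in a Qdisc_class_hash in
 * its private data.  Hypothetical names throughout.
 */
#if 0
struct example_class {
	struct Qdisc_class_common common;	/* must be first */
};

struct example_classful_data {
	struct Qdisc_class_hash clhash;
};

static void example_attach_class(struct Qdisc *sch, struct example_class *cl)
{
	struct example_classful_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);
	/* Outside the tree lock: may rehash once load factor passes 0.75. */
	qdisc_class_hash_grow(sch, &q->clhash);
}
#endif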
/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
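/* Refresher (illustrative): a handle is a 32-bit major:minor pair, so
 * TC_H_MAKE(0x80010000, 0x2) names class 8001:2 in tc(8) syntax, and
 * TC_H_MAJ()/TC_H_MIN() mask out the respective 16-bit halves.  The
 * allocator above hands out majors 8000-ffff with minor 0, so it never
 * collides with the low-numbered handles users typically pick by hand.
 */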
void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
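/*
 * Hedged usage sketch: a qdisc that drops packets outside its normal
 * enqueue path (here while shrinking its limit in ->change(), codel-style)
 * must report the qlen/backlog delta upward or the ancestors' counters
 * drift.  Hypothetical fragment; "new_limit" would come from the parsed
 * netlink options.
 */
#if 0
	unsigned int qlen = sch->q.qlen, dropped_len = 0;

	while (sch->q.qlen > new_limit) {
		struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

		dropped_len += qdisc_pkt_len(skb);
		qdisc_qstats_backlog_dec(sch, skb);
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped_len);
#endif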
static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */
static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				refcount_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				refcount_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);

			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else {
				err = -ENOENT;
			}
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}
/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;
/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load.  So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request.  We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;
	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;
	/* This exists to keep backward compatibility with a userspace
	 * loophole that allowed userspace to get the IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init) and then forgetting to reinit tx_queue_len
	 * before attaching a qdisc again.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}
	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			seqcount_t *running;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				running = qdisc_root_sleeping_running(sch);
			else
				running = &sch->running;

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						NULL,
						running,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_hash_add(sch, false);

		return sch;
	}
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require an ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		 * because change can't be undone.
		 */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return err;
}
struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);
static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}
static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}
/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}
/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				refcount_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know that some child q is already
				 *   attached to this parent and have a choice:
				 *   either to change it or to create/graft a
				 *   new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if the CREATE and REPLACE flags are set.
				 *
				 *   2. If the EXCL flag is set, the requestor
				 *   wanted to say that the qdisc tcm_handle
				 *   is not expected to exist, so we choose
				 *   create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is a sort of hole in the API; we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft if the
				 *   user gave a KIND which does not match the
				 *   existing one.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}
static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = refcount_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the
	 * global qdisc hashtable, we don't want to hit it again.
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}
static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct tcmsg *tcm = nlmsg_data(nlh);
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}


/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/


static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - fully specified.
	   handle == X:0	 - root class.
	 */

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is a genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl,
					      RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
out:
	if (cl)
		cops->put(q, cl);

	return err;
}
static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}
struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}
static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}
static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	if (tcm->tcm_parent) {
		q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
		if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
		return 0;
	}
	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}
static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}
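/* Illustrative note: userspace tc reads these four hex fields from
 * /proc/net/psched to calibrate its internal tick<->time conversion, so
 * the output format above is effectively ABI and must stay stable.
 */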
static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}
static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif
static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};
static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      NULL);

	return 0;
}

subsys_initcall(pktsched_init);