/*
 * net/sched/sch_api.c	Packet scheduler API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *
 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990601: ingress support
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/hashtable.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event);
/*

   Short review.
   -------------

   This file consists of two interrelated parts:

   1. queueing disciplines manager frontend.
   2. traffic classes manager frontend.

   Generally, a queueing discipline ("qdisc") is a black box,
   which is able to enqueue packets and to dequeue them (when
   the device is ready to send something) in an order and at times
   determined by the algorithm hidden inside it.

   qdiscs are divided into two categories:
   - "queues", which have no internal structure visible from outside.
   - "schedulers", which split all the packets into "traffic classes",
     using "packet classifiers" (look at cls_api.c)

   In turn, classes may have child qdiscs (as a rule, queues)
   attached to them etc. etc. etc.

   The goal of the routines in this file is to translate
   the information supplied by the user in the form of handles
   into a form more intelligible to the kernel, to make some sanity
   checks and do the part of the work which is common to all qdiscs,
   and to provide rtnetlink notifications.

   All real intelligent work is done inside qdisc modules.



   Every discipline has two *major* routines: enqueue and dequeue.

   ---dequeue

   dequeue usually returns a skb to send. It is allowed to return NULL,
   but it does not mean that the queue is empty, it just means that
   the discipline does not want to send anything this time.
   The queue is really empty if q->q.qlen == 0.
   For complicated disciplines with multiple queues q->q is not a
   real packet queue, but q->q.qlen must nevertheless be valid.

   ---enqueue

   enqueue returns 0 if the packet was enqueued successfully.
   If a packet (this one or another one) was dropped, it returns
   a non-zero error code:
   NET_XMIT_DROP 	- this packet was dropped
     Expected action: do not back off, but wait until the queue clears.
   NET_XMIT_CN	 	- probably this packet was enqueued, but another one was dropped.
     Expected action: back off or ignore

   Auxiliary routines:

   ---peek

   like dequeue but without removing a packet from the queue

   ---reset

   returns the qdisc to its initial state: purge all buffers, clear all
   timers, counters (except for statistics) etc.

   ---init

   initializes a newly created qdisc.

   ---destroy

   destroys resources allocated by init and during the lifetime of the qdisc.

   ---change

   changes qdisc parameters.
 */
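
/* To make the contract above concrete, here is a minimal sketch of a
 * work-conserving FIFO-style discipline. It is illustrative only: the
 * "example_*" names are hypothetical and nothing below is registered
 * by this file.
 */
#if 0
static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct sk_buff **to_free)
{
	/* Return 0 on success; qdisc_drop() frees the skb via to_free
	 * and returns NET_XMIT_DROP, matching the rules above.
	 */
	if (likely(sch->q.qlen < qdisc_dev(sch)->tx_queue_len))
		return qdisc_enqueue_tail(skb, sch);
	return qdisc_drop(skb, sch, to_free);
}

static struct sk_buff *example_dequeue(struct Qdisc *sch)
{
	/* May return NULL; the queue is empty only when sch->q.qlen == 0 */
	return qdisc_dequeue_head(sch);
}

static struct Qdisc_ops example_qdisc_ops __read_mostly = {
	.id		= "example",
	.priv_size	= 0,
	.enqueue	= example_enqueue,
	.dequeue	= example_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};
#endif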
/* Protects list of registered TC modules. It is pure SMP lock. */
static DEFINE_RWLOCK(qdisc_mod_lock);


/************************************************
 *	Queueing disciplines manipulation.	*
 ************************************************/


/* The list of all installed queueing disciplines. */

static struct Qdisc_ops *qdisc_base;
/* Register/unregister queueing discipline */

int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int rc = -EEXIST;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			goto out;

	if (qops->enqueue == NULL)
		qops->enqueue = noop_qdisc_ops.enqueue;
	if (qops->peek == NULL) {
		if (qops->dequeue == NULL)
			qops->peek = noop_qdisc_ops.peek;
		else
			goto out_einval;
	}
	if (qops->dequeue == NULL)
		qops->dequeue = noop_qdisc_ops.dequeue;

	if (qops->cl_ops) {
		const struct Qdisc_class_ops *cops = qops->cl_ops;

		if (!(cops->get && cops->put && cops->walk && cops->leaf))
			goto out_einval;

		if (cops->tcf_chain && !(cops->bind_tcf && cops->unbind_tcf))
			goto out_einval;
	}

	qops->next = NULL;
	*qp = qops;
	rc = 0;
out:
	write_unlock(&qdisc_mod_lock);
	return rc;

out_einval:
	rc = -EINVAL;
	goto out;
}
EXPORT_SYMBOL(register_qdisc);

int unregister_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;
	int err = -ENOENT;

	write_lock(&qdisc_mod_lock);
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (q == qops)
			break;
	if (q) {
		*qp = q->next;
		q->next = NULL;
		err = 0;
	}
	write_unlock(&qdisc_mod_lock);
	return err;
}
EXPORT_SYMBOL(unregister_qdisc);

/* Get default qdisc if not otherwise specified */
void qdisc_get_default(char *name, size_t len)
{
	read_lock(&qdisc_mod_lock);
	strlcpy(name, default_qdisc_ops->id, len);
	read_unlock(&qdisc_mod_lock);
}

static struct Qdisc_ops *qdisc_lookup_default(const char *name)
{
	struct Qdisc_ops *q = NULL;

	for (q = qdisc_base; q; q = q->next) {
		if (!strcmp(name, q->id)) {
			if (!try_module_get(q->owner))
				q = NULL;
			break;
		}
	}

	return q;
}

/* Set new default qdisc to use */
int qdisc_set_default(const char *name)
{
	const struct Qdisc_ops *ops;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	write_lock(&qdisc_mod_lock);
	ops = qdisc_lookup_default(name);
	if (!ops) {
		/* Not found, drop lock and try to load module */
		write_unlock(&qdisc_mod_lock);
		request_module("sch_%s", name);
		write_lock(&qdisc_mod_lock);

		ops = qdisc_lookup_default(name);
	}

	if (ops) {
		/* Set new default */
		module_put(default_qdisc_ops->owner);
		default_qdisc_ops = ops;
	}
	write_unlock(&qdisc_mod_lock);

	return ops ? 0 : -ENOENT;
}

#ifdef CONFIG_NET_SCH_DEFAULT
/* Set default value from kernel config */
static int __init sch_default_qdisc(void)
{
	return qdisc_set_default(CONFIG_DEFAULT_NET_SCH);
}
late_initcall(sch_default_qdisc);
#endif

/* We know handle. Find qdisc among all qdiscs attached to device
 * (root qdisc, all its children, children of children etc.)
 * Note: caller either uses rtnl or rcu_read_lock()
 */

static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
{
	struct Qdisc *q;

	if (!qdisc_dev(root))
		return (root->handle == handle ? root : NULL);

	if (!(root->flags & TCQ_F_BUILTIN) &&
	    root->handle == handle)
		return root;

	hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
		if (q->handle == handle)
			return q;
	}
	return NULL;
}

void qdisc_hash_add(struct Qdisc *q, bool invisible)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		struct Qdisc *root = qdisc_dev(q)->qdisc;

		WARN_ON_ONCE(root == &noop_qdisc);
		ASSERT_RTNL();
		hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
	}
	if (invisible)
		q->flags |= TCQ_F_INVISIBLE;
}
EXPORT_SYMBOL(qdisc_hash_add);

void qdisc_hash_del(struct Qdisc *q)
{
	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
		ASSERT_RTNL();
		hash_del_rcu(&q->hash);
	}
}
EXPORT_SYMBOL(qdisc_hash_del);

struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
	struct Qdisc *q;

	q = qdisc_match_from_root(dev->qdisc, handle);
	if (q)
		goto out;

	if (dev_ingress_queue(dev))
		q = qdisc_match_from_root(
			dev_ingress_queue(dev)->qdisc_sleeping,
			handle);
out:
	return q;
}

static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
{
	unsigned long cl;
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = p->ops->cl_ops;

	if (cops == NULL)
		return NULL;
	cl = cops->get(p, classid);

	if (cl == 0)
		return NULL;
	leaf = cops->leaf(p, cl);
	cops->put(p, cl);
	return leaf;
}

/* Find queueing discipline by name */

static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
{
	struct Qdisc_ops *q = NULL;

	if (kind) {
		read_lock(&qdisc_mod_lock);
		for (q = qdisc_base; q; q = q->next) {
			if (nla_strcmp(kind, q->id) == 0) {
				if (!try_module_get(q->owner))
					q = NULL;
				break;
			}
		}
		read_unlock(&qdisc_mod_lock);
	}
	return q;
}

/* The linklayer setting was not transferred from iproute2 in older
 * versions, and the rate table lookup system has been dropped from
 * the kernel. To stay backward compatible with older iproute2 tc
 * utils, we detect the linklayer setting by checking whether the rate
 * table was modified.
 *
 * For linklayer ATM table entries, the rate table will be aligned to
 * 48 bytes, thus some table entries will contain the same value. The
 * mpu (min packet unit) is also encoded into the old rate table, thus
 * starting from the mpu, we find low and high table entries for
 * mapping this cell. If these entries contain the same value, then
 * the rate table has been modified for linklayer ATM.
 *
 * This is done by rounding mpu up to the nearest 48 byte cell/entry,
 * then rounding up to the next cell, calculating the table entry one
 * below, and comparing.
 */
static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
{
	int low       = roundup(r->mpu, 48);
	int high      = roundup(low+1, 48);
	int cell_low  = low >> r->cell_log;
	int cell_high = (high >> r->cell_log) - 1;

	/* rtab is too inaccurate at rates > 100Mbit/s */
	if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
		pr_debug("TC linklayer: Giving up ATM detection\n");
		return TC_LINKLAYER_ETHERNET;
	}

	if ((cell_high > cell_low) && (cell_high < 256)
	    && (rtab[cell_low] == rtab[cell_high])) {
		pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
			 cell_low, cell_high, rtab[cell_high]);
		return TC_LINKLAYER_ATM;
	}
	return TC_LINKLAYER_ETHERNET;
}
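
/* Worked example of the detection above (illustrative, assuming mpu = 0
 * and cell_log = 3): low = roundup(0, 48) = 0, high = roundup(1, 48) = 48,
 * so cell_low = 0 and cell_high = (48 >> 3) - 1 = 5. An ATM-aligned table
 * charges every size up to one 48-byte cell the same cost, so
 * rtab[0] == rtab[5] and TC_LINKLAYER_ATM is returned; an unmodified
 * Ethernet table grows with each entry and fails the comparison.
 */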

static struct qdisc_rate_table *qdisc_rtab_list;

struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
					struct nlattr *tab)
{
	struct qdisc_rate_table *rtab;

	if (tab == NULL || r->rate == 0 || r->cell_log == 0 ||
	    nla_len(tab) != TC_RTAB_SIZE)
		return NULL;

	for (rtab = qdisc_rtab_list; rtab; rtab = rtab->next) {
		if (!memcmp(&rtab->rate, r, sizeof(struct tc_ratespec)) &&
		    !memcmp(&rtab->data, nla_data(tab), 1024)) {
			rtab->refcnt++;
			return rtab;
		}
	}

	rtab = kmalloc(sizeof(*rtab), GFP_KERNEL);
	if (rtab) {
		rtab->rate = *r;
		rtab->refcnt = 1;
		memcpy(rtab->data, nla_data(tab), 1024);
		if (r->linklayer == TC_LINKLAYER_UNAWARE)
			r->linklayer = __detect_linklayer(r, rtab->data);
		rtab->next = qdisc_rtab_list;
		qdisc_rtab_list = rtab;
	}
	return rtab;
}
EXPORT_SYMBOL(qdisc_get_rtab);

void qdisc_put_rtab(struct qdisc_rate_table *tab)
{
	struct qdisc_rate_table *rtab, **rtabp;

	if (!tab || --tab->refcnt)
		return;

	for (rtabp = &qdisc_rtab_list;
	     (rtab = *rtabp) != NULL;
	     rtabp = &rtab->next) {
		if (rtab == tab) {
			*rtabp = rtab->next;
			kfree(rtab);
			return;
		}
	}
}
EXPORT_SYMBOL(qdisc_put_rtab);

static LIST_HEAD(qdisc_stab_list);

static const struct nla_policy stab_policy[TCA_STAB_MAX + 1] = {
	[TCA_STAB_BASE]	= { .len = sizeof(struct tc_sizespec) },
	[TCA_STAB_DATA] = { .type = NLA_BINARY },
};

static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt)
{
	struct nlattr *tb[TCA_STAB_MAX + 1];
	struct qdisc_size_table *stab;
	struct tc_sizespec *s;
	unsigned int tsize = 0;
	u16 *tab = NULL;
	int err;

	err = nla_parse_nested(tb, TCA_STAB_MAX, opt, stab_policy, NULL);
	if (err < 0)
		return ERR_PTR(err);
	if (!tb[TCA_STAB_BASE])
		return ERR_PTR(-EINVAL);

	s = nla_data(tb[TCA_STAB_BASE]);

	if (s->tsize > 0) {
		if (!tb[TCA_STAB_DATA])
			return ERR_PTR(-EINVAL);
		tab = nla_data(tb[TCA_STAB_DATA]);
		tsize = nla_len(tb[TCA_STAB_DATA]) / sizeof(u16);
	}

	if (tsize != s->tsize || (!tab && tsize > 0))
		return ERR_PTR(-EINVAL);

	list_for_each_entry(stab, &qdisc_stab_list, list) {
		if (memcmp(&stab->szopts, s, sizeof(*s)))
			continue;
		if (tsize > 0 && memcmp(stab->data, tab, tsize * sizeof(u16)))
			continue;
		stab->refcnt++;
		return stab;
	}

	stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
	if (!stab)
		return ERR_PTR(-ENOMEM);

	stab->refcnt = 1;
	stab->szopts = *s;
	if (tsize > 0)
		memcpy(stab->data, tab, tsize * sizeof(u16));

	list_add_tail(&stab->list, &qdisc_stab_list);

	return stab;
}

static void stab_kfree_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct qdisc_size_table, rcu));
}

void qdisc_put_stab(struct qdisc_size_table *tab)
{
	if (!tab)
		return;

	if (--tab->refcnt == 0) {
		list_del(&tab->list);
		call_rcu_bh(&tab->rcu, stab_kfree_rcu);
	}
}
EXPORT_SYMBOL(qdisc_put_stab);

static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_STAB);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return skb->len;

nla_put_failure:
	return -1;
}

void __qdisc_calculate_pkt_len(struct sk_buff *skb,
			       const struct qdisc_size_table *stab)
{
	int pkt_len, slot;

	pkt_len = skb->len + stab->szopts.overhead;
	if (unlikely(!stab->szopts.tsize))
		goto out;

	slot = pkt_len + stab->szopts.cell_align;
	if (unlikely(slot < 0))
		slot = 0;

	slot >>= stab->szopts.cell_log;
	if (likely(slot < stab->szopts.tsize))
		pkt_len = stab->data[slot];
	else
		pkt_len = stab->data[stab->szopts.tsize - 1] *
				(slot / stab->szopts.tsize) +
				stab->data[slot % stab->szopts.tsize];

	pkt_len <<= stab->szopts.size_log;
out:
	if (unlikely(pkt_len < 1))
		pkt_len = 1;
	qdisc_skb_cb(skb)->pkt_len = pkt_len;
}
EXPORT_SYMBOL(__qdisc_calculate_pkt_len);
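
/* Worked example of the overflow branch above (illustrative, assuming
 * cell_log = 0 and tsize = 512): a 1200 byte packet yields slot = 1200,
 * so pkt_len = data[511] * (1200 / 512) + data[1200 % 512]
 *            = data[511] * 2 + data[176],
 * i.e. the table is extrapolated linearly beyond its last entry.
 */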

void qdisc_warn_nonwc(const char *txt, struct Qdisc *qdisc)
{
	if (!(qdisc->flags & TCQ_F_WARN_NONWC)) {
		pr_warn("%s: %s qdisc %X: is non-work-conserving?\n",
			txt, qdisc->ops->id, qdisc->handle >> 16);
		qdisc->flags |= TCQ_F_WARN_NONWC;
	}
}
EXPORT_SYMBOL(qdisc_warn_nonwc);

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
	struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
						 timer);

	rcu_read_lock();
	__netif_schedule(qdisc_root(wd->qdisc));
	rcu_read_unlock();

	return HRTIMER_NORESTART;
}

void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
{
	hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	wd->timer.function = qdisc_watchdog;
	wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init);

void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
{
	if (test_bit(__QDISC_STATE_DEACTIVATED,
		     &qdisc_root_sleeping(wd->qdisc)->state))
		return;

	if (wd->last_expires == expires)
		return;

	wd->last_expires = expires;
	hrtimer_start(&wd->timer,
		      ns_to_ktime(expires),
		      HRTIMER_MODE_ABS_PINNED);
}
EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);

void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
{
	hrtimer_cancel(&wd->timer);
}
EXPORT_SYMBOL(qdisc_watchdog_cancel);

static struct hlist_head *qdisc_class_hash_alloc(unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head), i;
	struct hlist_head *h;

	if (size <= PAGE_SIZE)
		h = kmalloc(size, GFP_KERNEL);
	else
		h = (struct hlist_head *)
			__get_free_pages(GFP_KERNEL, get_order(size));

	if (h != NULL) {
		for (i = 0; i < n; i++)
			INIT_HLIST_HEAD(&h[i]);
	}
	return h;
}

static void qdisc_class_hash_free(struct hlist_head *h, unsigned int n)
{
	unsigned int size = n * sizeof(struct hlist_head);

	if (size <= PAGE_SIZE)
		kfree(h);
	else
		free_pages((unsigned long)h, get_order(size));
}

void qdisc_class_hash_grow(struct Qdisc *sch, struct Qdisc_class_hash *clhash)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *next;
	struct hlist_head *nhash, *ohash;
	unsigned int nsize, nmask, osize;
	unsigned int i, h;

	/* Rehash when load factor exceeds 0.75 */
	if (clhash->hashelems * 4 <= clhash->hashsize * 3)
		return;
	nsize = clhash->hashsize * 2;
	nmask = nsize - 1;
	nhash = qdisc_class_hash_alloc(nsize);
	if (nhash == NULL)
		return;

	ohash = clhash->hash;
	osize = clhash->hashsize;

	sch_tree_lock(sch);
	for (i = 0; i < osize; i++) {
		hlist_for_each_entry_safe(cl, next, &ohash[i], hnode) {
			h = qdisc_class_hash(cl->classid, nmask);
			hlist_add_head(&cl->hnode, &nhash[h]);
		}
	}
	clhash->hash     = nhash;
	clhash->hashsize = nsize;
	clhash->hashmask = nmask;
	sch_tree_unlock(sch);

	qdisc_class_hash_free(ohash, osize);
}
EXPORT_SYMBOL(qdisc_class_hash_grow);

int qdisc_class_hash_init(struct Qdisc_class_hash *clhash)
{
	unsigned int size = 4;

	clhash->hash = qdisc_class_hash_alloc(size);
	if (clhash->hash == NULL)
		return -ENOMEM;
	clhash->hashsize  = size;
	clhash->hashmask  = size - 1;
	clhash->hashelems = 0;
	return 0;
}
EXPORT_SYMBOL(qdisc_class_hash_init);

void qdisc_class_hash_destroy(struct Qdisc_class_hash *clhash)
{
	qdisc_class_hash_free(clhash->hash, clhash->hashsize);
}
EXPORT_SYMBOL(qdisc_class_hash_destroy);

void qdisc_class_hash_insert(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	unsigned int h;

	INIT_HLIST_NODE(&cl->hnode);
	h = qdisc_class_hash(cl->classid, clhash->hashmask);
	hlist_add_head(&cl->hnode, &clhash->hash[h]);
	clhash->hashelems++;
}
EXPORT_SYMBOL(qdisc_class_hash_insert);

void qdisc_class_hash_remove(struct Qdisc_class_hash *clhash,
			     struct Qdisc_class_common *cl)
{
	hlist_del(&cl->hnode);
	clhash->hashelems--;
}
EXPORT_SYMBOL(qdisc_class_hash_remove);

/* Allocate a unique handle from space managed by kernel
 * Possible range is [8000-FFFF]:0000 (0x8000 values)
 */
static u32 qdisc_alloc_handle(struct net_device *dev)
{
	int i = 0x8000;
	static u32 autohandle = TC_H_MAKE(0x80000000U, 0);

	do {
		autohandle += TC_H_MAKE(0x10000U, 0);
		if (autohandle == TC_H_MAKE(TC_H_ROOT, 0))
			autohandle = TC_H_MAKE(0x80000000U, 0);
		if (!qdisc_lookup(dev, autohandle))
			return autohandle;
		cond_resched();
	} while	(--i > 0);

	return 0;
}
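
/* For illustration: handles are major:minor pairs packed into a u32, so
 * the first call above typically hands out 8001:0, then 8002:0, and so
 * on, skipping any value already taken by an explicitly configured
 * qdisc on the device.
 */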

void qdisc_tree_reduce_backlog(struct Qdisc *sch, unsigned int n,
			       unsigned int len)
{
	const struct Qdisc_class_ops *cops;
	unsigned long cl;
	u32 parentid;
	int drops;

	if (n == 0 && len == 0)
		return;
	drops = max_t(int, n, 0);
	rcu_read_lock();
	while ((parentid = sch->parent)) {
		if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
			break;

		if (sch->flags & TCQ_F_NOPARENT)
			break;
		/* TODO: perform the search on a per txq basis */
		sch = qdisc_lookup(qdisc_dev(sch), TC_H_MAJ(parentid));
		if (sch == NULL) {
			WARN_ON_ONCE(parentid != TC_H_ROOT);
			break;
		}
		cops = sch->ops->cl_ops;
		if (cops->qlen_notify) {
			cl = cops->get(sch, parentid);
			cops->qlen_notify(sch, cl);
			cops->put(sch, cl);
		}
		sch->q.qlen -= n;
		sch->qstats.backlog -= len;
		__qdisc_qstats_drop(sch, drops);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(qdisc_tree_reduce_backlog);
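
/* Illustrative caller pattern (hypothetical qdisc, not part of this file):
 * a classful qdisc that just dropped "dropped" packets totalling "bytes"
 * from child "child" would call
 *	qdisc_tree_reduce_backlog(child, dropped, bytes);
 * so that the qlen/backlog counters of every ancestor stay consistent.
 */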

static void notify_and_destroy(struct net *net, struct sk_buff *skb,
			       struct nlmsghdr *n, u32 clid,
			       struct Qdisc *old, struct Qdisc *new)
{
	if (new || old)
		qdisc_notify(net, skb, n, clid, old, new);

	if (old)
		qdisc_destroy(old);
}

/* Graft qdisc "new" to class "classid" of qdisc "parent" or
 * to device "dev".
 *
 * When appropriate send a netlink notification using 'skb'
 * and "n".
 *
 * On success, destroy old qdisc.
 */

static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
		       struct sk_buff *skb, struct nlmsghdr *n, u32 classid,
		       struct Qdisc *new, struct Qdisc *old)
{
	struct Qdisc *q = old;
	struct net *net = dev_net(dev);
	int err = 0;

	if (parent == NULL) {
		unsigned int i, num_q, ingress;

		ingress = 0;
		num_q = dev->num_tx_queues;
		if ((q && q->flags & TCQ_F_INGRESS) ||
		    (new && new->flags & TCQ_F_INGRESS)) {
			num_q = 1;
			ingress = 1;
			if (!dev_ingress_queue(dev))
				return -ENOENT;
		}

		if (dev->flags & IFF_UP)
			dev_deactivate(dev);

		if (new && new->ops->attach)
			goto skip;

		for (i = 0; i < num_q; i++) {
			struct netdev_queue *dev_queue = dev_ingress_queue(dev);

			if (!ingress)
				dev_queue = netdev_get_tx_queue(dev, i);

			old = dev_graft_qdisc(dev_queue, new);
			if (new && i > 0)
				atomic_inc(&new->refcnt);

			if (!ingress)
				qdisc_destroy(old);
		}

skip:
		if (!ingress) {
			notify_and_destroy(net, skb, n, classid,
					   dev->qdisc, new);
			if (new && !new->ops->attach)
				atomic_inc(&new->refcnt);
			dev->qdisc = new ? : &noop_qdisc;

			if (new && new->ops->attach)
				new->ops->attach(new);
		} else {
			notify_and_destroy(net, skb, n, classid, old, new);
		}

		if (dev->flags & IFF_UP)
			dev_activate(dev);
	} else {
		const struct Qdisc_class_ops *cops = parent->ops->cl_ops;

		err = -EOPNOTSUPP;
		if (cops && cops->graft) {
			unsigned long cl = cops->get(parent, classid);
			if (cl) {
				err = cops->graft(parent, cl, new, &old);
				cops->put(parent, cl);
			} else
				err = -ENOENT;
		}
		if (!err)
			notify_and_destroy(net, skb, n, classid, old, new);
	}
	return err;
}

/* lockdep annotation is needed for ingress; egress gets it only for name */
static struct lock_class_key qdisc_tx_lock;
static struct lock_class_key qdisc_rx_lock;

/*
   Allocate and initialize new qdisc.

   Parameters are passed via opt.
 */

static struct Qdisc *qdisc_create(struct net_device *dev,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *p, u32 parent, u32 handle,
				  struct nlattr **tca, int *errp)
{
	int err;
	struct nlattr *kind = tca[TCA_KIND];
	struct Qdisc *sch;
	struct Qdisc_ops *ops;
	struct qdisc_size_table *stab;

	ops = qdisc_lookup_ops(kind);
#ifdef CONFIG_MODULES
	if (ops == NULL && kind != NULL) {
		char name[IFNAMSIZ];
		if (nla_strlcpy(name, kind, IFNAMSIZ) < IFNAMSIZ) {
			/* We dropped the RTNL semaphore in order to
			 * perform the module load. So, even if we
			 * succeeded in loading the module we have to
			 * tell the caller to replay the request. We
			 * indicate this using -EAGAIN.
			 * We replay the request because the device may
			 * go away in the mean time.
			 */
			rtnl_unlock();
			request_module("sch_%s", name);
			rtnl_lock();
			ops = qdisc_lookup_ops(kind);
			if (ops != NULL) {
				/* We will try again qdisc_lookup_ops,
				 * so don't keep a reference.
				 */
				module_put(ops->owner);
				err = -EAGAIN;
				goto err_out;
			}
		}
	}
#endif

	err = -ENOENT;
	if (ops == NULL)
		goto err_out;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch)) {
		err = PTR_ERR(sch);
		goto err_out2;
	}

	sch->parent = parent;

	if (handle == TC_H_INGRESS) {
		sch->flags |= TCQ_F_INGRESS;
		handle = TC_H_MAKE(TC_H_INGRESS, 0);
		lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
	} else {
		if (handle == 0) {
			handle = qdisc_alloc_handle(dev);
			err = -ENOMEM;
			if (handle == 0)
				goto err_out3;
		}
		lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
		if (!netif_is_multiqueue(dev))
			sch->flags |= TCQ_F_ONETXQUEUE;
	}

	sch->handle = handle;

	/* This exists to keep backward compatibility with a userspace
	 * loophole, which allowed userspace to get IFF_NO_QUEUE
	 * facility on older kernels by setting tx_queue_len=0 (prior
	 * to qdisc init), and then forgetting to reinit tx_queue_len
	 * before again attaching a qdisc.
	 */
	if ((dev->priv_flags & IFF_NO_QUEUE) && (dev->tx_queue_len == 0)) {
		dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
		netdev_info(dev, "Caught tx_queue_len zero misconfig\n");
	}

	if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
		if (qdisc_is_percpu_stats(sch)) {
			sch->cpu_bstats =
				netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
			if (!sch->cpu_bstats)
				goto err_out4;

			sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
			if (!sch->cpu_qstats)
				goto err_out4;
		}

		if (tca[TCA_STAB]) {
			stab = qdisc_get_stab(tca[TCA_STAB]);
			if (IS_ERR(stab)) {
				err = PTR_ERR(stab);
				goto err_out4;
			}
			rcu_assign_pointer(sch->stab, stab);
		}
		if (tca[TCA_RATE]) {
			seqcount_t *running;

			err = -EOPNOTSUPP;
			if (sch->flags & TCQ_F_MQROOT)
				goto err_out4;

			if ((sch->parent != TC_H_ROOT) &&
			    !(sch->flags & TCQ_F_INGRESS) &&
			    (!p || !(p->flags & TCQ_F_MQROOT)))
				running = qdisc_root_sleeping_running(sch);
			else
				running = &sch->running;

			err = gen_new_estimator(&sch->bstats,
						sch->cpu_bstats,
						&sch->rate_est,
						NULL,
						running,
						tca[TCA_RATE]);
			if (err)
				goto err_out4;
		}

		qdisc_hash_add(sch, false);

		return sch;
	}
	/* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
	if (ops->destroy)
		ops->destroy(sch);
err_out3:
	dev_put(dev);
	kfree((char *) sch - sch->padded);
err_out2:
	module_put(ops->owner);
err_out:
	*errp = err;
	return NULL;

err_out4:
	free_percpu(sch->cpu_bstats);
	free_percpu(sch->cpu_qstats);
	/*
	 * Any broken qdiscs that would require a ops->reset() here?
	 * The qdisc was never in action so it shouldn't be necessary.
	 */
	qdisc_put_stab(rtnl_dereference(sch->stab));
	if (ops->destroy)
		ops->destroy(sch);
	goto err_out3;
}

static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
{
	struct qdisc_size_table *ostab, *stab = NULL;
	int err = 0;

	if (tca[TCA_OPTIONS]) {
		if (sch->ops->change == NULL)
			return -EINVAL;
		err = sch->ops->change(sch, tca[TCA_OPTIONS]);
		if (err)
			return err;
	}

	if (tca[TCA_STAB]) {
		stab = qdisc_get_stab(tca[TCA_STAB]);
		if (IS_ERR(stab))
			return PTR_ERR(stab);
	}

	ostab = rtnl_dereference(sch->stab);
	rcu_assign_pointer(sch->stab, stab);
	qdisc_put_stab(ostab);

	if (tca[TCA_RATE]) {
		/* NB: ignores errors from replace_estimator
		   because change can't be undone. */
		if (sch->flags & TCQ_F_MQROOT)
			goto out;
		gen_replace_estimator(&sch->bstats,
				      sch->cpu_bstats,
				      &sch->rate_est,
				      NULL,
				      qdisc_root_sleeping_running(sch),
				      tca[TCA_RATE]);
	}
out:
	return 0;
}

struct check_loop_arg {
	struct qdisc_walker	w;
	struct Qdisc		*p;
	int			depth;
};

static int check_loop_fn(struct Qdisc *q, unsigned long cl,
			 struct qdisc_walker *w);

static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
{
	struct check_loop_arg	arg;

	if (q->ops->cl_ops == NULL)
		return 0;

	arg.w.stop = arg.w.skip = arg.w.count = 0;
	arg.w.fn = check_loop_fn;
	arg.depth = depth;
	arg.p = p;
	q->ops->cl_ops->walk(q, &arg.w);
	return arg.w.stop ? -ELOOP : 0;
}

static int
check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
{
	struct Qdisc *leaf;
	const struct Qdisc_class_ops *cops = q->ops->cl_ops;
	struct check_loop_arg *arg = (struct check_loop_arg *)w;

	leaf = cops->leaf(q, cl);
	if (leaf) {
		if (leaf == arg->p || arg->depth > 7)
			return -ELOOP;
		return check_loop(leaf, arg->p, arg->depth + 1);
	}
	return 0;
}

/*
 * Delete/get qdisc.
 */

static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q = NULL;
	struct Qdisc *p = NULL;
	int err;

	if ((n->nlmsg_type != RTM_GETQDISC) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	clid = tcm->tcm_parent;
	if (clid) {
		if (clid != TC_H_ROOT) {
			if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}
		if (!q)
			return -ENOENT;

		if (tcm->tcm_handle && q->handle != tcm->tcm_handle)
			return -EINVAL;
	} else {
		q = qdisc_lookup(dev, tcm->tcm_handle);
		if (!q)
			return -ENOENT;
	}

	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;

	if (n->nlmsg_type == RTM_DELQDISC) {
		if (!clid)
			return -EINVAL;
		if (q->handle == 0)
			return -ENOENT;
		err = qdisc_graft(dev, p, skb, n, clid, NULL, q);
		if (err != 0)
			return err;
	} else {
		qdisc_notify(net, skb, n, clid, NULL, q);
	}
	return 0;
}

/*
 * Create/change qdisc.
 */

static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm;
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	u32 clid;
	struct Qdisc *q, *p;
	int err;

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

replay:
	/* Reinit, just in case something touches this. */
	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	tcm = nlmsg_data(n);
	clid = tcm->tcm_parent;
	q = p = NULL;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;


	if (clid) {
		if (clid != TC_H_ROOT) {
			if (clid != TC_H_INGRESS) {
				p = qdisc_lookup(dev, TC_H_MAJ(clid));
				if (!p)
					return -ENOENT;
				q = qdisc_leaf(p, clid);
			} else if (dev_ingress_queue_create(dev)) {
				q = dev_ingress_queue(dev)->qdisc_sleeping;
			}
		} else {
			q = dev->qdisc;
		}

		/* It may be default qdisc, ignore it */
		if (q && q->handle == 0)
			q = NULL;

		if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
			if (tcm->tcm_handle) {
				if (q && !(n->nlmsg_flags & NLM_F_REPLACE))
					return -EEXIST;
				if (TC_H_MIN(tcm->tcm_handle))
					return -EINVAL;
				q = qdisc_lookup(dev, tcm->tcm_handle);
				if (!q)
					goto create_n_graft;
				if (n->nlmsg_flags & NLM_F_EXCL)
					return -EEXIST;
				if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
					return -EINVAL;
				if (q == p ||
				    (p && check_loop(q, p, 0)))
					return -ELOOP;
				atomic_inc(&q->refcnt);
				goto graft;
			} else {
				if (!q)
					goto create_n_graft;

				/* This magic test requires explanation.
				 *
				 *   We know, that some child q is already
				 *   attached to this parent and have choice:
				 *   either to change it or to create/graft new one.
				 *
				 *   1. We are allowed to create/graft only
				 *   if CREATE and REPLACE flags are set.
				 *
				 *   2. If EXCL is set, requestor wanted to say,
				 *   that qdisc tcm_handle is not expected
				 *   to exist, so that we choose create/graft too.
				 *
				 *   3. The last case is when no flags are set.
				 *   Alas, it is sort of hole in API, we
				 *   cannot decide what to do unambiguously.
				 *   For now we select create/graft, if
				 *   user gave KIND, which does not match existing.
				 */
				if ((n->nlmsg_flags & NLM_F_CREATE) &&
				    (n->nlmsg_flags & NLM_F_REPLACE) &&
				    ((n->nlmsg_flags & NLM_F_EXCL) ||
				     (tca[TCA_KIND] &&
				      nla_strcmp(tca[TCA_KIND], q->ops->id))))
					goto create_n_graft;
			}
		}
	} else {
		if (!tcm->tcm_handle)
			return -EINVAL;
		q = qdisc_lookup(dev, tcm->tcm_handle);
	}

	/* Change qdisc parameters */
	if (q == NULL)
		return -ENOENT;
	if (n->nlmsg_flags & NLM_F_EXCL)
		return -EEXIST;
	if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id))
		return -EINVAL;
	err = qdisc_change(q, tca);
	if (err == 0)
		qdisc_notify(net, skb, n, clid, NULL, q);
	return err;

create_n_graft:
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		return -ENOENT;
	if (clid == TC_H_INGRESS) {
		if (dev_ingress_queue(dev))
			q = qdisc_create(dev, dev_ingress_queue(dev), p,
					 tcm->tcm_parent, tcm->tcm_parent,
					 tca, &err);
		else
			err = -ENOENT;
	} else {
		struct netdev_queue *dev_queue;

		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
		else if (p)
			dev_queue = p->dev_queue;
		else
			dev_queue = netdev_get_tx_queue(dev, 0);

		q = qdisc_create(dev, dev_queue, p,
				 tcm->tcm_parent, tcm->tcm_handle,
				 tca, &err);
	}
	if (q == NULL) {
		if (err == -EAGAIN)
			goto replay;
		return err;
	}

graft:
	err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
	if (err) {
		if (q)
			qdisc_destroy(q);
		return err;
	}

	return 0;
}

static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
			 u32 portid, u32 seq, u16 flags, int event)
{
	struct gnet_stats_basic_cpu __percpu *cpu_bstats = NULL;
	struct gnet_stats_queue __percpu *cpu_qstats = NULL;
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	struct qdisc_size_table *stab;
	__u32 qlen;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = clid;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = atomic_read(&q->refcnt);
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (q->ops->dump && q->ops->dump(q, skb) < 0)
		goto nla_put_failure;
	qlen = q->q.qlen;

	stab = rtnl_dereference(q->stab);
	if (stab && qdisc_dump_stab(skb, stab) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
		goto nla_put_failure;

	if (qdisc_is_percpu_stats(q)) {
		cpu_bstats = q->cpu_bstats;
		cpu_qstats = q->cpu_qstats;
	}

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
				  &d, cpu_bstats, &q->bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
{
	if (q->flags & TCQ_F_BUILTIN)
		return true;
	if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
		return true;

	return false;
}

static int qdisc_notify(struct net *net, struct sk_buff *oskb,
			struct nlmsghdr *n, u32 clid,
			struct Qdisc *old, struct Qdisc *new)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (old && !tc_qdisc_dump_ignore(old, false)) {
		if (tc_fill_qdisc(skb, old, clid, portid, n->nlmsg_seq,
				  0, RTM_DELQDISC) < 0)
			goto err_out;
	}
	if (new && !tc_qdisc_dump_ignore(new, false)) {
		if (tc_fill_qdisc(skb, new, clid, portid, n->nlmsg_seq,
				  old ? NLM_F_REPLACE : 0, RTM_NEWQDISC) < 0)
			goto err_out;
	}

	if (skb->len)
		return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
				      n->nlmsg_flags & NLM_F_ECHO);

err_out:
	kfree_skb(skb);
	return -EINVAL;
}

static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
			      struct netlink_callback *cb,
			      int *q_idx_p, int s_q_idx, bool recur,
			      bool dump_invisible)
{
	int ret = 0, q_idx = *q_idx_p;
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	q = root;
	if (q_idx < s_q_idx) {
		q_idx++;
	} else {
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

	/* If dumping singletons, there is no qdisc_dev(root) and the singleton
	 * itself has already been dumped.
	 *
	 * If we've already dumped the top-level (ingress) qdisc above and the global
	 * qdisc hashtable, we don't want to hit it again
	 */
	if (!qdisc_dev(root) || !recur)
		goto out;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (q_idx < s_q_idx) {
			q_idx++;
			continue;
		}
		if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
		    tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  RTM_NEWQDISC) <= 0)
			goto done;
		q_idx++;
	}

out:
	*q_idx_p = q_idx;
	return ret;
done:
	ret = -1;
	goto out;
}

static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	int idx, q_idx;
	int s_idx, s_q_idx;
	struct net_device *dev;
	const struct nlmsghdr *nlh = cb->nlh;
	struct tcmsg *tcm = nlmsg_data(nlh);
	struct nlattr *tca[TCA_MAX + 1];
	int err;

	s_idx = cb->args[0];
	s_q_idx = q_idx = cb->args[1];

	idx = 0;
	ASSERT_RTNL();

	err = nlmsg_parse(nlh, sizeof(*tcm), tca, TCA_MAX, NULL, NULL);
	if (err < 0)
		return err;

	for_each_netdev(net, dev) {
		struct netdev_queue *dev_queue;

		if (idx < s_idx)
			goto cont;
		if (idx > s_idx)
			s_q_idx = 0;
		q_idx = 0;

		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx,
				       true, tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

		dev_queue = dev_ingress_queue(dev);
		if (dev_queue &&
		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
				       &q_idx, s_q_idx, false,
				       tca[TCA_DUMP_INVISIBLE]) < 0)
			goto done;

cont:
		idx++;
	}

done:
	cb->args[0] = idx;
	cb->args[1] = q_idx;

	return skb->len;
}



/************************************************
 *	Traffic classes manipulation.		*
 ************************************************/


static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct tcmsg *tcm = nlmsg_data(n);
	struct nlattr *tca[TCA_MAX + 1];
	struct net_device *dev;
	struct Qdisc *q = NULL;
	const struct Qdisc_class_ops *cops;
	unsigned long cl = 0;
	unsigned long new_cl;
	u32 portid;
	u32 clid;
	u32 qid;
	int err;

	if ((n->nlmsg_type != RTM_GETTCLASS) &&
	    !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL, extack);
	if (err < 0)
		return err;

	dev = __dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return -ENODEV;

	/*
	   parent == TC_H_UNSPEC - unspecified parent.
	   parent == TC_H_ROOT   - class is root, which has no parent.
	   parent == X:0	 - parent is root class.
	   parent == X:Y	 - parent is a node in hierarchy.
	   parent == 0:Y	 - parent is X:Y, where X:0 is qdisc.

	   handle == 0:0	 - generate handle from kernel pool.
	   handle == 0:Y	 - class is X:Y, where X:0 is qdisc.
	   handle == X:Y	 - clear.
	   handle == X:0	 - root class.
	 */
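
	/* For example (illustrative only): "tc class add dev eth0 parent 1:
	 * classid 1:10 ..." arrives here with tcm_parent == 1:0 and
	 * tcm_handle == 1:10, while omitting classid gives handle == 0:0
	 * and the handle is generated from the kernel pool, as noted above.
	 */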

	/* Step 1. Determine qdisc handle X:0 */

	portid = tcm->tcm_parent;
	clid = tcm->tcm_handle;
	qid = TC_H_MAJ(clid);

	if (portid != TC_H_ROOT) {
		u32 qid1 = TC_H_MAJ(portid);

		if (qid && qid1) {
			/* If both majors are known, they must be identical. */
			if (qid != qid1)
				return -EINVAL;
		} else if (qid1) {
			qid = qid1;
		} else if (qid == 0)
			qid = dev->qdisc->handle;

		/* Now qid is genuine qdisc handle consistent
		 * both with parent and child.
		 *
		 * TC_H_MAJ(portid) still may be unspecified, complete it now.
		 */
		if (portid)
			portid = TC_H_MAKE(qid, portid);
	} else {
		if (qid == 0)
			qid = dev->qdisc->handle;
	}

	/* OK. Locate qdisc */
	q = qdisc_lookup(dev, qid);
	if (!q)
		return -ENOENT;

	/* And check that it supports classes */
	cops = q->ops->cl_ops;
	if (cops == NULL)
		return -EINVAL;

	/* Now try to get class */
	if (clid == 0) {
		if (portid == TC_H_ROOT)
			clid = qid;
	} else
		clid = TC_H_MAKE(qid, clid);

	if (clid)
		cl = cops->get(q, clid);

	if (cl == 0) {
		err = -ENOENT;
		if (n->nlmsg_type != RTM_NEWTCLASS ||
		    !(n->nlmsg_flags & NLM_F_CREATE))
			goto out;
	} else {
		switch (n->nlmsg_type) {
		case RTM_NEWTCLASS:
			err = -EEXIST;
			if (n->nlmsg_flags & NLM_F_EXCL)
				goto out;
			break;
		case RTM_DELTCLASS:
			err = -EOPNOTSUPP;
			if (cops->delete)
				err = cops->delete(q, cl);
			if (err == 0)
				tclass_notify(net, skb, n, q, cl,
					      RTM_DELTCLASS);
			goto out;
		case RTM_GETTCLASS:
			err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
			goto out;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	new_cl = cl;
	err = -EOPNOTSUPP;
	if (cops->change)
		err = cops->change(q, clid, portid, tca, &new_cl);
	if (err == 0)
		tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);

out:
	if (cl)
		cops->put(q, cl);

	return err;
}

static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
			  unsigned long cl,
			  u32 portid, u32 seq, u16 flags, int event)
{
	struct tcmsg *tcm;
	struct nlmsghdr  *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct gnet_dump d;
	const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;

	cond_resched();
	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	tcm = nlmsg_data(nlh);
	tcm->tcm_family = AF_UNSPEC;
	tcm->tcm__pad1 = 0;
	tcm->tcm__pad2 = 0;
	tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
	tcm->tcm_parent = q->handle;
	tcm->tcm_handle = q->handle;
	tcm->tcm_info = 0;
	if (nla_put_string(skb, TCA_KIND, q->ops->id))
		goto nla_put_failure;
	if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
		goto nla_put_failure;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS, TCA_XSTATS,
					 NULL, &d, TCA_PAD) < 0)
		goto nla_put_failure;

	if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
		goto nla_put_failure;

	if (gnet_stats_finish_copy(&d) < 0)
		goto nla_put_failure;

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static int tclass_notify(struct net *net, struct sk_buff *oskb,
			 struct nlmsghdr *n, struct Qdisc *q,
			 unsigned long cl, int event)
{
	struct sk_buff *skb;
	u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			      n->nlmsg_flags & NLM_F_ECHO);
}

struct qdisc_dump_args {
	struct qdisc_walker	w;
	struct sk_buff		*skb;
	struct netlink_callback	*cb;
};

static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
			    struct qdisc_walker *arg)
{
	struct qdisc_dump_args *a = (struct qdisc_dump_args *)arg;

	return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
			      a->cb->nlh->nlmsg_seq, NLM_F_MULTI,
			      RTM_NEWTCLASS);
}

static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
				struct tcmsg *tcm, struct netlink_callback *cb,
				int *t_p, int s_t)
{
	struct qdisc_dump_args arg;

	if (tc_qdisc_dump_ignore(q, false) ||
	    *t_p < s_t || !q->ops->cl_ops ||
	    (tcm->tcm_parent &&
	     TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
		(*t_p)++;
		return 0;
	}
	if (*t_p > s_t)
		memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
	arg.w.fn = qdisc_class_dump;
	arg.skb = skb;
	arg.cb = cb;
	arg.w.stop  = 0;
	arg.w.skip = cb->args[1];
	arg.w.count = 0;
	q->ops->cl_ops->walk(q, &arg.w);
	cb->args[1] = arg.w.count;
	if (arg.w.stop)
		return -1;
	(*t_p)++;
	return 0;
}

static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
			       struct tcmsg *tcm, struct netlink_callback *cb,
			       int *t_p, int s_t)
{
	struct Qdisc *q;
	int b;

	if (!root)
		return 0;

	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
		return -1;

	if (!qdisc_dev(root))
		return 0;

	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
			return -1;
	}

	return 0;
}

static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct tcmsg *tcm = nlmsg_data(cb->nlh);
	struct net *net = sock_net(skb->sk);
	struct netdev_queue *dev_queue;
	struct net_device *dev;
	int t, s_t;

	if (nlmsg_len(cb->nlh) < sizeof(*tcm))
		return 0;
	dev = dev_get_by_index(net, tcm->tcm_ifindex);
	if (!dev)
		return 0;

	s_t = cb->args[0];
	t = 0;

	if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
		goto done;

	dev_queue = dev_ingress_queue(dev);
	if (dev_queue &&
	    tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
				&t, s_t) < 0)
		goto done;

done:
	cb->args[0] = t;

	dev_put(dev);
	return skb->len;
}

/* Main classifier routine: scans classifier chain attached
 * to this qdisc, (optionally) tests for protocol and asks
 * specific classifiers.
 */
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		struct tcf_result *res, bool compat_mode)
{
	__be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
	const int max_reclassify_loop = 4;
	const struct tcf_proto *old_tp = tp;
	int limit = 0;

reclassify:
#endif
	for (; tp; tp = rcu_dereference_bh(tp->next)) {
		int err;

		if (tp->protocol != protocol &&
		    tp->protocol != htons(ETH_P_ALL))
			continue;

		err = tp->classify(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
		if (unlikely(err == TC_ACT_RECLASSIFY && !compat_mode))
			goto reset;
#endif
		if (err >= 0)
			return err;
	}

	return TC_ACT_UNSPEC; /* signal: continue lookup */
#ifdef CONFIG_NET_CLS_ACT
reset:
	if (unlikely(limit++ >= max_reclassify_loop)) {
		net_notice_ratelimited("%s: reclassify loop, rule prio %u, protocol %02x\n",
				       tp->q->ops->id, tp->prio & 0xffff,
				       ntohs(tp->protocol));
		return TC_ACT_SHOT;
	}

	tp = old_tp;
	protocol = tc_skb_protocol(skb);
	goto reclassify;
#endif
}
EXPORT_SYMBOL(tc_classify);

#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%08x %08x %08x %08x\n",
		   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
		   1000000,
		   (u32)NSEC_PER_SEC / hrtimer_resolution);

	return 0;
}

static int psched_open(struct inode *inode, struct file *file)
{
	return single_open(file, psched_show, NULL);
}

static const struct file_operations psched_fops = {
	.owner = THIS_MODULE,
	.open = psched_open,
	.read  = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __net_init psched_net_init(struct net *net)
{
	struct proc_dir_entry *e;

	e = proc_create("psched", 0, net->proc_net, &psched_fops);
	if (e == NULL)
		return -ENOMEM;

	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
	remove_proc_entry("psched", net->proc_net);
}
#else
static int __net_init psched_net_init(struct net *net)
{
	return 0;
}

static void __net_exit psched_net_exit(struct net *net)
{
}
#endif

static struct pernet_operations psched_net_ops = {
	.init = psched_net_init,
	.exit = psched_net_exit,
};

static int __init pktsched_init(void)
{
	int err;

	err = register_pernet_subsys(&psched_net_ops);
	if (err) {
		pr_err("pktsched_init: "
		       "cannot initialize per netns operations\n");
		return err;
	}

	register_qdisc(&pfifo_fast_ops);
	register_qdisc(&pfifo_qdisc_ops);
	register_qdisc(&bfifo_qdisc_ops);
	register_qdisc(&pfifo_head_drop_qdisc_ops);
	register_qdisc(&mq_qdisc_ops);
	register_qdisc(&noqueue_qdisc_ops);

	rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
		      NULL);
	rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
		      NULL);

	return 0;
}

subsys_initcall(pktsched_init);