/*
 * net/sched/sch_drr.c		Deficit Round Robin scheduler
 *
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
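/*
 * Deficit Round Robin (DRR) as implemented here: each class is assigned a
 * quantum (in bytes) and keeps a deficit counter.  Backlogged classes sit
 * on the scheduler's active list; on each visit a class may send packets
 * whose total size does not exceed its deficit, and the deficit is
 * replenished by one quantum before the class is rotated to the tail of
 * the list.  See drr_enqueue()/drr_dequeue() below and M. Shreedhar,
 * G. Varghese, "Efficient Fair Queuing using Deficit Round Robin".
 */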
struct drr_class {
	struct Qdisc_class_common	common;
	unsigned int			refcnt;
	unsigned int			filter_cnt;
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est64	rate_est;
	struct list_head		alist;
	struct Qdisc			*qdisc;
	u32				quantum;
	u32				deficit;
};

struct drr_sched {
	struct list_head		active;
	struct tcf_proto __rcu		*filter_list;
	struct Qdisc_class_hash		clhash;
};
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct drr_class, common);
}
static void drr_purge_queue(struct drr_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;

	qdisc_reset(cl->qdisc);
	qdisc_tree_decrease_qlen(cl->qdisc, len);
}
static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
	[TCA_DRR_QUANTUM]	= { .type = NLA_U32 },
};
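/*
 * Classes are created and changed over netlink via drr_change_class().  The
 * only class attribute is TCA_DRR_QUANTUM; when it is omitted the quantum
 * defaults to the device MTU (psched_mtu()), and an optional TCA_RATE
 * attribute attaches a rate estimator to the class.
 */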
static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)*arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DRR_MAX + 1];
	u32 quantum;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_DRR_MAX, opt, drr_policy);
	if (err < 0)
		return err;

	if (tb[TCA_DRR_QUANTUM]) {
		quantum = nla_get_u32(tb[TCA_DRR_QUANTUM]);
		if (quantum == 0)
			return -EINVAL;
	} else
		quantum = psched_mtu(qdisc_dev(sch));

	if (cl != NULL) {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    qdisc_root_sleeping_lock(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
		if (tb[TCA_DRR_QUANTUM])
			cl->quantum = quantum;
		sch_tree_unlock(sch);
		return 0;
	}

	cl = kzalloc(sizeof(struct drr_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->refcnt = 1;
	cl->common.classid = classid;
	cl->quantum = quantum;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_replace_estimator(&cl->bstats, NULL, &cl->rate_est,
					    qdisc_root_sleeping_lock(sch),
					    tca[TCA_RATE]);
		if (err) {
			qdisc_destroy(cl->qdisc);
			kfree(cl);
			return err;
		}
	}

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->common);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}
static void drr_destroy_class(struct Qdisc *sch, struct drr_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}
static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);
	drr_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	BUG_ON(--cl->refcnt == 0);
	/*
	 * This shouldn't happen: we "hold" one cops->get() when called
	 * from tc_ctl_tclass; the destroy method is done from cops->put().
	 */
	sch_tree_unlock(sch);
	return 0;
}
static unsigned long drr_get_class(struct Qdisc *sch, u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->refcnt++;
	return (unsigned long)cl;
}
static void drr_put_class(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (--cl->refcnt == 0)
		drr_destroy_class(sch, cl);
}
static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
					      unsigned long cl)
{
	struct drr_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
static unsigned long drr_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct drr_class *cl = drr_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;
	return (unsigned long)cl;
}
static void drr_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	cl->filter_cnt--;
}
static int drr_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	drr_purge_queue(cl);
	*old = cl->qdisc;
	cl->qdisc = new;
	sch_tree_unlock(sch);
	return 0;
}
static struct Qdisc *drr_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	return cl->qdisc;
}
static void drr_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct drr_class *cl = (struct drr_class *)arg;

	if (cl->qdisc->q.qlen == 0)
		list_del(&cl->alist);
}
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct drr_class *cl = (struct drr_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct drr_class *cl = (struct drr_class *)arg;
	__u32 qlen = cl->qdisc->q.qlen;
	struct tc_drr_stats xstats;

	memset(&xstats, 0, sizeof(xstats));
	if (qlen)
		xstats.deficit = cl->deficit;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void drr_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
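/*
 * Classification: a packet whose skb->priority carries this qdisc's major
 * handle and a known classid is mapped directly to that class; otherwise
 * the tcf filter chain attached to the qdisc decides.  Packets that match
 * no class are dropped by drr_enqueue().
 */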
static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		cl = drr_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct drr_class *)res.class;
		if (cl == NULL)
			cl = drr_find_class(sch, res.classid);
		return cl;
	}
	return NULL;
}
static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	int err = 0;

	cl = drr_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return err;
	}

	err = qdisc_enqueue(skb, cl->qdisc);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (cl->qdisc->q.qlen == 1) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->q.qlen++;
	return err;
}
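/*
 * Dequeue: look at the head class of the active list.  If the head packet
 * fits into the class's deficit, charge the packet length and hand the
 * packet out; otherwise add one quantum to the deficit and rotate the class
 * to the tail, so a class is never starved by large packets of its
 * neighbours.
 */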
static struct sk_buff *drr_dequeue(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct sk_buff *skb;
	unsigned int len;

	if (list_empty(&q->active))
		goto out;
	while (1) {
		cl = list_first_entry(&q->active, struct drr_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (skb == NULL) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(skb == NULL))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);

			bstats_update(&cl->bstats, skb);
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}
static unsigned int drr_drop(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int len;

	list_for_each_entry(cl, &q->active, alist) {
		if (cl->qdisc->ops->drop) {
			len = cl->qdisc->ops->drop(cl->qdisc);
			if (len > 0) {
				sch->q.qlen--;
				if (cl->qdisc->q.qlen == 0)
					list_del(&cl->alist);
				return len;
			}
		}
	}
	return 0;
}
static int drr_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct drr_sched *q = qdisc_priv(sch);
	int err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	INIT_LIST_HEAD(&q->active);
	return 0;
}
static void drr_reset_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen)
				list_del(&cl->alist);
			qdisc_reset(cl->qdisc);
		}
	}
	sch->q.qlen = 0;
}
static void drr_destroy_qdisc(struct Qdisc *sch)
{
	struct drr_sched *q = qdisc_priv(sch);
	struct drr_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_destroy_chain(&q->filter_list);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			drr_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
}
static const struct Qdisc_class_ops drr_class_ops = {
	.change		= drr_change_class,
	.delete		= drr_delete_class,
	.get		= drr_get_class,
	.put		= drr_put_class,
	.tcf_chain	= drr_tcf_chain,
	.bind_tcf	= drr_bind_tcf,
	.unbind_tcf	= drr_unbind_tcf,
	.graft		= drr_graft_class,
	.leaf		= drr_class_leaf,
	.qlen_notify	= drr_qlen_notify,
	.dump		= drr_dump_class,
	.dump_stats	= drr_dump_class_stats,
	.walk		= drr_walk,
};
static struct Qdisc_ops drr_qdisc_ops __read_mostly = {
	.cl_ops		= &drr_class_ops,
	.id		= "drr",
	.priv_size	= sizeof(struct drr_sched),
	.enqueue	= drr_enqueue,
	.dequeue	= drr_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= drr_drop,
	.init		= drr_init_qdisc,
	.reset		= drr_reset_qdisc,
	.destroy	= drr_destroy_qdisc,
	.owner		= THIS_MODULE,
};
static int __init drr_init(void)
{
	return register_qdisc(&drr_qdisc_ops);
}
static void __exit drr_exit(void)
{
	unregister_qdisc(&drr_qdisc_ops);
}
module_init(drr_init);
module_exit(drr_exit);
MODULE_LICENSE("GPL");
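/*
 * Example configuration (illustrative only; device name, handles, quantum
 * and match are placeholders):
 *
 *   tc qdisc add dev eth0 root handle 1: drr
 *   tc class add dev eth0 parent 1: classid 1:1 drr quantum 1500
 *   tc class add dev eth0 parent 1: classid 1:2 drr quantum 1500
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *           match ip dport 80 0xffff flowid 1:1
 *
 * Traffic matched by no filter (and not mapped via skb->priority) is
 * dropped, so a catch-all class/filter is normally required.
 */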