/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>	/* struct red_parms, struct red_stats and the qavg helpers */

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

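/* Rough illustration of how the parameters relate (an example only, not a
 * recommendation): with qth_min = 30000 bytes and qth_max = 90000 bytes,
 * limit should sit comfortably above qth_max (say, a few hundred kilobytes)
 * so that short bursts arriving while qavg is between the thresholds are
 * absorbed by the queue rather than hitting the hard limit.
 */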
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP */
	struct red_parms	parms;		/* RED algorithm state */
	struct red_stats	stats;		/* mark/drop counters */
	struct Qdisc		*qdisc;		/* child queue (bfifo by default) */
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

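/*
 * Enqueue: update the average queue size (qavg) from the child's backlog,
 * then let red_action() decide whether to pass the packet through, mark it
 * with ECN, or drop it before it ever reaches the child queue.
 */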
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	/* EWMA of the child queue's backlog drives the mark/drop decision */
	q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog);
	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	switch (red_action(&q->parms, q->parms.qavg)) {
	case RED_DONT_MARK:
		break;
	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;
	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		sch->qstats.drops++;
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static int red_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	if (red_is_idling(&q->parms))
		red_end_of_idle_period(&q->parms);

	ret = child->ops->requeue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.requeues++;
		sch->q.qlen++;
	}
	return ret;
}

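/*
 * Dequeue side: packets come from the child qdisc.  When the child runs
 * empty, RED is told that an idle period has started so that qavg decays
 * correctly while no traffic is queued.
 */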
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb)
		sch->q.qlen--;
	else if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		sch->qstats.drops++;
		sch->q.qlen--;
		return len;
	}
	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);
	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->parms);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
};

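/*
 * red_change() applies a new configuration: it validates TCA_RED_PARMS and
 * TCA_RED_STAB, builds a bfifo child sized to the byte limit, and then, under
 * the qdisc tree lock, swaps in the child and loads the RED parameters.
 */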
static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
		qdisc_destroy(xchg(&q->qdisc, child));
	}

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]));

	if (skb_queue_empty(&sch->q))
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	return 0;
}

static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	return red_change(sch, opt);
}

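/*
 * Dump the current configuration back to userspace.  qth_min and qth_max are
 * stored left-shifted by Wlog, so they are scaled back to bytes here.
 */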
static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

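/*
 * Extended statistics: "early" counts packets dropped by RED itself (both
 * probabilistic and forced drops), "marked" counts ECN marks, "pdrop" counts
 * packets refused by the child queue, and "other" counts drops via red_drop().
 */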
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

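/*
 * Grafting replaces the child qdisc (falling back to noop_qdisc when none is
 * given) and hands the old child back to the caller via *old.
 */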
static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

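/*
 * RED exposes exactly one pseudo-class (minor 1) so that the child qdisc can
 * be inspected and replaced; the remaining class operations are stubs.
 */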
static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int red_delete(struct Qdisc *sch, unsigned long cl)
{
	return -ENOSYS;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.change		=	red_change_class,
	.delete		=	red_delete,
	.walk		=	red_walk,
	.tcf_chain	=	red_find_tcf,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.requeue	=	red_requeue,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");