/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129:	- Bug fix with grio mode
 *			- A better single AvgQ mode with grio (WRED)
 *			- A finer grained VQ dequeue based on a suggestion
 *			  from Ric Tibbetts (rtt@merit.edu)
 *
 * For all the glorious comments look at Alexey's sch_red.c
 */
#include <linux/config.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#if 1 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
#define GRED_DEF_PRIO (MAX_DPs / 2)
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data
{
/* Parameters */
	u32		limit;		/* HARD maximal queue length */
	u32		qth_min;	/* Min average length threshold: A scaled */
	u32		qth_max;	/* Max average length threshold: A scaled */
	u32		DP;		/* the drop parameters */
	char		Wlog;		/* log(W) */
	char		Plog;		/* random number bits */
	u32		Scell_max;
	u32		Rmask;
	u32		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u32		forced;		/* packets dropped for exceeding limits */
	u32		early;		/* packets dropped as a warning */
	u32		other;		/* packets dropped by invoking drop() */
	u32		pdrop;		/* packets dropped because we exceeded physical queue limits */
	char		Scell_log;
	u8		Stab[256];
	u8		prio;		/* the prio of this vq */

/* Variables */
	unsigned long	qave;		/* Average queue length: A scaled */
	int		qcount;		/* Packets since last random number generation */
	u32		qR;		/* Cached random number */

	psched_time_t	qidlestart;	/* Start of idle period */
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched
{
	struct gred_sched_data	*tab[MAX_DPs];
	unsigned long		flags;
	u32			DPs;
	u32			def;
	u8			initd;
};
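/*
 * Explanatory note (not from the original source): values marked
 * "A scaled" above are kept in fixed point, scaled by 2^Wlog, where
 * W = 2^(-Wlog) is the EWMA weight from the classic RED scheme.
 * Storing qth_min/qth_max pre-shifted by Wlog lets the enqueue path
 * compare them against the scaled qave directly, with no per-packet
 * shifting.
 */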
static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i, n;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
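/*
 * Explanatory note (not from the original source): WRED mode is engaged
 * when at least two virtual queues share a priority. For example, if DP 0
 * and DP 1 are both configured with prio 2, gred_wred_mode_check()
 * returns 1 and all VQs then share a single average queue estimate kept
 * in the default DP.
 */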
static int
gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
{
	psched_time_t now;
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qave = 0;
	int i = 0;

	if (!t->initd && skb_queue_len(&sch->q) < (sch->dev->tx_queue_len ? : 1)) {
		D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
		goto do_enqueue;
	}

	if (((skb->tc_index & 0xf) > (t->DPs - 1)) ||
	    !(q = t->tab[skb->tc_index & 0xf])) {
		printk("GRED: setting to default (%d)\n", t->def);
		if (!(q = t->tab[t->def])) {
			DPRINTK("GRED: setting to default FAILED! dropping! "
				"(%d)\n", t->def);
			goto drop;
		}
		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & 0xfffffff0) | t->def;
	}

	D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
		 "general backlog %d\n", skb->tc_index & 0xf, sch->handle,
		 q->backlog, sch->qstats.backlog);

	/* sum up all the qaves of prios <= ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		for (i = 0; i < t->DPs; i++) {
			if (!t->tab[i] || i == q->DP)
				continue;

			if (t->tab[i]->prio < q->prio &&
			    PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart))
				qave += t->tab[i]->qave;
		}
	}

	q->packetsin++;
	q->bytesin += skb->len;

	if (gred_wred_mode(t)) {
		qave = 0;
		q->qave = t->tab[t->def]->qave;
		q->qidlestart = t->tab[t->def]->qidlestart;
	}

	if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
		long us_idle;

		PSCHED_GET_TIME(now);
		us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
		PSCHED_SET_PASTPERFECT(q->qidlestart);

		q->qave >>= q->Stab[(us_idle >> q->Scell_log) & 0xFF];
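		/*
		 * Explanatory note (not from the original source): Stab[] is
		 * the decay table supplied by userspace tc. Each entry holds
		 * a right-shift amount, so the line above approximates
		 * qave *= (1-W)^m, with m the number of Scell-sized
		 * intervals the queue sat idle, using only a table lookup
		 * and a shift.
		 */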
	} else {
		if (gred_wred_mode(t)) {
			q->qave += sch->qstats.backlog - (q->qave >> q->Wlog);
		} else {
			q->qave += q->backlog - (q->qave >> q->Wlog);
		}
	}
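	/*
	 * Explanatory note (not from the original source): since qave is
	 * stored scaled by 2^Wlog, the update above is the RED moving
	 * average qave = (1-W)*qave + W*backlog with W = 2^(-Wlog).
	 * Worked example with Wlog = 9 (W = 1/512): a true average of 1000
	 * is stored as 512000; a backlog of 1500 grows the stored value by
	 * 1500 - (512000 >> 9) = 500, moving the true average to
	 * 1000 + 500/512, i.e. 1/512 of the way towards the backlog.
	 */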
	if (gred_wred_mode(t))
		t->tab[t->def]->qave = q->qave;

	if ((q->qave + qave) < q->qth_min) {
		q->qcount = -1;
enqueue:
		if (q->backlog + skb->len <= q->limit) {
			q->backlog += skb->len;
do_enqueue:
			__skb_queue_tail(&sch->q, skb);
			sch->qstats.backlog += skb->len;
			sch->bstats.bytes += skb->len;
			sch->bstats.packets++;
			return 0;
		} else {
			q->pdrop++;
		}

drop:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}

	if ((q->qave + qave) >= q->qth_max) {
		q->qcount = -1;
		sch->qstats.overlimits++;
		q->forced++;
		goto drop;
	}

	if (++q->qcount) {
		if ((((qave + q->qave) - q->qth_min) >> q->Wlog) * q->qcount < q->qR)
			goto enqueue;
		q->qcount = 0;
		q->qR = net_random() & q->Rmask;
		sch->qstats.overlimits++;
		q->early++;
		goto drop;
	}

	q->qR = net_random() & q->Rmask;
	goto enqueue;
}
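/*
 * Explanatory note (not from the original source): the qcount/qR logic
 * above is RED's uniform spreading of early drops. qR is a cached random
 * number in [0, 2^Plog) and a packet is admitted while (qave - qth_min),
 * unscaled via >> Wlog, times the count of packets since the last drop
 * stays below qR. The interval between early drops therefore shrinks as
 * qave rises above qth_min, which corresponds roughly to a per-packet
 * drop probability of (qave - qth_min) / 2^Plog.
 */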
static int
gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
{
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	q = t->tab[skb->tc_index & 0xf];
	/* error checking here -- probably unnecessary */
	PSCHED_SET_PASTPERFECT(q->qidlestart);

	__skb_queue_head(&sch->q, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;
	q->backlog += skb->len;
	return 0;
}
static struct sk_buff *
gred_dequeue(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue(&sch->q);
	if (skb) {
		sch->qstats.backlog -= skb->len;
		q = t->tab[skb->tc_index & 0xf];
		if (q) {
			q->backlog -= skb->len;
			/* Mark the start of an idle period so the next
			   enqueue can decay the average accordingly. */
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}
		return skb;
	}

	if (gred_wred_mode(t)) {
		q = t->tab[t->def];
		if (!q)
			D2PRINTK("no default VQ set: Results will be "
				 "screwed up\n");
		else
			PSCHED_GET_TIME(q->qidlestart);
	}

	return NULL;
}
static unsigned int gred_drop(struct Qdisc* sch)
{
	struct sk_buff *skb;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	skb = __skb_dequeue_tail(&sch->q);
	if (skb) {
		unsigned int len = skb->len;

		sch->qstats.backlog -= len;
		sch->qstats.drops++;
		q = t->tab[skb->tc_index & 0xf];
		if (q) {
			q->backlog -= len;
			q->other++;
			if (!q->backlog && !gred_wred_mode(t))
				PSCHED_GET_TIME(q->qidlestart);
		} else {
			D2PRINTK("gred_drop: skb has bad tcindex %x\n",
				 skb->tc_index & 0xf);
		}

		kfree_skb(skb);
		return len;
	}

	q = t->tab[t->def];
	if (!q) {
		D2PRINTK("no default VQ set: Results might be screwed up\n");
		return 0;
	}

	PSCHED_GET_TIME(q->qidlestart);
	return 0;
}
static void gred_reset(struct Qdisc* sch)
{
	int i;
	struct gred_sched_data *q;
	struct gred_sched *t = qdisc_priv(sch);

	__skb_queue_purge(&sch->q);

	sch->qstats.backlog = 0;

	for (i = 0; i < t->DPs; i++) {
		q = t->tab[i];
		if (!q)
			continue;
		PSCHED_SET_PASTPERFECT(q->qidlestart);
		q->qave = 0;
		q->qcount = -1;
		q->backlog = 0;
		q->other = 0;
		q->forced = 0;
		q->pdrop = 0;
		q->early = 0;
	}
}
static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}
static inline int gred_change_table_def(struct Qdisc *sch, struct rtattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL || RTA_PAYLOAD(dps) < sizeof(*sopt))
		return -EINVAL;

	sopt = RTA_DATA(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			printk(KERN_WARNING "GRED: Warning: Destroying "
			       "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	table->initd = 0;

	return 0;
}
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio, u8 *stab)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q;

	if (table->tab[dp] == NULL) {
		table->tab[dp] = kmalloc(sizeof(*q), GFP_KERNEL);
		if (table->tab[dp] == NULL)
			return -ENOMEM;
		memset(table->tab[dp], 0, sizeof(*q));
	}

	q = table->tab[dp];
	q->DP = dp;
	q->prio = prio;
	q->Wlog = ctl->Wlog;
	q->Plog = ctl->Plog;
	q->limit = ctl->limit;
	q->Scell_log = ctl->Scell_log;
	q->Rmask = ctl->Plog < 32 ? ((1 << ctl->Plog) - 1) : ~0UL;
	q->Scell_max = (255 << q->Scell_log);
	q->qth_min = ctl->qth_min << ctl->Wlog;
	q->qth_max = ctl->qth_max << ctl->Wlog;
	q->qave = 0;
	q->backlog = 0;
	q->qcount = -1;
	q->other = 0;
	q->forced = 0;
	q->pdrop = 0;
	q->early = 0;

	PSCHED_SET_PASTPERFECT(q->qidlestart);
	memcpy(q->Stab, stab, 256);

	return 0;
}
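/*
 * Explanatory note (not from the original source): the scaling above
 * converts the userspace parameters into the forms the fast path wants.
 * Rmask = 2^Plog - 1 bounds the cached random number, Scell_max caps the
 * measurable idle time at 255 average-cell intervals (the size of Stab),
 * and the thresholds are pre-shifted by Wlog so they compare directly
 * against the scaled qave.
 */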
static int gred_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct rtattr *tb[TCA_GRED_MAX];
	int err = -EINVAL, prio = GRED_DEF_PRIO;
	u8 *stab;

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] == NULL && tb[TCA_GRED_STAB-1] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
	    tb[TCA_GRED_STAB-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
		return -EINVAL;

	ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
	stab = RTA_DATA(tb[TCA_GRED_STAB-1]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab);
	if (err < 0)
		goto errout_locked;

	if (table->tab[table->def] == NULL) {
		if (gred_rio_mode(table))
			prio = table->tab[ctl->DP]->prio;

		err = gred_change_vq(sch, table->def, ctl, prio, stab);
		if (err < 0)
			goto errout_locked;
	}

	table->initd = 1;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
errout:
	return err;
}
static int gred_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct rtattr *tb[TCA_GRED_MAX];

	if (opt == NULL || rtattr_parse_nested(tb, TCA_GRED_MAX, opt))
		return -EINVAL;

	if (tb[TCA_GRED_PARMS-1] || tb[TCA_GRED_STAB-1])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS-1]);
}
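/*
 * Explanatory note (not from the original source): configuration is a
 * two-step protocol. Qdisc creation (gred_init) only accepts the table
 * layout via TCA_GRED_DPS; per-VQ RED parameters must arrive in later
 * change requests carrying TCA_GRED_PARMS and the 256-byte TCA_GRED_STAB
 * decay table, handled by gred_change() above.
 */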
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct rtattr *parms, *opts = NULL;
	int i;
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
	};

	opts = RTA_NEST(skb, TCA_OPTIONS);
	RTA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt);
	parms = RTA_NEST(skb, TCA_GRED_PARMS);

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with a proper message.
			   This is how we indicate to tc that there is no VQ
			   at this DP. */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->qth_min >> q->Wlog;
		opt.qth_max	= q->qth_max >> q->Wlog;
		opt.Wlog	= q->Wlog;
		opt.Plog	= q->Plog;
		opt.Scell_log	= q->Scell_log;
		opt.other	= q->other;
		opt.early	= q->early;
		opt.forced	= q->forced;
		opt.pdrop	= q->pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;
		if (gred_wred_mode(table)) {
			q->qidlestart = table->tab[table->def]->qidlestart;
			q->qave = table->tab[table->def]->qave;
		}

		if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
			long idle;
			unsigned long qave;
			psched_time_t now;

			PSCHED_GET_TIME(now);
			idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max);
			qave = q->qave >> q->Stab[(idle >> q->Scell_log) & 0xFF];
			opt.qave = qave >> q->Wlog;
		} else {
			opt.qave = q->qave >> q->Wlog;
		}
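		/*
		 * Explanatory note (not from the original source): the dump
		 * path applies the same idle-time decay as the enqueue path,
		 * but on a local copy, so userspace sees a current estimate
		 * without the dump perturbing the stored scaled average.
		 */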
append_opt:
		RTA_APPEND(skb, sizeof(opt), &opt);
	}

	RTA_NEST_END(skb, parms);

	return RTA_NEST_END(skb, opts);

rtattr_failure:
	return RTA_NEST_CANCEL(skb, opts);
}
static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}
static struct Qdisc_ops gred_qdisc_ops = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.requeue	= gred_requeue,
	.drop		= gred_drop,
	.init		= gred_init,
	.reset		= gred_reset,
	.destroy	= gred_destroy,
	.change		= gred_change,
	.dump		= gred_dump,
	.owner		= THIS_MODULE,
};
static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");