/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and the chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */

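/*
 * Illustrative usage (a sketch, not part of the original file): the qdisc
 * takes RED-style parameters from userspace via iproute2's tc. Exact option
 * names and units are those of tc-choke(8) for the installed iproute2
 * version, e.g. something like:
 *
 *	tc qdisc add dev eth0 root choke limit 1000 min 83 max 250 \
 *		avpkt 1000 burst 138 bandwidth 10mbit probability 0.02
 */
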
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
	/* parameters */
	u32		 limit;
	unsigned char	 flags;
	struct red_parms parms;

	/* variables */
	struct red_vars  vars;
	struct tcf_proto __rcu *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;
	unsigned int	 tab_mask;	/* size - 1 */
	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

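/*
 * Worked example (illustrative): with tab_mask = 7 (an 8-slot table),
 * head = 6 and tail = 2, (2 - 6) & 7 = 4 slots are counted, holes
 * included, since the subtraction wraps modulo the table size.
 */
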
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

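/*
 * Illustration: dropping the packet at, say, idx 5 when head = 4 and
 * tail = 8 leaves a hole at slot 5. Holes at either boundary are
 * reclaimed immediately by the zap helpers above; interior holes are
 * skipped later, as the head pointer passes over them on dequeue.
 */
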
struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	fl = rcu_dereference_bh(q->filter_list);
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletions, retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (rcu_access_pointer(q->filter_list))
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

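/*
 * Why random matching approximates fairness (a sketch, following the
 * CHOKe paper cited above): a flow occupying a fraction f of the queue
 * is hit by a random pick with probability roughly f, so an unresponsive
 * flow that inflates its share sees its drop rate grow with that share,
 * without the qdisc keeping any per-flow state.
 */
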
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	if (rcu_access_pointer(q->filter_list)) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	__qdisc_drop(skb, to_free);
	return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
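	/*
	 * Example: limit = 1000 gives roundup_pow_of_two(1001) - 1 = 1023,
	 * i.e. a 1024-slot table; power-of-two sizing lets head/tail wrap
	 * with a cheap "& tab_mask" instead of a modulo.
	 */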
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *),
			       GFP_KERNEL | __GFP_NOWARN);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned int dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");