/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>

#define VERSION "1.3"	/* reported by netem_module_init() at load time */
/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
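
/* Typical configurations, via the iproute2 tc(8) front end (shown here
 * for orientation; option names follow the tc-netem man page):
 *
 *	# 100ms +/- 10ms delay, each sample 25% correlated with the last
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25%
 *
 *	# same delay, plus independent 0.3% packet loss
 *	tc qdisc change dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * tc translates the percentages into the u32 fixed-point probabilities
 * used throughout this file.
 */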
struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses sch->q and sch->limit */

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss, limit, counter, gap;
	u32 duplicate, reorder, corrupt;
	u32 rate, cell_size;
	u32 cell_size_reciprocal;
	s32 packet_overhead, cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;
};
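
/* The loss/duplication/corruption/reorder probabilities above, and the
 * a1-a5 transition probabilities, are all u32 fixed point: 0 means never
 * and ~0 (UINT32_MAX) means always. The tc front end converts percentage
 * arguments into this range (roughly percent / 100 * UINT32_MAX) before
 * sending them down over netlink.
 */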
/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};
static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}
/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}
/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
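
/* In effect get_crandom() computes, in 32-bit fixed point,
 *
 *	X(n) = rho * X(n-1) + (1 - rho) * U(n),    rho in [0, 1)
 *
 * where U(n) is a fresh uniform random value: an exponentially weighted
 * blend of the previous output with new entropy, so successive samples
 * are correlated with coefficient rho.
 */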
/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = net_random();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   1 => successfully transmitted packets within a gap period
	 *   4 => isolated losses within a gap period
	 *   3 => lost packets within a burst period
	 *   2 => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case 1:
		if (rnd < clg->a4) {
			clg->state = 4;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1) {
			clg->state = 3;
			return true;
		} else if (clg->a1 < rnd)
			clg->state = 1;

		break;
	case 2:
		if (rnd < clg->a5) {
			clg->state = 3;
			return true;
		} else
			clg->state = 2;

		break;
	case 3:
		if (rnd < clg->a3)
			clg->state = 2;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = 1;
			return true;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = 3;
			return true;
		}

		break;
	case 4:
		clg->state = 1;

		break;
	}

	return false;
}
/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case 1:
		if (net_random() < clg->a1)
			clg->state = 2;
		if (net_random() < clg->a4)
			return true;
		break;
	case 2:
		if (net_random() < clg->a2)
			clg->state = 1;
		if (clg->a3 > net_random())
			return true;
		break;
	}

	return false;
}
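
/* Here state 1 is the "good" state and state 2 the "bad" one: a1 and a2
 * are the good-to-bad and bad-to-good transition probabilities, and a4/a3
 * the per-state loss probabilities. Degenerate settings recover the
 * simpler models from the Salsano et al. paper cited in the header, e.g.
 * a bad-to-good probability equal to one minus the good-to-bad probability
 * makes the chain memoryless and collapses it to independent Bernoulli
 * loss.
 */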
static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model):
		 * a true return from the Markov 4-state loss generator
		 * means this packet is dropped.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm: a true return from
		 * the Gilbert-Elliot loss generator means this packet is
		 * dropped.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
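
/* The table branch above evaluates, with rounding,
 *
 *	mu + t * sigma / NETEM_DIST_SCALE
 *
 * where t is a signed table sample of the inverse CDF scaled by
 * NETEM_DIST_SCALE. sigma is split into quotient and remainder modulo
 * NETEM_DIST_SCALE first so the intermediate products cannot overflow.
 */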
static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
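
/* This is the serialization time of len bytes at q->rate (supplied by the
 * tc front end in bytes per second), converted to scheduler ticks. For
 * example, at a rate of 125000 bytes/sec (1 Mbit/s) a 1500 byte packet
 * yields 1500 * 1e9 / 125000 = 12,000,000 ns, i.e. 12 ms of added delay.
 */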
static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct sk_buff_head *list = &sch->q;
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < sch->limit)) {
		skb = skb_peek_tail(list);
		/* Optimize for add at tail */
		if (likely(!skb || tnext >= netem_skb_cb(skb)->time_to_send))
			return qdisc_enqueue_tail(nskb, sch);

		skb_queue_reverse_walk(list, skb) {
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				break;
		}

		__skb_queue_after(list, skb, nskb);
		sch->qstats.backlog += qdisc_pkt_len(nskb);
		return NET_XMIT_SUCCESS;
	}

	return qdisc_reshape_fail(nskb, sch);
}
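
/* tfifo_enqueue() keeps sch->q sorted by ascending time_to_send, so the
 * head is always the next packet due. The insertion walk runs from the
 * tail because jitter rarely moves a new packet far from the end of the
 * queue.
 */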
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed, since we are going to modify the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff_head *list = &sch->q;

			delay += packet_len_2_sched_time(skb->len, q);

			if (!skb_queue_empty(list)) {
				/*
				 * Last packet in queue is reference point (now).
				 * First packet in queue is already in flight,
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= now - netem_skb_cb(skb_peek(list))->time_to_send;
				now = netem_skb_cb(skb_peek_tail(list))->time_to_send;
			}
		}

		cb->time_to_send = now + delay;
		++q->counter;
		ret = tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.backlog += qdisc_pkt_len(skb);
		sch->qstats.requeues++;
		ret = NET_XMIT_SUCCESS;
	}

	if (ret != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			return ret;
		}
	}

	return NET_XMIT_SUCCESS;
}
static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = qdisc_peek_head(sch);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);

		/* if more time remaining? */
		if (cb->time_to_send <= psched_get_time()) {
			/* the time check was done on the head, so the head
			 * is what must be dequeued */
			skb = qdisc_dequeue_head(sch);
			if (unlikely(!skb))
				goto qdisc_dequeue;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
deliver:
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
	}

qdisc_dequeue:
	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}
static void dist_free(struct disttable *d)
{
	if (d) {
		if (is_vmalloc_addr(d))
			vfree(d);
		else
			kfree(d);
	}
}
/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
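
/* The delay distribution tables themselves (normal, pareto,
 * paretonormal, ...) normally come from iproute2, which ships them as
 * files of NETEM_DIST_SCALE-scaled inverse-CDF samples and passes them
 * down in TCA_NETEM_DELAY_DIST; up to NETEM_DIST_MAX (16384) entries
 * are accepted here.
 */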
static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}
static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}
static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}
static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	q->cell_overhead = r->cell_overhead;
}
static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = 1;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = 1;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}
static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
};
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}
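
/* netem's TCA_OPTIONS payload is a struct tc_netem_qopt immediately
 * followed by optional netlink attributes, a pre-nla layout kept for
 * binary compatibility with old tc binaries. That is why the attributes
 * are parsed by hand at an offset here rather than with a standard
 * nested-attribute parse.
 */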
/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(sch, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			return ret;
	}

	if (tb[TCA_NETEM_REORDER])
		get_reorder(sch, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(sch, tb[TCA_NETEM_RATE]);

	q->loss_model = CLG_RANDOM;
	if (tb[TCA_NETEM_LOSS])
		ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);

	return ret;
}
static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}
static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}
static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi);
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge);
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rate.rate = q->rate;
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate);

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}
static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}
static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}
static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}
static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}
static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}
static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};
static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};
static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");