/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can be loaded from a table as well to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification that can be handled in
	layering other disciplines.  It does not need to do bandwidth
	control either since that can be handled by using token
	bucket or other rate control.
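
	Example (added for illustration, not part of the original text;
	"eth0" is a placeholder device name):

	    tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%

	delays each packet by 100ms +/- 10ms of jitter, with each jitter
	sample 25% correlated with the previous one, and drops 0.3% of
	packets.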

	Correlated Loss Generator models

	Added generation of correlated loss according to the
	"Gilbert-Elliot" model, a 4-state Markov model.

	References:
	[1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	[2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	and intuitive loss model for packet networks and its implementation
	in the Netem module in the Linux kernel", available in [1]

	Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		 Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
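
/* Illustrative usage (added; not from the original source, and the
 * parameter values are arbitrary): the Gilbert-Elliot generator below is
 * selected from user space with e.g.
 *
 *	tc qdisc add dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * giving p = 1%, r = 10%, 1-h = 70% and 1-k = 0.1%, while "loss state"
 * configures the 4-state model instead.
 */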

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	psched_tdiff_t latency;
	psched_tdiff_t jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32  size;
		s16 table[0];
	} *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;
};
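
/* Note (added for illustration; not in the original source): the a1..a5
 * fields hold probabilities scaled to the full u32 range, i.e. probability p
 * is stored as roughly p * 2^32 so it can be compared directly against
 * prandom_u32().  tc(8) fills them from the "loss state P13 [P31 [P32 [P23
 * [P14]]]]" or "loss gemodel ..." percentages.
 */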

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 */
struct netem_skb_cb {
	psched_time_t	time_to_send;
	ktime_t		tstamp_save;
};

/* Because space in skb->cb[] is tight, netem overloads skb->next/prev/tstamp
 * to hold a rb_node structure.
 *
 * If struct sk_buff layout is changed, the following checks will complain.
 */
static struct rb_node *netem_rb_node(struct sk_buff *skb)
{
	BUILD_BUG_ON(offsetof(struct sk_buff, next) != 0);
	BUILD_BUG_ON(offsetof(struct sk_buff, prev) !=
		     offsetof(struct sk_buff, next) + sizeof(skb->next));
	BUILD_BUG_ON(offsetof(struct sk_buff, tstamp) !=
		     offsetof(struct sk_buff, prev) + sizeof(skb->prev));
	BUILD_BUG_ON(sizeof(struct rb_node) > sizeof(skb->next) +
					      sizeof(skb->prev) +
					      sizeof(skb->tstamp));
	return (struct rb_node *)&skb->next;
}

static struct sk_buff *netem_rb_to_skb(struct rb_node *rb)
{
	return (struct sk_buff *)rb;
}

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
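
/* Worked example (added for illustration): with rho = 2^31, i.e. 50%
 * correlation, the update above reduces to answer = (value + last) / 2,
 * pulling every new sample halfway toward the previous one; rho == 0
 * degenerates to plain prandom_u32().
 */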

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
		break;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4 state loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator,
		 * if it is 1 drops a packet and if needed writes the event in
		 * the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}

/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
				struct crndstate *state,
				const struct disttable *dist)
{
	psched_tdiff_t x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
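
/* Worked example (added for illustration; values are arbitrary): with
 * mu = 100000 us, sigma = 10000 us, NETEM_DIST_SCALE = 8192 and a table
 * entry t = 4096, the result is 100000 + 10000 * 4096 / 8192 = 105000 us,
 * i.e. the table entry scales sigma around the requested mean.
 */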

static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
{
	u64 ticks;

	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	ticks = (u64)len * NSEC_PER_SEC;

	do_div(ticks, q->rate);
	return PSCHED_NS2TICKS(ticks);
}
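
/* Worked example (added for illustration): with q->rate = 125000 bytes/sec
 * (1 Mbit/s) and no overheads, a 1500 byte packet costs
 * 1500 * NSEC_PER_SEC / 125000 = 12000000 ns, i.e. a 12 ms transmission
 * delay added on top of the configured latency.
 */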

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p;

	while ((p = rb_first(&q->t_root))) {
		struct sk_buff *skb = netem_rb_to_skb(p);

		rb_erase(p, &q->t_root);
		skb->next = NULL;
		skb->prev = NULL;
		skb->tstamp.tv64 = 0;
		kfree_skb(skb);
	}
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

	while (*p) {
		struct sk_buff *skb;

		parent = *p;
		skb = netem_rb_to_skb(parent);
		if (tnext >= netem_skb_cb(skb)->time_to_send)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(netem_rb_node(nskb), parent, p);
	rb_insert_color(netem_rb_node(nskb), &q->t_root);
	sch->q.qlen++;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int count = 1;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			sch->qstats.drops++; /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		sch->qstats.drops++;
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		qdisc_enqueue_root(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
		    (skb->ip_summed == CHECKSUM_PARTIAL &&
		     skb_checksum_help(skb)))
			return qdisc_drop(skb, sch);

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
		return qdisc_reshape_fail(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = psched_get_time();

		if (q->rate) {
			struct sk_buff *last;

			if (!skb_queue_empty(&sch->q))
				last = skb_peek_tail(&sch->q);
			else
				last = netem_rb_to_skb(rb_last(&q->t_root));
			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= netem_skb_cb(last)->time_to_send - now;
				delay = max_t(psched_tdiff_t, 0, delay);
				now = netem_skb_cb(last)->time_to_send;
			}

			delay += packet_len_2_sched_time(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		cb->tstamp_save = skb->tstamp;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = psched_get_time();
		q->counter = 0;

		__skb_queue_head(&sch->q, skb);
		sch->qstats.requeues++;
	}

	return NET_XMIT_SUCCESS;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);

	if (!len) {
		struct rb_node *p = rb_first(&q->t_root);

		if (p) {
			struct sk_buff *skb = netem_rb_to_skb(p);

			rb_erase(p, &q->t_root);
			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			len = qdisc_pkt_len(skb);
			sch->qstats.backlog -= len;
			kfree_skb(skb);
		}
	}
	if (!len && q->qdisc && q->qdisc->ops->drop)
		len = q->qdisc->ops->drop(q->qdisc);
	if (len)
		sch->qstats.drops++;

	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct rb_node *p;

	if (qdisc_is_throttled(sch))
		return NULL;

tfifo_dequeue:
	skb = __skb_dequeue(&sch->q);
	if (skb) {
deliver:
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_unthrottled(sch);
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	p = rb_first(&q->t_root);
	if (p) {
		psched_time_t time_to_send;

		skb = netem_rb_to_skb(p);

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (time_to_send <= psched_get_time()) {
			rb_erase(p, &q->t_root);

			sch->q.qlen--;
			skb->next = NULL;
			skb->prev = NULL;
			skb->tstamp = netem_skb_cb(skb)->tstamp_save;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			if (q->qdisc) {
				int err = qdisc_enqueue(skb, q->qdisc);

				if (unlikely(err != NET_XMIT_SUCCESS)) {
					if (net_xmit_drop_count(err)) {
						sch->qstats.drops++;
						qdisc_tree_decrease_qlen(sch, 1);
					}
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}
		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	if (is_vmalloc_addr(d))
		vfree(d);
	else
		kfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;
	size_t s;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	s = sizeof(struct disttable) + n * sizeof(s16);
	d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
	if (!d)
		d = vmalloc(s);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(q->delay_dist, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
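
/* Note (added; not from the original source): the table contents come from
 * user space.  iproute2 ships pre-computed tables (normal, pareto,
 * paretonormal, experimental) that tc loads with e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 20ms distribution normal
 *
 * where "eth0" is a placeholder device name.
 */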

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
				 nested_len, policy);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
		if (ret) {
			/* recover clg and loss_model, in case they were
			 * modified in get_loss_clg()
			 */
			q->clg = old_clg;
			q->loss_model = old_loss_model;
			return ret;
		}
	}

	sch->limit = qopt->limit;

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_destroy(q->qdisc);
	dist_free(q->delay_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64(skb, TCA_NETEM_RATE64, q->rate))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	sch_tree_lock(sch);
	*old = q->qdisc;
	q->qdisc = new;
	if (*old) {
		qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
		qdisc_reset(*old);
	}
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.drop		=	netem_drop,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};

static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");