net_sched: Add qdisc __NET_XMIT_STOLEN flag
author    Jarek Poplawski <jarkao2@gmail.com>
          Tue, 5 Aug 2008 05:31:03 +0000 (22:31 -0700)
committer David S. Miller <davem@davemloft.net>
          Tue, 5 Aug 2008 05:31:03 +0000 (22:31 -0700)
Patrick McHardy <kaber@trash.net> noticed:
"The other problem that affects all qdiscs supporting actions is
TC_ACT_QUEUED/TC_ACT_STOLEN getting mapped to NET_XMIT_SUCCESS
even though the packet is not queued, corrupting upper qdiscs'
qlen counters."

and later explained:
"The reason why it translates it at all seems to be to not increase
the drops counter. Within a single qdisc this could be avoided by
other means easily, upper qdiscs would still increase the counter
when we return anything besides NET_XMIT_SUCCESS though.

This means we need a new NET_XMIT return value to indicate this to
the upper qdiscs. So I'd suggest to introduce NET_XMIT_STOLEN,
return that to upper qdiscs and translate it to NET_XMIT_SUCCESS
in dev_queue_xmit, similar to NET_XMIT_BYPASS."
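
Schematically, the misaccounting happens in every parent qdisc's
enqueue path; this sketch (not code from the patch, names generic)
shows the pattern the hunks below fix:

	ret = qdisc_enqueue(skb, child_q);
	if (ret == NET_XMIT_SUCCESS) {	/* also true when an action stole the skb */
		sch->q.qlen++;		/* qlen overcounts: nothing was queued */
		return NET_XMIT_SUCCESS;
	}
	sch->qstats.drops++;		/* and any other value counts as a drop */
	return ret;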

David Miller <davem@davemloft.net> noticed:
"Maybe these NET_XMIT_* values being passed around should be a set of
bits. They could be composed of base meanings, combined with specific
attributes.

So you could say "NET_XMIT_DROP | __NET_XMIT_NO_DROP_COUNT"

The attributes get masked out by the top-level ->enqueue() caller,
such that the base meanings are the only thing that make their
way up into the stack. If it's only about communication within the
qdisc tree, let's simply code it that way."

This patch realizes these ideas.
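
In outline, the mechanism (condensed from the diff below; the flag and
helpers are the ones the patch adds, surrounding code elided):

	/* include/net/sch_generic.h, under CONFIG_NET_CLS_ACT:
	 * attribute bit ORed onto the base NET_XMIT_* code */
	enum net_xmit_qdisc_t {
		__NET_XMIT_STOLEN = 0x00010000,
	};

	/* qdiscs bump their drop counters only when this yields 1 */
	#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)

	/* the top-level caller masks the attribute bits off, so only
	 * the base NET_XMIT_* values propagate up into the stack */
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;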

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
12 files changed:
include/linux/netdevice.h
include/net/sch_generic.h
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_dsmark.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_netem.c
net/sched/sch_prio.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_tbf.c

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ee583f642a9f56de52b7be72c381764ebf816307..abbf5d52ec86f1194e6a4f8c3188f3515284f4f5 100644
@@ -64,6 +64,7 @@ struct wireless_dev;
 #define NET_XMIT_BYPASS                4       /* packet does not leave via dequeue;
                                           (TC use only - dev_queue_xmit
                                           returns this as NET_XMIT_SUCCESS) */
+#define NET_XMIT_MASK          0xFFFF  /* qdisc flags in net/sch_generic.h */
 
 /* Backlog congestion levels */
 #define NET_RX_SUCCESS         0   /* keep 'em coming, baby */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c5bb130650518b49249463cf518b8a8a6c813a8c..f15b045a85e95d300514757a18fa2fbb67c80df9 100644
@@ -343,6 +343,18 @@ static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
        return qdisc_skb_cb(skb)->pkt_len;
 }
 
+#ifdef CONFIG_NET_CLS_ACT
+/* additional qdisc xmit flags */
+enum net_xmit_qdisc_t {
+       __NET_XMIT_STOLEN = 0x00010000,
+};
+
+#define net_xmit_drop_count(e) ((e) & __NET_XMIT_STOLEN ? 0 : 1)
+
+#else
+#define net_xmit_drop_count(e) (1)
+#endif
+
 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 #ifdef CONFIG_NET_SCHED
@@ -355,7 +367,7 @@ static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 {
        qdisc_skb_cb(skb)->pkt_len = skb->len;
-       return qdisc_enqueue(skb, sch);
+       return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 6b517b9dac5be93fb959e008b678904bf1c467a0..27dd773481bc10a641149b5184072aca3670c28d 100644
@@ -415,7 +415,7 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        kfree_skb(skb);
-                       return NET_XMIT_SUCCESS;
+                       return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        kfree_skb(skb);
                        goto drop;
@@ -432,9 +432,11 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        ret = qdisc_enqueue(skb, flow->q);
        if (ret != 0) {
 drop: __maybe_unused
-               sch->qstats.drops++;
-               if (flow)
-                       flow->qstats.drops++;
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       if (flow)
+                               flow->qstats.drops++;
+               }
                return ret;
        }
        sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -530,7 +532,7 @@ static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
        if (!ret) {
                sch->q.qlen++;
                sch->qstats.requeues++;
-       } else {
+       } else if (net_xmit_drop_count(ret)) {
                sch->qstats.drops++;
                p->link.qstats.drops++;
        }
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 14954bf4a6836c8fe21731072af228d23f85b33e..765ae5659000affa3e4a204cfc7dbd27c4bedd93 100644
@@ -256,7 +256,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                case TC_ACT_RECLASSIFY:
@@ -397,9 +397,11 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                return ret;
        }
 
-       sch->qstats.drops++;
-       cbq_mark_toplevel(q, cl);
-       cl->qstats.drops++;
+       if (net_xmit_drop_count(ret)) {
+               sch->qstats.drops++;
+               cbq_mark_toplevel(q, cl);
+               cl->qstats.drops++;
+       }
        return ret;
 }
 
@@ -430,8 +432,10 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
                        cbq_activate_class(cl);
                return 0;
        }
-       sch->qstats.drops++;
-       cl->qstats.drops++;
+       if (net_xmit_drop_count(ret)) {
+               sch->qstats.drops++;
+               cl->qstats.drops++;
+       }
        return ret;
 }
 
@@ -664,13 +668,15 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
        q->rx_class = NULL;
 
        if (cl && (cl = cbq_reclassify(skb, cl)) != NULL) {
+               int ret;
 
                cbq_mark_toplevel(q, cl);
 
                q->rx_class = cl;
                cl->q->__parent = sch;
 
-               if (qdisc_enqueue(skb, cl->q) == 0) {
+               ret = qdisc_enqueue(skb, cl->q);
+               if (ret == NET_XMIT_SUCCESS) {
                        sch->q.qlen++;
                        sch->bstats.packets++;
                        sch->bstats.bytes += qdisc_pkt_len(skb);
@@ -678,7 +684,8 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
                                cbq_activate_class(cl);
                        return 0;
                }
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(ret))
+                       sch->qstats.drops++;
                return 0;
        }
 
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index a935676987e2ad1dd7858f03bb8aff3b8065c799..7170275d9f9939927bfe398cd8934d98ce8ba94a 100644
@@ -236,7 +236,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        kfree_skb(skb);
-                       return NET_XMIT_SUCCESS;
+                       return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 
                case TC_ACT_SHOT:
                        goto drop;
@@ -254,7 +254,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        err = qdisc_enqueue(skb, p->q);
        if (err != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(err))
+                       sch->qstats.drops++;
                return err;
        }
 
@@ -321,7 +322,8 @@ static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
 
        err = p->q->ops->requeue(skb, p->q);
        if (err != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(err))
+                       sch->qstats.drops++;
                return err;
        }
 
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 0ae7d19dcba8c0e0906577832b5df293e9a0641e..5cf9ae716118fc30666e2846f22b837392990d48 100644
@@ -1166,7 +1166,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
@@ -1586,8 +1586,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        err = qdisc_enqueue(skb, cl->qdisc);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
-               cl->qstats.drops++;
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(err)) {
+                       cl->qstats.drops++;
+                       sch->qstats.drops++;
+               }
                return err;
        }
 
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 75a40951c4f24edc68684462980e74dfd7123214..538d79b489ae0ec680ce3d509a94f70741b365f3 100644
@@ -221,7 +221,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
@@ -572,9 +572,11 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                kfree_skb(skb);
                return ret;
 #endif
-       } else if (qdisc_enqueue(skb, cl->un.leaf.q) != NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
-               cl->qstats.drops++;
+       } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       cl->qstats.drops++;
+               }
                return NET_XMIT_DROP;
        } else {
                cl->bstats.packets +=
@@ -615,10 +617,12 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
                kfree_skb(skb);
                return ret;
 #endif
-       } else if (cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q) !=
+       } else if ((ret = cl->un.leaf.q->ops->requeue(skb, cl->un.leaf.q)) !=
                   NET_XMIT_SUCCESS) {
-               sch->qstats.drops++;
-               cl->qstats.drops++;
+               if (net_xmit_drop_count(ret)) {
+                       sch->qstats.drops++;
+                       cl->qstats.drops++;
+               }
                return NET_XMIT_DROP;
        } else
                htb_activate(q, cl);
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index a590857006786dc7d98bac27230f79dbd2cc552c..6cd6f2bc749e7e29a87905fc43018484f72a4ede 100644
@@ -240,8 +240,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                sch->q.qlen++;
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
-       } else
+       } else if (net_xmit_drop_count(ret)) {
                sch->qstats.drops++;
+       }
 
        pr_debug("netem: enqueue ret %d\n", ret);
        return ret;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index f849243eb095f74943612630c7e658f2601c20ad..adb1a52b77d3872ffa9dd77c31ef34fef231b1fc 100644
@@ -45,7 +45,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                switch (err) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return NULL;
                }
@@ -88,7 +88,8 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
-       sch->qstats.drops++;
+       if (net_xmit_drop_count(ret))
+               sch->qstats.drops++;
        return ret;
 }
 
@@ -114,7 +115,8 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
                sch->qstats.requeues++;
                return 0;
        }
-       sch->qstats.drops++;
+       if (net_xmit_drop_count(ret))
+               sch->qstats.drops++;
        return NET_XMIT_DROP;
 }
 
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 3f2d1d7f3bbd2dae4fd43d9d86241797d3087f09..5da05839e225001bd9afeae5e6ba975a96fc3da1 100644
@@ -97,7 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
                sch->bstats.bytes += qdisc_pkt_len(skb);
                sch->bstats.packets++;
                sch->q.qlen++;
-       } else {
+       } else if (net_xmit_drop_count(ret)) {
                q->stats.pdrop++;
                sch->qstats.drops++;
        }
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8589da666568b50fc47ac1a2c280a0a96c92a5f3..3a456e1b829ab3298bd8b51a9ef5ee82f473d35d 100644
@@ -178,7 +178,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
-                       *qerr = NET_XMIT_SUCCESS;
+                       *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        return 0;
                }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b296672f76326c9f07a7cbaf1d3343f41e4ceafa..7d3b7ff3bf07fbb1af0de7cab8a5056a37254937 100644
@@ -135,7 +135,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
        ret = qdisc_enqueue(skb, q->qdisc);
        if (ret != 0) {
-               sch->qstats.drops++;
+               if (net_xmit_drop_count(ret))
+                       sch->qstats.drops++;
                return ret;
        }