[NET_SCHED]: ingress: switch back to using ingress_lock
author Patrick McHardy <kaber@trash.net>
Tue, 17 Apr 2007 00:07:08 +0000 (17:07 -0700)
committer David S. Miller <davem@sunset.davemloft.net>
Thu, 26 Apr 2007 05:29:08 +0000 (22:29 -0700)
Switch ingress queueing back to using ingress_lock. qdisc_lock_tree now locks
both the ingress and egress qdiscs on the device. All changes to data that
might be used on both ingress and egress need to be protected by using
qdisc_lock_tree instead of manually taking dev->queue_lock. Additionally,
the qdisc stats_lock needs to be initialized to ingress_lock for ingress
qdiscs.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
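
The locking discipline this commit establishes is easiest to see outside the
diff. Below is a minimal userspace sketch, not kernel code: pthread spinlocks
stand in for dev->queue_lock and dev->ingress_lock, lock_tree()/unlock_tree()
mirror qdisc_lock_tree()/qdisc_unlock_tree() from the sch_generic.c hunk, and
ingress_path() mirrors the datapath in ing_filter()/ing_hook(). The point is
the ordering: the control path always takes queue_lock before ingress_lock,
so it can exclude both datapaths without lock-order inversion, while each
datapath takes only its own lock.

/* Userspace analogue of the two-lock scheme (sketch only; the real code
 * uses dev->queue_lock and dev->ingress_lock, and the kernel additionally
 * disables softirqs via spin_lock_bh(), which is omitted here).
 * Build with: cc -pthread lock_tree.c */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t queue_lock;   /* egress: enqueue/dequeue */
static pthread_spinlock_t ingress_lock; /* ingress filtering */

/* Control path, as in qdisc_lock_tree(): take BOTH locks, always
 * queue_lock first, so changes to data visible on either path are safe. */
static void lock_tree(void)
{
	pthread_spin_lock(&queue_lock);
	pthread_spin_lock(&ingress_lock);
}

static void unlock_tree(void)
{
	pthread_spin_unlock(&ingress_lock);
	pthread_spin_unlock(&queue_lock);
}

/* Ingress datapath, as in ing_filter()/ing_hook() after this patch:
 * only ingress_lock is taken, so ingress no longer contends with the
 * egress queue_lock. */
static void ingress_path(void)
{
	pthread_spin_lock(&ingress_lock);
	/* q->enqueue(skb, q) would run here */
	pthread_spin_unlock(&ingress_lock);
}

int main(void)
{
	pthread_spin_init(&queue_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&ingress_lock, PTHREAD_PROCESS_PRIVATE);

	ingress_path();
	lock_tree();	/* e.g. around qdisc_destroy() */
	unlock_tree();
	puts("lock ordering exercised");
	return 0;
}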
net/core/dev.c
net/sched/cls_route.c
net/sched/sch_api.c
net/sched/sch_generic.c
net/sched/sch_ingress.c

diff --git a/net/core/dev.c b/net/core/dev.c
index 7f31d0f88424dd15a574ab3ca6f92a7202ed7783..c8f5ea9aea81c81c352b2778269d2525c0ef0b46 100644
@@ -1747,10 +1747,10 @@ static int ing_filter(struct sk_buff *skb)
 
                skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_INGRESS);
 
-               spin_lock(&dev->queue_lock);
+               spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        result = q->enqueue(skb, q);
-               spin_unlock(&dev->queue_lock);
+               spin_unlock(&dev->ingress_lock);
 
        }
 
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index e92d716c9158f29f9d4b733ce01963fd98b713cd..cc941d0ee3a5daf9c51065b9e3fce766d32ea8bd 100644
@@ -89,9 +89,9 @@ static __inline__ int route4_fastmap_hash(u32 id, int iif)
 static inline
 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
 {
-       spin_lock_bh(&dev->queue_lock);
+       qdisc_lock_tree(dev);
        memset(head->fastmap, 0, sizeof(head->fastmap));
-       spin_unlock_bh(&dev->queue_lock);
+       qdisc_unlock_tree(dev);
 }
 
 static inline void
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 0ce6914f598126f025757b60d1ea86d543908b03..8699e7006d8041f8d77e3241a97e9e608686b95f 100644
@@ -500,12 +500,16 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
 
        if (handle == TC_H_INGRESS) {
                sch->flags |= TCQ_F_INGRESS;
+               sch->stats_lock = &dev->ingress_lock;
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
-       } else if (handle == 0) {
-               handle = qdisc_alloc_handle(dev);
-               err = -ENOMEM;
-               if (handle == 0)
-                       goto err_out3;
+       } else {
+               sch->stats_lock = &dev->queue_lock;
+               if (handle == 0) {
+                       handle = qdisc_alloc_handle(dev);
+                       err = -ENOMEM;
+                       if (handle == 0)
+                               goto err_out3;
+               }
        }
 
        sch->handle = handle;
@@ -654,9 +658,9 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                        return err;
                if (q) {
                        qdisc_notify(skb, n, clid, q, NULL);
-                       spin_lock_bh(&dev->queue_lock);
+                       qdisc_lock_tree(dev);
                        qdisc_destroy(q);
-                       spin_unlock_bh(&dev->queue_lock);
+                       qdisc_unlock_tree(dev);
                }
        } else {
                qdisc_notify(skb, n, clid, NULL, q);
@@ -789,17 +793,17 @@ graft:
                err = qdisc_graft(dev, p, clid, q, &old_q);
                if (err) {
                        if (q) {
-                               spin_lock_bh(&dev->queue_lock);
+                               qdisc_lock_tree(dev);
                                qdisc_destroy(q);
-                               spin_unlock_bh(&dev->queue_lock);
+                               qdisc_unlock_tree(dev);
                        }
                        return err;
                }
                qdisc_notify(skb, n, clid, old_q, q);
                if (old_q) {
-                       spin_lock_bh(&dev->queue_lock);
+                       qdisc_lock_tree(dev);
                        qdisc_destroy(old_q);
-                       spin_unlock_bh(&dev->queue_lock);
+                       qdisc_unlock_tree(dev);
                }
        }
        return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 1894eb72f6cf8ffc60761fed6de0251470cce630..3385ee5925418d73da404e429c33b20f6db9f4a1 100644
  * The idea is the following:
  * - enqueue, dequeue are serialized via top level device
  *   spinlock dev->queue_lock.
+ * - ingress filtering is serialized via top level device
+ *   spinlock dev->ingress_lock.
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
 void qdisc_lock_tree(struct net_device *dev)
 {
        spin_lock_bh(&dev->queue_lock);
+       spin_lock(&dev->ingress_lock);
 }
 
 void qdisc_unlock_tree(struct net_device *dev)
 {
+       spin_unlock(&dev->ingress_lock);
        spin_unlock_bh(&dev->queue_lock);
 }
 
@@ -431,7 +435,6 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
        sch->dequeue = ops->dequeue;
        sch->dev = dev;
        dev_hold(dev);
-       sch->stats_lock = &dev->queue_lock;
        atomic_set(&sch->refcnt, 1);
 
        return sch;
@@ -447,6 +450,7 @@ struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
        sch = qdisc_alloc(dev, ops);
        if (IS_ERR(sch))
                goto errout;
+       sch->stats_lock = &dev->queue_lock;
        sch->parent = parentid;
 
        if (!ops->init || ops->init(sch, NULL) == 0)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 1fb60aba1e6ce7e42ea9ebfd3e1726bc1c316ac1..ad22dc6af22ae6c712766db0cc743c13b091ec2f 100644
@@ -248,16 +248,11 @@ ing_hook(unsigned int hook, struct sk_buff **pskb,
                skb->dev ? (*pskb)->dev->name : "(no dev)",
                skb->len);
 
-/*
-revisit later: Use a private since lock dev->queue_lock is also
-used on the egress (might slow things for an iota)
-*/
-
        if (dev->qdisc_ingress) {
-               spin_lock(&dev->queue_lock);
+               spin_lock(&dev->ingress_lock);
                if ((q = dev->qdisc_ingress) != NULL)
                        fwres = q->enqueue(skb, q);
-               spin_unlock(&dev->queue_lock);
+               spin_unlock(&dev->ingress_lock);
        }
 
        return fwres;