git.karo-electronics.de Git - linux-beck.git/commitdiff
netdev: Kill qdisc_ingress, use netdev->rx_queue.qdisc instead.
author: David S. Miller <davem@davemloft.net>
Wed, 9 Jul 2008 05:49:00 +0000 (22:49 -0700)
committer: David S. Miller <davem@davemloft.net>
Wed, 9 Jul 2008 05:49:00 +0000 (22:49 -0700)
Now that our qdisc management is bi-directional, per-queue, and fully
orthogonal, there is no reason to have a special ingress qdisc pointer
in struct net_device.

Signed-off-by: David S. Miller <davem@davemloft.net>
include/linux/netdevice.h
net/core/dev.c
net/sched/sch_api.c

index df702a7b3db5e7e7e8991755148c002a5e7be0f3..e7c49246fd880771fb0031e4a264103f1f707943 100644 (file)
@@ -634,9 +634,6 @@ struct net_device
 
        struct netdev_queue     rx_queue;
        struct netdev_queue     tx_queue ____cacheline_aligned_in_smp;
-
-       struct Qdisc            *qdisc_ingress;
-
        unsigned long           tx_queue_len;   /* Max frames per queue allowed */
 
        /* Partially transmitted GSO packet. */
index ce79c28d739ddfb98eb9d224f3fb3b5eee123444..ab760a954d99c4f4a247e964652ec132c510d77c 100644 (file)
@@ -2033,7 +2033,7 @@ static int ing_filter(struct sk_buff *skb)
        rxq = &dev->rx_queue;
 
        spin_lock(&rxq->lock);
-       if ((q = dev->qdisc_ingress) != NULL)
+       if ((q = rxq->qdisc) != NULL)
                result = q->enqueue(skb, q);
        spin_unlock(&rxq->lock);
 
@@ -2044,7 +2044,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
                                         struct packet_type **pt_prev,
                                         int *ret, struct net_device *orig_dev)
 {
-       if (!skb->dev->qdisc_ingress)
+       if (!skb->dev->rx_queue.qdisc)
                goto out;
 
        if (*pt_prev) {
index 2313fa7c97becc5e809444195a0631069a88cc0a..4003c280b69f997f3da0e4cf8d134f13c58dfab9 100644 (file)
@@ -450,14 +450,15 @@ dev_graft_qdisc(struct net_device *dev, struct Qdisc *qdisc)
 
        qdisc_lock_tree(dev);
        if (qdisc && qdisc->flags&TCQ_F_INGRESS) {
-               oqdisc = dev->qdisc_ingress;
+               dev_queue = &dev->rx_queue;
+               oqdisc = dev_queue->qdisc;
                /* Prune old scheduler */
                if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
                        /* delete */
                        qdisc_reset(oqdisc);
-                       dev->qdisc_ingress = NULL;
+                       dev_queue->qdisc = NULL;
                } else {  /* new */
-                       dev->qdisc_ingress = qdisc;
+                       dev_queue->qdisc = qdisc;
                }
 
        } else {
@@ -739,7 +740,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /* ingress */
-                               q = dev->qdisc_ingress;
+                               q = dev->rx_queue.qdisc;
                        }
                } else {
                        struct netdev_queue *dev_queue = &dev->tx_queue;
@@ -814,7 +815,7 @@ replay:
                                        return -ENOENT;
                                q = qdisc_leaf(p, clid);
                        } else { /*ingress */
-                               q = dev->qdisc_ingress;
+                               q = dev->rx_queue.qdisc;
                        }
                } else {
                        struct netdev_queue *dev_queue = &dev->tx_queue;