Merge tag 'v2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd96e590f7188a78814e2ee4ddfc492db89..1bc698039ae2e647d670d53b83b976858cdf2a09 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
                /* check the reason of requeuing without tx lock first */
                txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-               if (!netif_tx_queue_stopped(txq) &&
-                   !netif_tx_queue_frozen(txq)) {
+               if (!netif_tx_queue_frozen_or_stopped(txq)) {
                        q->gso_skb = NULL;
                        q->q.qlen--;
                } else
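
Note: this hunk, and the two sch_direct_xmit() hunks below, fold the
back-to-back netif_tx_queue_stopped() and netif_tx_queue_frozen() tests
into the single netif_tx_queue_frozen_or_stopped() helper, which checks
both queue-state bits with one read of dev_queue->state. A minimal
sketch of such a helper, assuming the __QUEUE_STATE_XOFF and
__QUEUE_STATE_FROZEN bit numbers from this era's
include/linux/netdevice.h:

    #define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF) | \
                                        (1 << __QUEUE_STATE_FROZEN))

    /* nonzero if the tx queue is stopped (XOFF) or frozen */
    static inline int
    netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
    {
            return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
    }
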
@@ -122,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
        spin_unlock(root_lock);
 
        HARD_TX_LOCK(dev, txq, smp_processor_id());
-       if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+       if (!netif_tx_queue_frozen_or_stopped(txq))
                ret = dev_hard_start_xmit(skb, dev, txq);
 
        HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
                ret = dev_requeue_skb(skb, q);
        }
 
-       if (ret && (netif_tx_queue_stopped(txq) ||
-                   netif_tx_queue_frozen(txq)))
+       if (ret && netif_tx_queue_frozen_or_stopped(txq))
                ret = 0;
 
        return ret;
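
Note: forcing ret to 0 when the queue is stopped or frozen keeps a
legitimate NETDEV_TX_BUSY from being reported as "queue still has
work": sch_direct_xmit() returns 0 for "queue empty or throttled" and
>0 for "more to send", and its caller loops on that value. A simplified
sketch of the calling loop, modeled on this era's __qdisc_run() (treat
the details as assumptions):

    void __qdisc_run(struct Qdisc *q)
    {
            unsigned long start_time = jiffies;

            /* qdisc_restart() propagates sch_direct_xmit()'s value, so
             * a stopped/frozen queue (ret forced to 0) parks the qdisc
             * until the driver wakes the queue again.
             */
            while (qdisc_restart(q)) {
                    /* postpone if another task needs the CPU or we
                     * have been transmitting for more than a jiffy */
                    if (need_resched() || jiffies != start_time) {
                            __netif_schedule(q);
                            break;
                    }
            }

            qdisc_run_end(q);
    }
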
@@ -555,7 +553,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        size = QDISC_ALIGN(sizeof(*sch));
        size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-       p = kzalloc(size, GFP_KERNEL);
+       p = kzalloc_node(size, GFP_KERNEL,
+                        netdev_queue_numa_node_read(dev_queue));
+
        if (!p)
                goto errout;
        sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
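
Note: allocating the Qdisc with kzalloc_node() places it on the NUMA
node associated with the tx queue it will serve, keeping hot-path
accesses from that queue node-local. A sketch of the accessor as it
plausibly reads, assuming the per-queue numa_node field maintained
alongside XPS (otherwise it falls back to NUMA_NO_NODE, which
kzalloc_node() treats as "no preference"):

    static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
    {
    #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
            return q->numa_node;
    #else
            return NUMA_NO_NODE;
    #endif
    }
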
@@ -810,20 +810,36 @@ static bool some_qdisc_is_busy(struct net_device *dev)
        return false;
 }
 
-void dev_deactivate(struct net_device *dev)
+void dev_deactivate_many(struct list_head *head)
 {
-       netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
-       if (dev_ingress_queue(dev))
-               dev_deactivate_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
+       struct net_device *dev;
+
+       list_for_each_entry(dev, head, unreg_list) {
+               netdev_for_each_tx_queue(dev, dev_deactivate_queue,
+                                        &noop_qdisc);
+               if (dev_ingress_queue(dev))
+                       dev_deactivate_queue(dev, dev_ingress_queue(dev),
+                                            &noop_qdisc);
 
-       dev_watchdog_down(dev);
+               dev_watchdog_down(dev);
+       }
 
        /* Wait for outstanding qdisc-less dev_queue_xmit calls. */
        synchronize_rcu();
 
        /* Wait for outstanding qdisc_run calls. */
-       while (some_qdisc_is_busy(dev))
-               yield();
+       list_for_each_entry(dev, head, unreg_list)
+               while (some_qdisc_is_busy(dev))
+                       yield();
+}
+
+void dev_deactivate(struct net_device *dev)
+{
+       LIST_HEAD(single);
+
+       list_add(&dev->unreg_list, &single);
+       dev_deactivate_many(&single);
+       list_del(&single);
 }
 
 static void dev_init_scheduler_queue(struct net_device *dev,
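
Note: the rework turns dev_deactivate() into a one-element wrapper
around the new dev_deactivate_many(), so a caller tearing down a batch
of devices (all linked through dev->unreg_list) pays for the
synchronize_rcu() grace period once per batch instead of once per
device. A hypothetical usage sketch (deactivate_pair and the two device
pointers are assumed, not part of this patch):

    /* Deactivate two devices with a single RCU grace period. */
    static void deactivate_pair(struct net_device *dev_a,
                                struct net_device *dev_b)
    {
            LIST_HEAD(batch);

            list_add(&dev_a->unreg_list, &batch);
            list_add(&dev_b->unreg_list, &batch);
            dev_deactivate_many(&batch);    /* one synchronize_rcu() total */
    }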