git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
net: Add memory barriers to prevent possible race in byte queue limits
author: Alexander Duyck <alexander.h.duyck@intel.com>
Tue, 7 Feb 2012 02:29:06 +0000 (02:29 +0000)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 12 May 2012 16:32:19 +0000 (09:32 -0700)
[ Upstream commit b37c0fbe3f6dfba1f8ad2aed47fb40578a254635 ]

This change adds a memory barrier to the byte queue limit code to address a
possible race as has been seen in the past with the
netif_stop_queue/netif_wake_queue logic.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/netdevice.h

index 4f3b01a3d696716abfab856df29a339ac5345018..ebf598097890c4365a0cb38bb83d44b04e344d60 100644 (file)
@@ -1898,12 +1898,22 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 {
 #ifdef CONFIG_BQL
        dql_queued(&dev_queue->dql, bytes);
-       if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
-               set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
-               if (unlikely(dql_avail(&dev_queue->dql) >= 0))
-                       clear_bit(__QUEUE_STATE_STACK_XOFF,
-                           &dev_queue->state);
-       }
+
+       if (likely(dql_avail(&dev_queue->dql) >= 0))
+               return;
+
+       set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+       /*
+        * The XOFF flag must be set before checking the dql_avail below,
+        * because in netdev_tx_completed_queue we update the dql_completed
+        * before checking the XOFF flag.
+        */
+       smp_mb();
+
+       /* check again in case another CPU has just made room avail */
+       if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+               clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 #endif
 }
 
@@ -1916,16 +1926,23 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
                                             unsigned pkts, unsigned bytes)
 {
 #ifdef CONFIG_BQL
-       if (likely(bytes)) {
-               dql_completed(&dev_queue->dql, bytes);
-               if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
-                   &dev_queue->state) &&
-                   dql_avail(&dev_queue->dql) >= 0)) {
-                       if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
-                            &dev_queue->state))
-                               netif_schedule_queue(dev_queue);
-               }
-       }
+       if (unlikely(!bytes))
+               return;
+
+       dql_completed(&dev_queue->dql, bytes);
+
+       /*
+        * Without the memory barrier there is a small possiblity that
+        * netdev_tx_sent_queue will miss the update and cause the queue to
+        * be stopped forever
+        */
+       smp_mb();
+
+       if (dql_avail(&dev_queue->dql) < 0)
+               return;
+
+       if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+               netif_schedule_queue(dev_queue);
 #endif
 }