net: Fix race condition in store_rps_map
author		Tom Herbert <tom@herbertland.com>
		Wed, 5 Aug 2015 16:39:27 +0000 (09:39 -0700)
committer	David S. Miller <davem@davemloft.net>
		Fri, 7 Aug 2015 22:56:56 +0000 (15:56 -0700)
There is a race condition in store_rps_map that allows the jump label
count in rps_needed to go below zero. This can happen when two threads
concurrently attempt to set and clear a map.

Scenario:

1. rps_needed count is zero
2. New map is assigned by the setting thread, but the rps_needed count is
   _not_ yet incremented (rps_needed count still zero)
3. Map is cleared by a second thread; old_map is set to the map just
   assigned
4. Second thread performs static_key_slow_dec; rps_needed count now goes
   negative
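
For reference, the pre-fix ordering in store_rps_map (reconstructed from
the removed lines in the diff below) looked roughly like this; the race
window lies between releasing rps_map_lock and adjusting rps_needed:

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);	/* lock released before the count is touched */

	/* A second writer can run this whole section here: it sees the new
	 * map as old_map and calls static_key_slow_dec before this thread
	 * has called static_key_slow_inc, so the count goes below zero.
	 */
	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}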

The fix is to increment or decrement rps_needed under the spinlock.

Signed-off-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 194c1d03b2b3b1e78254fb0108682e4dfa3ab776..39ec6949c1e644d0308a588ce2672b9fb6634966 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -726,14 +726,17 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
-       spin_unlock(&rps_map_lock);
 
        if (map)
                static_key_slow_inc(&rps_needed);
-       if (old_map) {
-               kfree_rcu(old_map, rcu);
+       if (old_map)
                static_key_slow_dec(&rps_needed);
-       }
+
+       spin_unlock(&rps_map_lock);
+
+       if (old_map)
+               kfree_rcu(old_map, rcu);
+
        free_cpumask_var(mask);
        return len;
 }