rcu: Streamline code produced by __rcu_read_unlock()
author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Sat, 21 May 2011 12:57:18 +0000 (05:57 -0700)
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>
		Wed, 20 Jul 2011 04:38:53 +0000 (21:38 -0700)
Given some common flag combinations, particularly -Os, gcc will inline
rcu_read_unlock_special() despite its being in an unlikely() clause.
Use noinline to prohibit this misoptimization.
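As an illustration only (not the kernel code), a minimal standalone sketch of
the pattern, assuming gcc: the hypothetical fast_path()/slow_path() names stand
in for __rcu_read_unlock() and rcu_read_unlock_special(), and the kernel's
noinline macro expands to the __attribute__((noinline)) used here.

	/* Slow path kept out of line even under -Os. */
	static __attribute__((noinline)) void slow_path(int *state)
	{
		/* rarely executed bookkeeping lives out of the caller */
		*state = 0;
	}

	static inline void fast_path(int *state)
	{
		/* __builtin_expect(..., 0) is what unlikely() boils down to */
		if (__builtin_expect(*state != 0, 0))
			slow_path(state);
	}

Without the attribute, gcc optimizing for size may merge slow_path() into its
caller, bloating the fast path that the unlikely() hint was meant to keep small.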

In addition, move the second barrier() in __rcu_read_unlock() so that
it is not on the common-case code path.  This will allow the compiler to
generate better code for the common-case path through __rcu_read_unlock().
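A simplified sketch of that restructuring (again not the actual kernel code;
the variable names nesting/special and the helper handle_special() are
stand-ins, while the barrier() and unlikely() definitions match the usual
kernel ones): the compiler barrier is only needed to order the nesting
decrement before the load of ->rcu_read_unlock_special, so it can move inside
the nesting==0 branch and off the path taken by nested unlocks.

	#define barrier()	asm volatile("" ::: "memory")
	#define unlikely(x)	__builtin_expect(!!(x), 0)

	static int nesting;
	static int special;

	static void handle_special(void)	/* stand-in for the slow path */
	{
		special = 0;
	}

	static void example_unlock(void)
	{
		if (--nesting == 0) {	/* common case: just the decrement */
			barrier();	/* decrement before load of special */
			if (unlikely(special))
				handle_special();
		}
	}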

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
kernel/rcutree_plugin.h

index 3a0ae0355222d363759c387d03b27b304527f164..4d2c068ba13eb08dd55efbeeadd5e87854e27f09 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -284,7 +284,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
@@ -391,11 +391,11 @@ void __rcu_read_unlock(void)
        struct task_struct *t = current;
 
        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       --t->rcu_read_lock_nesting;
-       barrier();  /* decrement before load of ->rcu_read_unlock_special */
-       if (t->rcu_read_lock_nesting == 0 &&
-           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-               rcu_read_unlock_special(t);
+       if (--t->rcu_read_lock_nesting == 0) {
+               barrier();  /* decr before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+       }
 #ifdef CONFIG_PROVE_LOCKING
        WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
 #endif /* #ifdef CONFIG_PROVE_LOCKING */