git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
rcu: Inline preemptible RCU __rcu_read_lock()
author: Paul E. McKenney <paul.mckenney@linaro.org>
Mon, 2 Apr 2012 01:25:32 +0000 (18:25 -0700)
committer: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Mon, 16 Apr 2012 16:43:29 +0000 (09:43 -0700)
Move __rcu_read_lock() from kernel/rcupdate.c to include/linux/rcupdate.h,
allowing the compiler to inline it.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rcupdate.h
kernel/rcupdate.c

index 8f25595bb2456329eea7816e6600e22c5f07f6b6..24fbe76717c94465ac396219a33286a02357af5d 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/percpu.h>
 
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable; /* for sysctl */
@@ -151,7 +152,17 @@ DECLARE_PER_CPU(int, rcu_read_unlock_special);
 DECLARE_PER_CPU(struct task_struct *, rcu_current_task);
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
-extern void __rcu_read_lock(void);
+/*
+ * Preemptible-RCU implementation for rcu_read_lock().  Just increment
+ * the per-CPU rcu_read_lock_nesting: Shared state and per-task state will
+ * be updated if we block.
+ */
+static inline void __rcu_read_lock(void)
+{
+       __this_cpu_inc(rcu_read_lock_nesting);
+       barrier(); /* Keep code within RCU read-side critical section. */
+}
+
 extern void __rcu_read_unlock(void);
 void synchronize_rcu(void);
 
index f77a5fcd66e61e2f641529218c980899aca2b7fe..d52c68e149fe1cd12c00c4b3420c79fb03358222 100644 (file)
@@ -58,18 +58,6 @@ DEFINE_PER_CPU(int, rcu_read_unlock_special);
 DEFINE_PER_CPU(struct task_struct *, rcu_current_task);
 #endif /* #ifdef CONFIG_PROVE_RCU */
 
-/*
- * Preemptible-RCU implementation for rcu_read_lock().  Just increment
- * the per-CPU rcu_read_lock_nesting: Shared state and per-task state will
- * be updated if we block.
- */
-void __rcu_read_lock(void)
-{
-       __this_cpu_inc(rcu_read_lock_nesting);
-       barrier(); /* Keep code within RCU read-side critical section. */
-}
-EXPORT_SYMBOL_GPL(__rcu_read_lock);
-
 /*
  * Tree-preemptible RCU implementation for rcu_read_unlock().
  * Decrement rcu_read_lock_nesting.  If the result is zero (outermost