locking/barriers, arch: Use smp barriers in smp_store_mb()
author    Davidlohr Bueso <dave@stgolabs.net>
Tue, 27 Oct 2015 19:53:49 +0000 (12:53 -0700)
committer Ingo Molnar <mingo@kernel.org>
Fri, 4 Dec 2015 10:39:51 +0000 (11:39 +0100)
With commit b92b8b35a2e ("locking/arch: Rename set_mb() to smp_store_mb()")
it was made clear that the context of this call (and thus of the old
set_mb()) is strictly CPU ordering, as opposed to I/O. As such, all
architectures should use the smp variant of mb(), respecting the
semantics and saving a mandatory barrier on UP.
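
For illustration only, a minimal sketch of the store-then-load pattern
this barrier serves; 'flag', 'waiter', wait_side() and wake_side() are
hypothetical names, not part of this patch:

	#include <linux/sched.h>
	#include <linux/compiler.h>

	static int flag;
	static struct task_struct *waiter;

	/* Waiter side (CPU 0): publish the task, then test the condition. */
	static void wait_side(void)
	{
		/*
		 * smp_store_mb() is WRITE_ONCE() plus a full SMP barrier:
		 * the store to 'waiter' must be visible before the load of
		 * 'flag', or both sides can miss each other's update.
		 */
		smp_store_mb(waiter, current);
		if (!READ_ONCE(flag))
			schedule();	/* real code would set task state and loop */
	}

	/* Waker side (CPU 1): set the condition, then look for a waiter. */
	static void wake_side(void)
	{
		WRITE_ONCE(flag, 1);
		smp_mb();	/* pairs with the barrier in smp_store_mb() */
		if (READ_ONCE(waiter))
			wake_up_process(waiter);
	}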

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <linux-arch@vger.kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: dave@stgolabs.net
Link: http://lkml.kernel.org/r/1445975631-17047-3-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/ia64/include/asm/barrier.h
arch/powerpc/include/asm/barrier.h
arch/s390/include/asm/barrier.h
include/asm-generic/barrier.h

diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index df896a1c41d348e60641bb0f89aece2d5ba95f93..209c4b817c958e25eea0298afe121fc225f1e9e3 100644
@@ -77,7 +77,7 @@ do {                                                                  \
        ___p1;                                                          \
 })
 
-#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index 0eca6efc0631d52adcdd3bd66ad88bb7d33f830d..a7af5fb7b91476148e9ee2ce44d4d93ed3ad70fd 100644
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e0df5eada7e600f58529e8cbe60fab41c7..7ffd0b19135c8d46770f1f37ca6d4da19e352de0 100644
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()                smp_mb()
 #define smp_mb__after_atomic()         smp_mb()
 
-#define smp_store_mb(var, value)               do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)                                                \
 do {                                                                   \
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index b42afada1280e08534d6e98fbf9364b3dcae9d8a..0f45f93ef6922ba1417966ba99680a96f81a19f9 100644
@@ -93,7 +93,7 @@
 #endif /* CONFIG_SMP */
 
 #ifndef smp_store_mb
-#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
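
The asm-generic hunk above is also where the changelog's "saving a
mandatory barrier on UP" comes from: with CONFIG_SMP unset, smp_mb()
degrades to a compiler-only barrier, while mb() stays a full hardware
barrier. Abridged sketch of the surrounding asm-generic/barrier.h
fallbacks of this era (context, not part of the diff):

	#ifdef CONFIG_SMP
	#ifndef smp_mb
	#define smp_mb()	mb()		/* SMP: real memory barrier */
	#endif
	#else	/* !CONFIG_SMP */
	#ifndef smp_mb
	#define smp_mb()	barrier()	/* UP: compiler barrier only */
	#endif
	#endif	/* CONFIG_SMP */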