From ca50e426f96c905e7d14a9c7a6bd4e0330516047 Mon Sep 17 00:00:00 2001
From: Pan Xinhui
Date: Fri, 3 Jun 2016 16:38:14 +0800
Subject: [PATCH] locking/qspinlock: Use atomic_sub_return_release() in
 queued_spin_unlock()

The existing version uses a heavy barrier while only release semantics
is required. So use atomic_sub_return_release() instead.

Suggested-by: Peter Zijlstra (Intel)
Signed-off-by: Pan Xinhui
Signed-off-by: Peter Zijlstra (Intel)
Cc: Andrew Morton
Cc: Linus Torvalds
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: arnd@arndb.de
Cc: waiman.long@hp.com
Link: http://lkml.kernel.org/r/1464943094-3129-1-git-send-email-xinhui.pan@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar
---
 include/asm-generic/qspinlock.h | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 05f05f17a7c2..9f0681bf1e87 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,10 +111,9 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 {
 	/*
-	 * smp_mb__before_atomic() in order to guarantee release semantics
+	 * unlock() needs release semantics:
 	 */
-	smp_mb__before_atomic();
-	atomic_sub(_Q_LOCKED_VAL, &lock->val);
+	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
 }
 
 #endif
-- 
2.39.5
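
[Editorial note, not part of the patch: for readers unfamiliar with the
ordering argument above, the following is a minimal standalone C11 sketch
of the before/after pattern. It uses <stdatomic.h> in place of the
kernel's atomic_t API; struct my_lock and MY_LOCKED_VAL are illustrative
stand-ins, not the real qspinlock internals, and the seq_cst fence is
only an approximation of smp_mb__before_atomic().]

#include <stdatomic.h>

#define MY_LOCKED_VAL 1			/* stand-in for _Q_LOCKED_VAL */

struct my_lock {
	atomic_int val;			/* stand-in for lock->val */
};

/*
 * Before: a full barrier followed by a relaxed subtraction. The full
 * barrier is stronger (and on many architectures costlier) than what
 * unlock() actually needs.
 */
static inline void unlock_heavy(struct my_lock *lock)
{
	atomic_thread_fence(memory_order_seq_cst);
	atomic_fetch_sub_explicit(&lock->val, MY_LOCKED_VAL,
				  memory_order_relaxed);
}

/*
 * After: a single release-ordered RMW. Release ordering guarantees
 * that all memory accesses inside the critical section are visible
 * before the lock value is decremented, which is all unlock() requires.
 */
static inline void unlock_release(struct my_lock *lock)
{
	(void)atomic_fetch_sub_explicit(&lock->val, MY_LOCKED_VAL,
					memory_order_release);
}

[The (void) cast in the patch mirrors the same idiom: the returned value
is discarded; the value-returning variant is used because, at the time of
this patch, it was the form of atomic_sub() for which the kernel's atomic
API provided a _release-ordered flavour.]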