locking/rwsem: Optimize write lock by reducing operations in slowpath
author     Jason Low <jason.low2@hpe.com>
           Tue, 17 May 2016 00:38:00 +0000 (17:38 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Fri, 3 Jun 2016 07:47:13 +0000 (09:47 +0200)
When acquiring the rwsem write lock in the slowpath, we first try
to set the count to RWSEM_ACTIVE_WRITE_BIAS via a cmpxchg() that
expects RWSEM_WAITING_BIAS. When that succeeds, we then atomically
add RWSEM_WAITING_BIAS back in cases where there are other tasks on
the wait list. This causes write lock operations to often issue
multiple atomic operations.
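
For reference, on 64-bit configurations the bias values behind these
names were defined along these lines (in the asm-generic rwsem header
of that era; the exact header varies by architecture):

  #define RWSEM_ACTIVE_MASK        0xffffffffL
  #define RWSEM_ACTIVE_BIAS        0x00000001L
  #define RWSEM_WAITING_BIAS       (-RWSEM_ACTIVE_MASK-1)
  #define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

So when the write lock is taken while other waiters remain queued,
the count must end up at RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS,
which the old code reached in two separate atomic steps.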

We can instead do the list_is_singular() check first and then set
the count accordingly, so that we issue at most one atomic operation
when acquiring the write lock, reducing unnecessary cacheline
contention.
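
As a minimal user-space sketch of the resulting pattern (using C11
atomics in place of the kernel's cmpxchg_acquire(), and assuming a
64-bit long; try_write_lock() and the wait_list_singular flag are
illustrative stand-ins, not kernel API):

  #include <stdatomic.h>
  #include <stdbool.h>

  #define RWSEM_WAITING_BIAS       (-0xffffffffL - 1)
  #define RWSEM_ACTIVE_WRITE_BIAS  (RWSEM_WAITING_BIAS + 1)

  /* Caller is assumed to hold the wait-list lock, so the
   * singular-vs-multiple waiter decision cannot race. */
  static bool try_write_lock(_Atomic long *count, bool wait_list_singular)
  {
          long expected = RWSEM_WAITING_BIAS;
          long desired  = wait_list_singular
                          ? RWSEM_ACTIVE_WRITE_BIAS
                          : RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

          /* One acquire-ordered CAS replaces the old cmpxchg + add. */
          return atomic_compare_exchange_strong_explicit(count,
                          &expected, desired,
                          memory_order_acquire, memory_order_relaxed);
  }

With a single waiter the count goes straight to ACTIVE_WRITE_BIAS;
with more waiters the WAITING_BIAS component is folded into the same
CAS, so the second cacheline-bouncing atomic disappears.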

Signed-off-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <Waiman.Long@hpe.com>
Acked-by: Davidlohr Bueso <dave@stgolabs.net>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Terry Rudd <terry.rudd@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/1463445486-16078-2-git-send-email-jason.low2@hpe.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/rwsem-xadd.c

index fcbf75ac3dcb5dede62fd753b21902e0eeabb129..b957da7fcb19e92842d8f963b7d3e730bf5126c2 100644
@@ -261,17 +261,28 @@ struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
 }
 EXPORT_SYMBOL(rwsem_down_read_failed);
 
+/*
+ * This function must be called with the sem->wait_lock held to prevent
+ * race conditions between checking the rwsem wait list and setting the
+ * sem->count accordingly.
+ */
 static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
 {
        /*
-        * Try acquiring the write lock. Check count first in order
-        * to reduce unnecessary expensive cmpxchg() operations.
+        * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
         */
-       if (count == RWSEM_WAITING_BIAS &&
-           cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
-                   RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
-               if (!list_is_singular(&sem->wait_list))
-                       rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
+       if (count != RWSEM_WAITING_BIAS)
+               return false;
+
+       /*
+        * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
+        * are other tasks on the wait list, we need to add on WAITING_BIAS.
+        */
+       count = list_is_singular(&sem->wait_list) ?
+                       RWSEM_ACTIVE_WRITE_BIAS :
+                       RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;
+
+       if (cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count) == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }