git.karo-electronics.de Git - linux-beck.git/commitdiff
locking/rwsem: Remove rwsem_atomic_add() and rwsem_atomic_update()
author Jason Low <jason.low2@hpe.com>
Tue, 17 May 2016 00:38:02 +0000 (17:38 -0700)
committer Ingo Molnar <mingo@kernel.org>
Wed, 8 Jun 2016 13:16:59 +0000 (15:16 +0200)
The rwsem-xadd count has been converted to an atomic variable and the
rwsem code now directly uses atomic_long_add() and
atomic_long_add_return(), so we can remove the arch implementations of
rwsem_atomic_add() and rwsem_atomic_update().

Signed-off-by: Jason Low <jason.low2@hpe.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jason Low <jason.low2@hp.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Terry Rudd <terry.rudd@hpe.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Waiman Long <Waiman.Long@hpe.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/alpha/include/asm/rwsem.h
arch/ia64/include/asm/rwsem.h
arch/s390/include/asm/rwsem.h
arch/x86/include/asm/rwsem.h
include/asm-generic/rwsem.h

index b40021aabb9f4daf68b3a586bf42672f4ca858ea..77873d0ad2937b130ab0a8fa737b5a61b821d087 100644 (file)
@@ -191,47 +191,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-static inline void rwsem_atomic_add(long val, struct rw_semaphore *sem)
-{
-#ifndef        CONFIG_SMP
-       sem->count += val;
-#else
-       long temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%2,%0\n"
-       "       stq_c   %0,%1\n"
-       "       beq     %0,2f\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (temp), "=m" (sem->count)
-       :"Ir" (val), "m" (sem->count));
-#endif
-}
-
-static inline long rwsem_atomic_update(long val, struct rw_semaphore *sem)
-{
-#ifndef        CONFIG_SMP
-       sem->count += val;
-       return sem->count;
-#else
-       long ret, temp;
-       __asm__ __volatile__(
-       "1:     ldq_l   %0,%1\n"
-       "       addq    %0,%3,%2\n"
-       "       addq    %0,%3,%0\n"
-       "       stq_c   %2,%1\n"
-       "       beq     %2,2f\n"
-       ".subsection 2\n"
-       "2:     br      1b\n"
-       ".previous"
-       :"=&r" (ret), "=m" (sem->count), "=&r" (temp)
-       :"Ir" (val), "m" (sem->count));
-
-       return ret;
-#endif
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ALPHA_RWSEM_H */
index c5d544f188ed4fcc6b0408a17b7db8fed6b734c4..8fa98dd303b4b4733f862cce39cca2b82235f1b7 100644 (file)
@@ -151,11 +151,4 @@ __downgrade_write (struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-/*
- * Implement atomic add functionality.  These used to be "inline" functions, but GCC v3.1
- * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
- */
-#define rwsem_atomic_add(delta, sem)   atomic64_add(delta, (atomic64_t *)(&(sem)->count))
-#define rwsem_atomic_update(delta, sem)        atomic64_add_return(delta, (atomic64_t *)(&(sem)->count))
-
 #endif /* _ASM_IA64_RWSEM_H */
index c75e4471e618826a385c6aef2955b79c757ab9e7..597e7e96b59e8b9bb2948d9ae43799641abcc375 100644 (file)
@@ -207,41 +207,4 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       signed long old, new;
-
-       asm volatile(
-               "       lg      %0,%2\n"
-               "0:     lgr     %1,%0\n"
-               "       agr     %1,%4\n"
-               "       csg     %0,%1,%2\n"
-               "       jl      0b"
-               : "=&d" (old), "=&d" (new), "=Q" (sem->count)
-               : "Q" (sem->count), "d" (delta)
-               : "cc", "memory");
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       signed long old, new;
-
-       asm volatile(
-               "       lg      %0,%2\n"
-               "0:     lgr     %1,%0\n"
-               "       agr     %1,%4\n"
-               "       csg     %0,%1,%2\n"
-               "       jl      0b"
-               : "=&d" (old), "=&d" (new), "=Q" (sem->count)
-               : "Q" (sem->count), "d" (delta)
-               : "cc", "memory");
-       return new;
-}
-
 #endif /* _S390_RWSEM_H */
index 453744c1d34752c20988cf513ff1eef75c3fa657..089ced4edbbce70193eaa2e5e45154c39d492430 100644 (file)
@@ -213,23 +213,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                     : "memory", "cc");
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
-                    : "+m" (sem->count)
-                    : "er" (delta));
-}
-
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       return delta + xadd(&sem->count, delta);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_RWSEM_H */
index a3a93eca766ce40d09229f80aad427392f0c9178..5be122e3d32605ad9e0f809b23ba5349d5118de4 100644 (file)
@@ -106,14 +106,6 @@ static inline void __up_write(struct rw_semaphore *sem)
                rwsem_wake(sem);
 }
 
-/*
- * implement atomic add functionality
- */
-static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
-{
-       atomic_long_add(delta, (atomic_long_t *)&sem->count);
-}
-
 /*
  * downgrade write lock to read lock
  */
@@ -134,13 +126,5 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
                rwsem_downgrade_wake(sem);
 }
 
-/*
- * implement exchange and add functionality
- */
-static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
-{
-       return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_GENERIC_RWSEM_H */