From: Peter Zijlstra
Date: Sun, 23 Mar 2014 15:25:53 +0000 (+0100)
Subject: locking,arch,alpha: Fold atomic_ops
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=b93c7b8c5b281bf3646d6c5b6e05249b98cc5ab7;p=linux-beck.git

locking,arch,alpha: Fold atomic_ops

Many of the atomic op implementations are the same except for one
instruction; fold the lot into a few CPP macros and reduce LoC.

This also prepares for easy addition of new ops.

Cc: Matt Turner
Cc: Richard Henderson
Cc: Ivan Kokshaysky
Cc: Linus Torvalds
Signed-off-by: Peter Zijlstra
Cc: Ivan Kokshaysky
Cc: Linus Torvalds
Cc: Matt Turner
Cc: Paul E. McKenney
Cc: Richard Henderson
Cc: linux-alpha@vger.kernel.org
Link: http://lkml.kernel.org/r/20140508135851.832107183@infradead.org
Signed-off-by: Ingo Molnar
---

diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index ed60a1ee1ed3..6fbb53a13049 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -29,145 +29,92 @@
  * branch back to restart the operation.
  */
 
-static __inline__ void atomic_add(int i, atomic_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	addl %0,%2,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic64_add(long i, atomic64_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	addq %0,%2,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
-}
-
-static __inline__ void atomic_sub(int i, atomic_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	subl %0,%2,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
+#define ATOMIC_OP(op)							\
+static __inline__ void atomic_##op(int i, atomic_t * v)		\
+{									\
+	unsigned long temp;						\
+	__asm__ __volatile__(						\
+	"1:	ldl_l %0,%1\n"						\
+	"	" #op "l %0,%2,%0\n"					\
+	"	stl_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter)				\
+	:"Ir" (i), "m" (v->counter));					\
+}									\
+
+#define ATOMIC_OP_RETURN(op)						\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	long temp, result;						\
+	smp_mb();							\
+	__asm__ __volatile__(						\
+	"1:	ldl_l %0,%1\n"						\
+	"	" #op "l %0,%3,%2\n"					\
+	"	" #op "l %0,%3,%0\n"					\
+	"	stl_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
+	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_mb();							\
+	return result;							\
 }
 
-static __inline__ void atomic64_sub(long i, atomic64_t * v)
-{
-	unsigned long temp;
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	subq %0,%2,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter)
-	:"Ir" (i), "m" (v->counter));
-}
-
-
-/*
- * Same as above, but return the result value
- */
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	addl %0,%3,%2\n"
-	"	addl %0,%3,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
+#define ATOMIC64_OP(op)							\
+static __inline__ void atomic64_##op(long i, atomic64_t * v)		\
+{									\
+	unsigned long temp;						\
+	__asm__ __volatile__(						\
+	"1:	ldq_l %0,%1\n"						\
+	"	" #op "q %0,%2,%0\n"					\
+	"	stq_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter)				\
+	:"Ir" (i), "m" (v->counter));					\
+}									\
+
+#define ATOMIC64_OP_RETURN(op)						\
+static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)	\
+{									\
+	long temp, result;						\
+	smp_mb();							\
+	__asm__ __volatile__(						\
+	"1:	ldq_l %0,%1\n"						\
+	"	" #op "q %0,%3,%2\n"					\
+	"	" #op "q %0,%3,%0\n"					\
+	"	stq_c %0,%1\n"						\
+	"	beq %0,2f\n"						\
+	".subsection 2\n"						\
+	"2:	br 1b\n"						\
+	".previous"							\
+	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
+	:"Ir" (i), "m" (v->counter) : "memory");			\
+	smp_mb();							\
+	return result;							\
 }
 
-static __inline__ long atomic64_add_return(long i, atomic64_t * v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	addq %0,%3,%2\n"
-	"	addq %0,%3,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
-}
+#define ATOMIC_OPS(opg)							\
+	ATOMIC_OP(opg)							\
+	ATOMIC_OP_RETURN(opg)						\
+	ATOMIC64_OP(opg)						\
+	ATOMIC64_OP_RETURN(opg)
 
-static __inline__ long atomic_sub_return(int i, atomic_t * v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldl_l %0,%1\n"
-	"	subl %0,%3,%2\n"
-	"	subl %0,%3,%0\n"
-	"	stl_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
-}
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
-{
-	long temp, result;
-	smp_mb();
-	__asm__ __volatile__(
-	"1:	ldq_l %0,%1\n"
-	"	subq %0,%3,%2\n"
-	"	subq %0,%3,%0\n"
-	"	stq_c %0,%1\n"
-	"	beq %0,2f\n"
-	".subsection 2\n"
-	"2:	br 1b\n"
-	".previous"
-	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
-	:"Ir" (i), "m" (v->counter) : "memory");
-	smp_mb();
-	return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
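
For reference only (not part of the patch): expanding ATOMIC_OP(add) with the macro introduced above yields essentially the same open-coded atomic_add() that the patch removes, which is the point of the fold. A sketch of the expansion:

/* Illustrative expansion of ATOMIC_OP(add); matches the removed atomic_add(). */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"		/* load-locked the counter */
	"	addl %0,%2,%0\n"	/* the only op-specific instruction */
	"	stl_c %0,%1\n"		/* store-conditional; %0 is 0 on failure */
	"	beq %0,2f\n"		/* retry via the out-of-line branch */
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}

ATOMIC_OPS(add) likewise instantiates atomic_add_return(), atomic64_add() and atomic64_add_return() from the corresponding _OP_RETURN and 64-bit macros, and ATOMIC_OPS(sub) does the same for the sub variants.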