locking/atomic: Introduce atomic_try_cmpxchg()
author Peter Zijlstra <peterz@infradead.org>
Wed, 1 Feb 2017 15:39:38 +0000 (16:39 +0100)
committer Ingo Molnar <mingo@kernel.org>
Thu, 23 Mar 2017 07:54:40 +0000 (08:54 +0100)
Add a new cmpxchg interface:

  bool try_cmpxchg(u{8,16,32,64} *ptr, u{8,16,32,64} *val, u{8,16,32,64} new);

The boolean return value is the result of the compare, and thus
indicates whether the exchange happened; on failure, the current value
of *ptr is returned in *val.

This allows simplification/improvement of loops like:

  for (;;) {
      new = val $op $imm;
      old = cmpxchg(ptr, val, new);
      if (old == val)
          break;
      val = old;
  }

into:

  do {
  } while (!try_cmpxchg(ptr, &val, val $op $imm));

while also generating better code (GCC 6 and onwards).
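
For illustration (a hypothetical use, not part of this patch), a
saturating counter increment built on the atomic_t wrapper added below
might read:

  int val = atomic_read(&v);

  do {
  } while (!atomic_try_cmpxchg(&v, &val, min(val + 1, INT_MAX)));

On failure atomic_try_cmpxchg() has already reloaded the current
counter value into 'val', so the retry loop needs no explicit re-read.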

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/cmpxchg.h
include/linux/atomic.h

diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index 14635c5ea025138ba49cf5a4305c990c60f1ac6c..8410377c68698b7186475d37d52713343d1c9825 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -186,6 +186,12 @@ static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
        return cmpxchg(&v->counter, old, new);
 }
 
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+       return try_cmpxchg(&v->counter, old, new);
+}
+
 static inline int atomic_xchg(atomic_t *v, int new)
 {
        return xchg(&v->counter, new);
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 89ed2f6ae2f76accf15ad16810c6aceb4a1854ae..12fb57413732e78c39437b469ba86be04a3ab449 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -176,6 +176,12 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
        return cmpxchg(&v->counter, old, new);
 }
 
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, long *old, long new)
+{
+       return try_cmpxchg(&v->counter, old, new);
+}
+
 static inline long atomic64_xchg(atomic64_t *v, long new)
 {
        return xchg(&v->counter, new);
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 97848cdfcb1a12b16b6c5e14afcd7d11b9055354..fb961db51a2a29f4398e34e906509a558c55a995 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -153,6 +153,75 @@ extern void __add_wrong_size(void)
 #define cmpxchg_local(ptr, old, new)                                   \
        __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
 
+
+#define __raw_try_cmpxchg(_ptr, _pold, _new, size, lock)               \
+({                                                                     \
+       bool success;                                                   \
+       __typeof__(_ptr) _old = (_pold);                                \
+       __typeof__(*(_ptr)) __old = *_old;                              \
+       __typeof__(*(_ptr)) __new = (_new);                             \
+       switch (size) {                                                 \
+       case __X86_CASE_B:                                              \
+       {                                                               \
+               volatile u8 *__ptr = (volatile u8 *)(_ptr);             \
+               asm volatile(lock "cmpxchgb %[new], %[ptr]"             \
+                            CC_SET(z)                                  \
+                            : CC_OUT(z) (success),                     \
+                              [ptr] "+m" (*__ptr),                     \
+                              [old] "+a" (__old)                       \
+                            : [new] "q" (__new)                        \
+                            : "memory");                               \
+               break;                                                  \
+       }                                                               \
+       case __X86_CASE_W:                                              \
+       {                                                               \
+               volatile u16 *__ptr = (volatile u16 *)(_ptr);           \
+               asm volatile(lock "cmpxchgw %[new], %[ptr]"             \
+                            CC_SET(z)                                  \
+                            : CC_OUT(z) (success),                     \
+                              [ptr] "+m" (*__ptr),                     \
+                              [old] "+a" (__old)                       \
+                            : [new] "r" (__new)                        \
+                            : "memory");                               \
+               break;                                                  \
+       }                                                               \
+       case __X86_CASE_L:                                              \
+       {                                                               \
+               volatile u32 *__ptr = (volatile u32 *)(_ptr);           \
+               asm volatile(lock "cmpxchgl %[new], %[ptr]"             \
+                            CC_SET(z)                                  \
+                            : CC_OUT(z) (success),                     \
+                              [ptr] "+m" (*__ptr),                     \
+                              [old] "+a" (__old)                       \
+                            : [new] "r" (__new)                        \
+                            : "memory");                               \
+               break;                                                  \
+       }                                                               \
+       case __X86_CASE_Q:                                              \
+       {                                                               \
+               volatile u64 *__ptr = (volatile u64 *)(_ptr);           \
+               asm volatile(lock "cmpxchgq %[new], %[ptr]"             \
+                            CC_SET(z)                                  \
+                            : CC_OUT(z) (success),                     \
+                              [ptr] "+m" (*__ptr),                     \
+                              [old] "+a" (__old)                       \
+                            : [new] "r" (__new)                        \
+                            : "memory");                               \
+               break;                                                  \
+       }                                                               \
+       default:                                                        \
+               __cmpxchg_wrong_size();                                 \
+       }                                                               \
+       *_old = __old;                                                  \
+       success;                                                        \
+})
+
+#define __try_cmpxchg(ptr, pold, new, size)                            \
+       __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
+
+#define try_cmpxchg(ptr, pold, new)                                    \
+       __try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
+
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
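
[Editorial note: the x86 implementation above feeds CMPXCHG's ZF output
directly into 'success' via CC_SET(z)/CC_OUT(z), avoiding a separate
comparison instruction. A minimal standalone sketch of the idiom,
assuming the GCC 6+ "=@ccz" flag-output constraint that CC_OUT(z)
expands to on capable compilers (illustrative only, kernel-style bool):

  static inline bool try_cmpxchg_u32(volatile u32 *ptr, u32 *old, u32 new)
  {
          bool success;

          asm volatile("lock; cmpxchgl %[new], %[ptr]"
                       : "=@ccz" (success),       /* ZF from cmpxchg */
                         [ptr] "+m" (*ptr),
                         [old] "+a" (*old)        /* expected in/out via eax */
                       : [new] "r" (new)
                       : "memory");
          return success;
  }
]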
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e71835bf60a977a37277d44c6357a02e70c3a41d..aae5953817d633aed12990ae7ad202b2060ae177 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
 #endif
 #endif /* atomic_cmpxchg_relaxed */
 
+#ifndef atomic_try_cmpxchg
+
+#define __atomic_try_cmpxchg(type, _p, _po, _n)                                \
+({                                                                     \
+       typeof(_po) __po = (_po);                                       \
+       typeof(*(_po)) __o = *__po;                                     \
+       *__po = atomic_cmpxchg##type((_p), __o, (_n));                  \
+       (*__po == __o);                                                 \
+})
+
+#define atomic_try_cmpxchg(_p, _po, _n)                __atomic_try_cmpxchg(, _p, _po, _n)
+#define atomic_try_cmpxchg_relaxed(_p, _po, _n)        __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic_try_cmpxchg_acquire(_p, _po, _n)        __atomic_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic_try_cmpxchg_release(_p, _po, _n)        __atomic_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic_try_cmpxchg */
+#define atomic_try_cmpxchg_relaxed     atomic_try_cmpxchg
+#define atomic_try_cmpxchg_acquire     atomic_try_cmpxchg
+#define atomic_try_cmpxchg_release     atomic_try_cmpxchg
+#endif /* atomic_try_cmpxchg */
+
 /* cmpxchg_relaxed */
 #ifndef cmpxchg_relaxed
 #define  cmpxchg_relaxed               cmpxchg
@@ -996,6 +1017,27 @@ static inline int atomic_dec_if_positive(atomic_t *v)
 #endif
 #endif /* atomic64_cmpxchg_relaxed */
 
+#ifndef atomic64_try_cmpxchg
+
+#define __atomic64_try_cmpxchg(type, _p, _po, _n)                      \
+({                                                                     \
+       typeof(_po) __po = (_po);                                       \
+       typeof(*(_po)) __o = *__po;                                     \
+       *__po = atomic64_cmpxchg##type((_p), __o, (_n));                \
+       (*__po == __o);                                                 \
+})
+
+#define atomic64_try_cmpxchg(_p, _po, _n)              __atomic64_try_cmpxchg(, _p, _po, _n)
+#define atomic64_try_cmpxchg_relaxed(_p, _po, _n)      __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic64_try_cmpxchg_acquire(_p, _po, _n)      __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic64_try_cmpxchg_release(_p, _po, _n)      __atomic64_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic64_try_cmpxchg */
+#define atomic64_try_cmpxchg_relaxed   atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_acquire   atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_release   atomic64_try_cmpxchg
+#endif /* atomic64_try_cmpxchg */
+
 #ifndef atomic64_andnot
 static inline void atomic64_andnot(long long i, atomic64_t *v)
 {
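
[Editorial note: the generic fallback above is behaviorally equivalent
to this open-coded sketch; the name atomic_try_cmpxchg_model() is made
up for illustration, and the real macro form also covers the
_relaxed/_acquire/_release variants:

  static inline bool atomic_try_cmpxchg_model(atomic_t *v, int *old, int new)
  {
          int seen = atomic_cmpxchg(v, *old, new); /* returns prior value */
          bool success = (seen == *old);           /* equal => exchanged */

          *old = seen;    /* on failure, the value actually observed */
          return success;
  }

That is, the expected value is always overwritten with what
atomic_cmpxchg() returned: on success that equals the old expectation,
on failure it is the fresh value to retry with.]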