diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index c797832daa5f596bac9da8f5075d1bd2e006559c..12c5ec156502a87306e093396b7d195b7b4fa50d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
 
 #define ATOMIC_INIT(i)  { (i) }
 
-#define __CS_LOOP(ptr, op_val, op_string) ({                           \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC_OR    "lao"
+#define __ATOMIC_AND   "lan"
+#define __ATOMIC_ADD   "laa"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string)                          \
+({                                                                     \
+       int old_val;                                                    \
+                                                                       \
+       typecheck(atomic_t *, ptr);                                     \
+       asm volatile(                                                   \
+               op_string "     %0,%2,%1\n"                             \
+               : "=d" (old_val), "+Q" ((ptr)->counter)                 \
+               : "d" (op_val)                                          \
+               : "cc", "memory");                                      \
+       old_val;                                                        \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC_OR    "or"
+#define __ATOMIC_AND   "nr"
+#define __ATOMIC_ADD   "ar"
+
+#define __ATOMIC_LOOP(ptr, op_val, op_string)                          \
+({                                                                     \
        int old_val, new_val;                                           \
+                                                                       \
+       typecheck(atomic_t *, ptr);                                     \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
-               : "=&d" (old_val), "=&d" (new_val),                     \
-                 "=Q" (((atomic_t *)(ptr))->counter)                   \
-               : "d" (op_val),  "Q" (((atomic_t *)(ptr))->counter)     \
+               : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+               : "d" (op_val)                                          \
                : "cc", "memory");                                      \
-       new_val;                                                        \
+       old_val;                                                        \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
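With this change both code paths hand back the counter's old value: on z196 and newer machines the interlocked-access instructions (lao/lan/laa) return it directly in a single instruction, while older machines loop on compare-and-swap until the store succeeds. A minimal portable sketch of what the CS loop computes (illustration only, written with the GCC __atomic builtins rather than the inline asm above; the atomic64 variants below mirror it with lg/lgr/csg):

        /* Retry until no other CPU modified the counter in between,
         * then hand back the OLD value, matching lao/lan/laa. */
        static int cs_loop_add(int *counter, int op_val)
        {
                int old_val = *counter;                 /* l  %0,%2 */

                /* on failure the builtin reloads old_val, like "jl 0b" */
                while (!__atomic_compare_exchange_n(counter, &old_val,
                                                    old_val + op_val,
                                                    0, __ATOMIC_SEQ_CST,
                                                    __ATOMIC_SEQ_CST))
                        ;
                return old_val;                         /* old, not new */
        }
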
 static inline int atomic_read(const atomic_t *v)
 {
        int c;
@@ -53,32 +82,45 @@ static inline void atomic_set(atomic_t *v, int i)
 
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-       return __CS_LOOP(v, i, "ar");
+       return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
 }
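Because __ATOMIC_LOOP now yields the old value, atomic_add_return adds i back once more to produce the conventional new value. For example (values assumed for illustration):

        atomic_t v = ATOMIC_INIT(5);
        int r = atomic_add_return(3, &v);
        /* laa (or the cs loop) returns the old value 5;
         * the trailing "+ i" turns that into r == 8, v.counter == 8. */
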
-#define atomic_add(_i, _v)             atomic_add_return(_i, _v)
-#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
-#define atomic_inc(_v)                 atomic_add_return(1, _v)
-#define atomic_inc_return(_v)          atomic_add_return(1, _v)
-#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
 
-static inline int atomic_sub_return(int i, atomic_t *v)
+static inline void atomic_add(int i, atomic_t *v)
 {
-       return __CS_LOOP(v, i, "sr");
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+               asm volatile(
+                       "asi    %0,%1\n"
+                       : "+Q" (v->counter)
+                       : "i" (i)
+                       : "cc", "memory");
+       } else {
+               atomic_add_return(i, v);
+       }
+#else
+       atomic_add_return(i, v);
+#endif
 }
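The constant-range check mirrors asi's operand format: add-immediate-to-storage takes a signed 8-bit immediate, so only compile-time constants in [-128, 127] collapse to the single instruction; everything else falls back to atomic_add_return. Illustrative call sites:

        atomic_add(1, &v);      /* constant, in range  -> one "asi %0,1"    */
        atomic_add(1000, &v);   /* constant, too large -> laa/cs loop path  */
        atomic_add(n, &v);      /* not a compile-time constant -> loop path */
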
-#define atomic_sub(_i, _v)             atomic_sub_return(_i, _v)
+
+#define atomic_add_negative(_i, _v)    (atomic_add_return(_i, _v) < 0)
+#define atomic_inc(_v)                 atomic_add(1, _v)
+#define atomic_inc_return(_v)          atomic_add_return(1, _v)
+#define atomic_inc_and_test(_v)                (atomic_add_return(1, _v) == 0)
+#define atomic_sub(_i, _v)             atomic_add(-(int)(_i), _v)
+#define atomic_sub_return(_i, _v)      atomic_add_return(-(int)(_i), _v)
 #define atomic_sub_and_test(_i, _v)    (atomic_sub_return(_i, _v) == 0)
-#define atomic_dec(_v)                 atomic_sub_return(1, _v)
+#define atomic_dec(_v)                 atomic_sub(1, _v)
 #define atomic_dec_return(_v)          atomic_sub_return(1, _v)
 #define atomic_dec_and_test(_v)                (atomic_sub_return(1, _v) == 0)
 
-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
 {
-       __CS_LOOP(v, ~mask, "nr");
+       __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
 }
 
-static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 {
-       __CS_LOOP(v, mask, "or");
+       __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
 }
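The mask helpers map straight onto the AND/OR flavours of the loop; clearing works by ANDing with the complement of the mask. For example:

        atomic_set_mask(0x10, &v);      /* v.counter |=  0x10  (lao / or) */
        atomic_clear_mask(0x01, &v);    /* v.counter &= ~0x01  (lan / nr) */
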
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -87,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        asm volatile(
                "       cs      %0,%2,%1"
-               : "+d" (old), "=Q" (v->counter)
-               : "d" (new), "Q" (v->counter)
+               : "+d" (old), "+Q" (v->counter)
+               : "d" (new)
                : "cc", "memory");
        return old;
 }
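cs compares v->counter with old and stores new only on a match; either way the register ends up holding the counter's previous contents, which is what gets returned. Typical use (illustration):

        if (atomic_cmpxchg(&v, 5, 6) == 5) {
                /* the swap happened: counter went 5 -> 6 */
        }
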
@@ -109,27 +151,56 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }
 
 
-#undef __CS_LOOP
+#undef __ATOMIC_LOOP
 
 #define ATOMIC64_INIT(i)  { (i) }
 
 #ifdef CONFIG_64BIT
 
-#define __CSG_LOOP(ptr, op_val, op_string) ({                          \
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+
+#define __ATOMIC64_OR  "laog"
+#define __ATOMIC64_AND "lang"
+#define __ATOMIC64_ADD "laag"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string)                                \
+({                                                                     \
+       long long old_val;                                              \
+                                                                       \
+       typecheck(atomic64_t *, ptr);                                   \
+       asm volatile(                                                   \
+               op_string "     %0,%2,%1\n"                             \
+               : "=d" (old_val), "+Q" ((ptr)->counter)                 \
+               : "d" (op_val)                                          \
+               : "cc", "memory");                                      \
+       old_val;                                                        \
+})
+
+#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
+#define __ATOMIC64_OR  "ogr"
+#define __ATOMIC64_AND "ngr"
+#define __ATOMIC64_ADD "agr"
+
+#define __ATOMIC64_LOOP(ptr, op_val, op_string)                                \
+({                                                                     \
        long long old_val, new_val;                                     \
+                                                                       \
+       typecheck(atomic64_t *, ptr);                                   \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
-               : "=&d" (old_val), "=&d" (new_val),                     \
-                 "=Q" (((atomic_t *)(ptr))->counter)                   \
-               : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)      \
+               : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+               : "d" (op_val)                                          \
                : "cc", "memory");                                      \
-       new_val;                                                        \
+       old_val;                                                        \
 })
 
+#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
+
 static inline long long atomic64_read(const atomic64_t *v)
 {
        long long c;
@@ -149,22 +220,17 @@ static inline void atomic64_set(atomic64_t *v, long long i)
 
 static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-       return __CSG_LOOP(v, i, "agr");
-}
-
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-       return __CSG_LOOP(v, i, "sgr");
+       return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
 }
 
 static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
 {
-       __CSG_LOOP(v, ~mask, "ngr");
+       __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
 }
 
 static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
 {
-       __CSG_LOOP(v, mask, "ogr");
+       __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
 }
 
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -174,13 +240,13 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
        asm volatile(
                "       csg     %0,%2,%1"
-               : "+d" (old), "=Q" (v->counter)
-               : "d" (new), "Q" (v->counter)
+               : "+d" (old), "+Q" (v->counter)
+               : "d" (new)
                : "cc", "memory");
        return old;
 }
 
-#undef __CSG_LOOP
+#undef __ATOMIC64_LOOP
 
 #else /* CONFIG_64BIT */
 
@@ -216,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
                "       lm      %0,%N0,%1\n"
                "0:     cds     %0,%2,%1\n"
                "       jl      0b\n"
-               : "=&d" (rp_old), "=Q" (v->counter)
-               : "d" (rp_new), "Q" (v->counter)
+               : "=&d" (rp_old), "+Q" (v->counter)
+               : "d" (rp_new)
                : "cc");
        return rp_old.pair;
 }
@@ -230,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 
        asm volatile(
                "       cds     %0,%2,%1"
-               : "+&d" (rp_old), "=Q" (v->counter)
-               : "d" (rp_new), "Q" (v->counter)
+               : "+&d" (rp_old), "+Q" (v->counter)
+               : "d" (rp_new)
                : "cc");
        return rp_old.pair;
 }
@@ -248,17 +314,6 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
        return new;
 }
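On 31-bit kernels there are no 64-bit arithmetic atomics, so atomic64_add_return is built from atomic64_cmpxchg (which uses cds, compare double and swap, on an even/odd register pair). Spelled out, the loop above amounts to:

        static long long add_return_31bit(atomic64_t *v, long long i)
        {
                long long old, new;

                do {
                        old = atomic64_read(v);
                        new = old + i;
                } while (atomic64_cmpxchg(v, old, new) != old);
                return new;
        }

With subtraction now expressed as adding the negated value, the separate atomic64_sub_return loop removed just below is no longer needed.
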
 
-static inline long long atomic64_sub_return(long long i, atomic64_t *v)
-{
-       long long old, new;
-
-       do {
-               old = atomic64_read(v);
-               new = old - i;
-       } while (atomic64_cmpxchg(v, old, new) != old);
-       return new;
-}
-
 static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
 {
        long long old, new;
@@ -281,7 +336,24 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
 
 #endif /* CONFIG_64BIT */
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+       if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+               asm volatile(
+                       "agsi   %0,%1\n"
+                       : "+Q" (v->counter)
+                       : "i" (i)
+                       : "cc", "memory");
+       } else {
+               atomic64_add_return(i, v);
+       }
+#else
+       atomic64_add_return(i, v);
+#endif
+}
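agsi is the 64-bit counterpart of asi, with the same signed 8-bit immediate, so the range check is identical. For example:

        atomic64_add(-1, &v);   /* in range     -> single "agsi %0,-1" */
        atomic64_add(4096, &v); /* out of range -> laag/csg loop path  */
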
+
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
        long long c, old;
 
@@ -289,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
        for (;;) {
                if (unlikely(c == u))
                        break;
-               old = atomic64_cmpxchg(v, c, c + a);
+               old = atomic64_cmpxchg(v, c, c + i);
                if (likely(old == c))
                        break;
                c = old;
@@ -314,14 +386,14 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
        return dec;
 }
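atomic64_add_unless adds i only while the counter differs from u and reports whether it did; atomic64_inc_not_zero below builds on it with i == 1 and u == 0, the classic guard against reviving a refcount that has already dropped to zero:

        if (atomic64_add_unless(&ref, 1, 0)) {
                /* counter was non-zero and is now incremented */
        }
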
 
-#define atomic64_add(_i, _v)           atomic64_add_return(_i, _v)
 #define atomic64_add_negative(_i, _v)  (atomic64_add_return(_i, _v) < 0)
-#define atomic64_inc(_v)               atomic64_add_return(1, _v)
+#define atomic64_inc(_v)               atomic64_add(1, _v)
 #define atomic64_inc_return(_v)                atomic64_add_return(1, _v)
 #define atomic64_inc_and_test(_v)      (atomic64_add_return(1, _v) == 0)
-#define atomic64_sub(_i, _v)           atomic64_sub_return(_i, _v)
+#define atomic64_sub_return(_i, _v)    atomic64_add_return(-(long long)(_i), _v)
+#define atomic64_sub(_i, _v)           atomic64_add(-(long long)(_i), _v)
 #define atomic64_sub_and_test(_i, _v)  (atomic64_sub_return(_i, _v) == 0)
-#define atomic64_dec(_v)               atomic64_sub_return(1, _v)
+#define atomic64_dec(_v)               atomic64_sub(1, _v)
 #define atomic64_dec_return(_v)                atomic64_sub_return(1, _v)
 #define atomic64_dec_and_test(_v)      (atomic64_sub_return(1, _v) == 0)
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
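
The (long long) and (int) casts inside the sub macros apply before negation, which matters for unsigned arguments. For instance, with an unsigned int u == 1:

        -(long long)u   /* == -1LL: correct */
        (long long)-u   /* == 4294967295LL: -u wraps to UINT_MAX first */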