/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }
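/*
 * Illustrative usage only, not part of this header (the variable name
 * is made up for the example):
 *
 *	static atomic_t nr_active = ATOMIC_INIT(0);
 */
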
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"

/* z196+: a single interlocked-access instruction does the update. */
#define __ATOMIC_LOOP(ptr, op_val, op_string)				\
({									\
	int old_val;							\
									\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		: "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter)	\
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"

/* Pre-z196: emulate the update with a compare-and-swap retry loop. */
#define __ATOMIC_LOOP(ptr, op_val, op_string)				\
({									\
	int old_val, new_val;						\
									\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

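/*
 * Illustrative expansion (not literal compiler output; register choices
 * are made up for the example): on z196 and later,
 * __ATOMIC_LOOP(v, 1, __ATOMIC_ADD) boils down to a single
 *
 *	laa	%r0,%r1,0(%r2)
 *
 * which atomically adds to the counter and fetches the old value, while
 * the fallback variant performs the same update with an l/lr/ar/cs
 * retry loop with identical semantics.
 */
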
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		/* Small signed 8-bit constant: use ADD IMMEDIATE (asi). */
		asm volatile(
			"asi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
	} else {
		atomic_add_return(i, v);
	}
#else
	atomic_add_return(i, v);
#endif
}

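/*
 * Illustrative only: a compile-time constant in [-128, 127], e.g.
 *
 *	atomic_add(42, &v);
 *
 * takes the "asi" fast path on z196 and later; any other operand falls
 * back to atomic_add_return().
 */
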
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
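
/*
 * Illustrative reference-count pattern (struct and function names are
 * made up for the example, not part of this header):
 *
 *	struct my_obj {
 *		atomic_t refcnt;
 *	};
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcnt))
 *			kfree(obj);
 *	}
 */
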
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		"	cs	%0,%3,%1\n"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#undef __ATOMIC_LOOP

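/*
 * Illustrative only: __atomic_add_unless() returns the old value, so a
 * "take a reference unless the object is already dead" helper (names
 * made up for the example) looks like:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;
 */
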
#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)				\
({									\
	long long old_val;						\
									\
	asm volatile(							\
		op_string "	%0,%2,%1\n"				\
		: "=d" (old_val), "+Q" (((atomic64_t *)(ptr))->counter) \
		: "d" (op_val)						\
		: "cc", "memory");					\
	old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)				\
({									\
	long long old_val, new_val;					\
									\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		"	csg	%0,%3,%1\n"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,%1\n"
		"0:	cds	%0,%2,%1\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		/* Small signed 8-bit constant: use ADD IMMEDIATE (agsi). */
		asm volatile(
			"agsi	%0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
	} else {
		atomic64_add_return(i, v);
	}
#else
	atomic64_add_return(i, v);
#endif
}

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg(v, c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

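/*
 * Illustrative only (variable and function names are made up for the
 * example): atomic64_dec_if_positive() lets a caller consume from a
 * budget without ever driving it negative:
 *
 *	if (atomic64_dec_if_positive(&quota_left) < 0)
 *		throttle_writer();
 */
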
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */