#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */
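
/*
 * The pattern behind every operation in this file, sketched in plain C
 * (illustrative only; the implementations below inline the cs/csg
 * instructions directly):
 *
 *	do {
 *		old = v->counter;
 *		new = <op>(old, operand);
 *	} while (compare-and-swap of old -> new on v->counter fails);
 *
 * Compare And Swap stores the new value only if the counter still holds
 * the old one; on failure the current value is reloaded and the loop
 * retries, so each update appears atomic to all other CPUs.
 */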

#include <linux/compiler.h>
#include <linux/types.h>

#define ATOMIC_INIT(i)	{ (i) }

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CS_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	l	%0,0(%3)\n"				\
		"0:	lr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	cs	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

static inline int atomic_read(const atomic_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic_set(atomic_t *v, int i)
{
	v->counter = i;
	barrier();
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	cs	%0,%3,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	cs	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}
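
/*
 * Usage sketch (not part of this header): atomic_cmpxchg() returns the
 * value it found in the counter, so success is detected by comparing
 * against the expected old value. A hypothetical bounded increment,
 * with "limit" a caller-chosen constant:
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(&counter);
 *		if (old >= limit)
 *			return 0;
 *		new = old + 1;
 *	} while (atomic_cmpxchg(&counter, old, new) != old);
 *	return 1;
 */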

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
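
/*
 * Usage sketch: atomic_inc_not_zero() is the usual way to take a
 * reference only while an object is still alive ("obj" is hypothetical):
 *
 *	if (!atomic_inc_not_zero(&obj->refcount))
 *		return NULL;	(count already hit zero, object dying)
 *
 * atomic_add_unless() returns non-zero iff the add was carried out,
 * i.e. the counter did not contain u.
 */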

#undef __CS_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=Q" (((atomic64_t *)(ptr))->counter)			\
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)	\
		: "cc", "memory");					\
	new_val;							\
})

#else /* __GNUC__ */

#define __CSG_LOOP(ptr, op_val, op_string) ({				\
	typeof(ptr->counter) old_val, new_val;				\
	asm volatile(							\
		"	lg	%0,0(%3)\n"				\
		"0:	lgr	%1,%0\n"				\
		op_string "	%1,%4\n"				\
		"	csg	%0,%1,0(%3)\n"				\
		"	jl	0b"					\
		: "=&d" (old_val), "=&d" (new_val),			\
		  "=m" (((atomic64_t *)(ptr))->counter)			\
		: "a" (ptr), "d" (op_val),				\
		  "m" (((atomic64_t *)(ptr))->counter)			\
		: "cc", "memory");					\
	new_val;							\
})

#endif /* __GNUC__ */

static inline long long atomic64_read(const atomic64_t *v)
{
	barrier();
	return v->counter;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	v->counter = i;
	barrier();
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
	asm volatile(
		"	csg	%0,%3,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
#else /* __GNUC__ */
	asm volatile(
		"	csg	%0,%3,0(%2)"
		: "+d" (old), "=m" (v->counter)
		: "a" (v), "d" (new), "m" (v->counter)
		: "cc", "memory");
#endif /* __GNUC__ */
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

typedef struct {
	long long pair;
} register_pair;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		"	lm	%0,%N0,0(%1)"
		: "=&d" (rp)
		: "a" (&v->counter), "m" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		"	stm	%1,%N1,0(%2)"
		: "=m" (v->counter)
		: "d" (rp), "a" (&v->counter));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		"	lm	%0,%N0,0(%2)\n"
		"0:	cds	%0,%3,0(%2)\n"
		"	jl	0b\n"
		: "=&d" (rp_old), "+m" (v->counter)
		: "a" (&v->counter), "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		"	cds	%0,%3,0(%2)"
		: "+&d" (rp_old), "+m" (v->counter)
		: "a" (&v->counter), "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & ~mask;	/* clear exactly the bits set in mask */
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)
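
/*
 * Usage sketch ("obj" and obj_free() are hypothetical): dec-and-test
 * frees an object exactly once, on the final drop to zero:
 *
 *	if (atomic64_dec_and_test(&obj->refcount))
 *		obj_free(obj);
 */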

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
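
/*
 * Usage sketch for the barrier hooks (hypothetical object with a flag
 * and a pending counter):
 *
 *	obj->flag = 1;
 *	smp_mb__before_atomic_dec();	(order the store before the dec)
 *	atomic_dec(&obj->pending);
 *
 * Callers use these hooks instead of a bare smp_mb() so architectures
 * whose atomics already imply a barrier can define them as no-ops;
 * this implementation simply maps them to smp_mb().
 */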

#include <asm-generic/atomic-long.h>

#endif /* __ARCH_S390_ATOMIC__ */