arch/s390/include/asm/atomic.h
/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR     "lao"
#define __ATOMIC_AND    "lan"
#define __ATOMIC_ADD    "laa"
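
/*
 * With the z196 interlocked-access facility, "load and or/and/add"
 * (lao/lan/laa) perform the operation on the word in storage as a
 * single interlocked update and return the old value in a register,
 * so no compare-and-swap retry loop is needed.
 */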

#define __ATOMIC_LOOP(ptr, op_val, op_string)                           \
({                                                                      \
        int old_val;                                                    \
                                                                        \
        typecheck(atomic_t *, ptr);                                     \
        asm volatile(                                                   \
                op_string "     %0,%2,%1\n"                             \
                : "=d" (old_val), "+Q" ((ptr)->counter)                 \
                : "d" (op_val)                                          \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR     "or"
#define __ATOMIC_AND    "nr"
#define __ATOMIC_ADD    "ar"

#define __ATOMIC_LOOP(ptr, op_val, op_string)                           \
({                                                                      \
        int old_val, new_val;                                           \
                                                                        \
        typecheck(atomic_t *, ptr);                                     \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
                : "d" (op_val)                                          \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

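/*
 * Aligned word loads and stores are single-copy atomic on s390; the
 * inline asm just forces exactly one access instead of letting the
 * compiler tear or cache the value.
 */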
static inline int atomic_read(const atomic_t *v)
{
        int c;

        asm volatile(
                "       l       %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
        asm volatile(
                "       st      %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}

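/*
 * For small constant increments on z196+, "add signed immediate" (asi)
 * adds a sign-extended 8-bit immediate (-128..127) directly to the word
 * in storage as one interlocked update, avoiding the loop entirely;
 * hence the constant-range guard below.
 */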
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                asm volatile(
                        "asi    %0,%1\n"
                        : "+Q" (v->counter)
                        : "i" (i)
                        : "cc", "memory");
        } else {
                atomic_add_return(i, v);
        }
#else
        atomic_add_return(i, v);
#endif
}

#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)              atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)       atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)

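/*
 * Illustrative sketch only, not part of this header: a minimal
 * refcount-style pattern built on the helpers above, where
 * release_object() is a hypothetical destructor:
 *
 *      atomic_t refs = ATOMIC_INIT(1);
 *
 *      atomic_inc(&refs);
 *      if (atomic_dec_and_test(&refs))
 *              release_object();       / * last reference gone * /
 */
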
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

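/*
 * cs compares "old" with the word at v->counter: on a match it stores
 * "new" (condition code 0), otherwise it loads the current value into
 * "old" (condition code 1).  Either way the caller gets back the value
 * that was in memory, which is the cmpxchg contract.
 */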
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        asm volatile(
                "       cs      %0,%2,%1"
                : "+d" (old), "+Q" (v->counter)
                : "d" (new)
                : "cc", "memory");
        return old;
}

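/*
 * Add "a" to v, unless v was "u"; returns the old value of v either
 * way.  This is the __atomic_add_unless() contract used by the generic
 * atomic_add_unless()/atomic_inc_not_zero() wrappers.
 */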
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR   "laog"
#define __ATOMIC64_AND  "lang"
#define __ATOMIC64_ADD  "laag"

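/*
 * 64-bit forms of the interlocked-access instructions; same semantics
 * as lao/lan/laa above, operating on a doubleword.
 */
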
#define __ATOMIC64_LOOP(ptr, op_val, op_string)                         \
({                                                                      \
        long long old_val;                                              \
                                                                        \
        typecheck(atomic64_t *, ptr);                                   \
        asm volatile(                                                   \
                op_string "     %0,%2,%1\n"                             \
                : "=d" (old_val), "+Q" ((ptr)->counter)                 \
                : "d" (op_val)                                          \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR   "ogr"
#define __ATOMIC64_AND  "ngr"
#define __ATOMIC64_ADD  "agr"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)                         \
({                                                                      \
        long long old_val, new_val;                                     \
                                                                        \
        typecheck(atomic64_t *, ptr);                                   \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
                : "d" (op_val)                                          \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
        long long c;

        asm volatile(
                "       lg      %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        asm volatile(
                "       stg     %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
        __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
        __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                         long long old, long long new)
{
        asm volatile(
                "       csg     %0,%2,%1"
                : "+d" (old), "+Q" (v->counter)
                : "d" (new)
                : "cc", "memory");
        return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */

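/*
 * On 31-bit kernels there is no 64-bit compare-and-swap register
 * instruction, so atomic64_t is emulated with COMPARE DOUBLE AND SWAP
 * (cds) on an even/odd register pair.  register_pair overlays a 64-bit
 * value on such a pair, lm/stm move the pair to and from storage, and
 * the %N operand modifier names the second (odd) register of the pair
 * in the asm templates below.
 */
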
typedef struct {
        long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
        register_pair rp;

        asm volatile(
                "       lm      %0,%N0,%1"
                : "=&d" (rp) : "Q" (v->counter));
        return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        register_pair rp = {.pair = i};

        asm volatile(
                "       stm     %1,%N1,%0"
                : "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
        register_pair rp_new = {.pair = new};
        register_pair rp_old;

        asm volatile(
                "       lm      %0,%N0,%1\n"
                "0:     cds     %0,%2,%1\n"
                "       jl      0b\n"
                : "=&d" (rp_old), "+Q" (v->counter)
                : "d" (rp_new)
                : "cc");
        return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                         long long old, long long new)
{
        register_pair rp_old = {.pair = old};
        register_pair rp_new = {.pair = new};

        asm volatile(
                "       cds     %0,%2,%1"
                : "+&d" (rp_old), "+Q" (v->counter)
                : "d" (rp_new)
                : "cc");
        return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old + i;
        } while (atomic64_cmpxchg(v, old, new) != old);
        return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old | mask;
        } while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old & ~mask;      /* clear the mask bits, matching the 64-bit variant */
        } while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

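/*
 * Like atomic_add() above: for small constants on z196+, "add signed
 * immediate" in its 64-bit form (agsi) updates the doubleword in
 * storage as one interlocked operation.
 */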
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                asm volatile(
                        "agsi   %0,%1\n"
                        : "+Q" (v->counter)
                        : "i" (i)
                        : "cc", "memory");
        } else {
                atomic64_add_return(i, v);
        }
#else
        atomic64_add_return(i, v);
#endif
}

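/*
 * Add "i" to v, unless v was "u"; returns nonzero if the add happened.
 */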
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
        long long c, old;

        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + i);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

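/*
 * Decrement v unless the result would go negative; returns the new
 * value, which is negative exactly when no decrement took place.
 */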
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long c, old, dec;

        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg(v, c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}

#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)     atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)            atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)

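/*
 * The compare-and-swap based atomics above are themselves serializing
 * on s390, so mapping these barriers to smp_mb() is the conservative
 * choice.
 */
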
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */