/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in an SMP environment.
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)  { (i) }

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

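/*
 * With the z196 interlocked-access facility, LOAD AND OR (lao),
 * LOAD AND AND (lan) and LOAD AND ADD (laa) update the word in
 * storage atomically and return its previous contents, so no
 * compare-and-swap retry loop is needed.
 */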
#define __ATOMIC_OR     "lao"
#define __ATOMIC_AND    "lan"
#define __ATOMIC_ADD    "laa"

#define __ATOMIC_LOOP(ptr, op_val, op_string)                           \
({                                                                      \
        int old_val;                                                    \
        asm volatile(                                                   \
                op_string "     %0,%2,%1\n"                             \
                : "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter)   \
                : "d" (op_val)                                          \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

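/*
 * Without the interlocked-access facility, fall back to a classic
 * compare-and-swap loop: load the old value, apply the operation to a
 * copy, and retry via CS until no other CPU has modified the word in
 * between (on mismatch CS sets condition code 1 and reloads %0 with
 * the current memory contents, hence the "jl 0b").
 */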
#define __ATOMIC_OR     "or"
#define __ATOMIC_AND    "nr"
#define __ATOMIC_ADD    "ar"

#define __ATOMIC_LOOP(ptr, op_val, op_string)                           \
({                                                                      \
        int old_val, new_val;                                           \
        asm volatile(                                                   \
                "       l       %0,%2\n"                                \
                "0:     lr      %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       cs      %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=Q" (((atomic_t *)(ptr))->counter)                   \
                : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter)      \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

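/*
 * Reads and writes are single L/ST instructions; the inline asm mainly
 * keeps the compiler from caching or tearing the access (similar in
 * spirit to ACCESS_ONCE()).
 */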
static inline int atomic_read(const atomic_t *v)
{
        int c;

        asm volatile(
                "       l       %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
        asm volatile(
                "       st      %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
}

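/*
 * On z196 and newer, ADD IMMEDIATE (asi) adds a signed 8-bit constant
 * (-128..127) to a word in storage; with the interlocked-access
 * facility this update is performed atomically, so small constant
 * increments can avoid the costlier add-and-fetch.
 */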
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                asm volatile(
                        "asi    %0,%1\n"
                        : "+Q" (v->counter)
                        : "i" (i)
                        : "cc", "memory");
        } else {
                atomic_add_return(i, v);
        }
#else
        atomic_add_return(i, v);
#endif
}

#define atomic_add_negative(_i, _v)     (atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)                  atomic_add(1, _v)
#define atomic_inc_return(_v)           atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)         (atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)              atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)       atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)     (atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)                  atomic_sub(1, _v)
#define atomic_dec_return(_v)           atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)         (atomic_sub_return(1, _v) == 0)

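/*
 * The derived helpers above follow the generic atomic_t API; an
 * illustrative refcounting sketch (not part of this header's
 * interface, release_object() is a hypothetical destructor):
 *
 *      static atomic_t refcount = ATOMIC_INIT(1);
 *
 *      atomic_inc(&refcount);                 take a reference
 *      if (atomic_dec_and_test(&refcount))    drop it; the last
 *              release_object();              user cleans up
 */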
static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
        __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
        __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

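/*
 * COMPARE AND SWAP: if *v still equals "old", store "new"; otherwise
 * cs loads the current value into %0 ("old"). Either way the caller
 * gets back the value found in memory and can compare it against the
 * expected one to detect success.
 */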
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        asm volatile(
                "       cs      %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
        return old;
}

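/*
 * Add "a" to *v, but only as long as *v != u; the usual cmpxchg retry
 * loop re-reads the counter until either the add succeeds or the
 * forbidden value u is observed. Returns the old value.
 */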
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)  { (i) }

#ifdef CONFIG_64BIT

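/*
 * The 64-bit variants mirror the 32-bit ones above, using the
 * doubleword instructions (lg/csg, and laog/lang/laag on z196).
 */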
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR   "laog"
#define __ATOMIC64_AND  "lang"
#define __ATOMIC64_ADD  "laag"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)                         \
({                                                                      \
        long long old_val;                                              \
        asm volatile(                                                   \
                op_string "     %0,%2,%1\n"                             \
                : "=d" (old_val), "+Q" (((atomic64_t *)(ptr))->counter) \
                : "d" (op_val)                                          \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR   "ogr"
#define __ATOMIC64_AND  "ngr"
#define __ATOMIC64_ADD  "agr"

#define __ATOMIC64_LOOP(ptr, op_val, op_string)                         \
({                                                                      \
        long long old_val, new_val;                                     \
        asm volatile(                                                   \
                "       lg      %0,%2\n"                                \
                "0:     lgr     %1,%0\n"                                \
                op_string "     %1,%3\n"                                \
                "       csg     %0,%1,%2\n"                             \
                "       jl      0b"                                     \
                : "=&d" (old_val), "=&d" (new_val),                     \
                  "=Q" (((atomic64_t *)(ptr))->counter)                 \
                : "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter)    \
                : "cc", "memory");                                      \
        old_val;                                                        \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
        long long c;

        asm volatile(
                "       lg      %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        asm volatile(
                "       stg     %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
        __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
        __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                         long long old, long long new)
{
        asm volatile(
                "       csg     %0,%2,%1"
                : "+d" (old), "=Q" (v->counter)
                : "d" (new), "Q" (v->counter)
                : "cc", "memory");
        return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */

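/*
 * On 31-bit, a 64-bit counter spans an even/odd register pair;
 * updates go through COMPARE DOUBLE AND SWAP (cds) loops, and the
 * remaining operations are built on atomic64_cmpxchg().
 */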
typedef struct {
        long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
        register_pair rp;

        asm volatile(
                "       lm      %0,%N0,%1"
                : "=&d" (rp) : "Q" (v->counter));
        return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        register_pair rp = {.pair = i};

        asm volatile(
                "       stm     %1,%N1,%0"
                : "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
        register_pair rp_new = {.pair = new};
        register_pair rp_old;

        asm volatile(
                "       lm      %0,%N0,%1\n"
                "0:     cds     %0,%2,%1\n"
                "       jl      0b\n"
                : "=&d" (rp_old), "=Q" (v->counter)
                : "d" (rp_new), "Q" (v->counter)
                : "cc");
        return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                         long long old, long long new)
{
        register_pair rp_old = {.pair = old};
        register_pair rp_new = {.pair = new};

        asm volatile(
                "       cds     %0,%2,%1"
                : "+&d" (rp_old), "=Q" (v->counter)
                : "d" (rp_new), "Q" (v->counter)
                : "cc");
        return rp_old.pair;
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old + i;
        } while (atomic64_cmpxchg(v, old, new) != old);
        return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old | mask;
        } while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
        long long old, new;

        do {
                old = atomic64_read(v);
                new = old & ~mask;      /* clear the bits in mask, as in the 64-bit variant */
        } while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

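/*
 * As with atomic_add(), small constant increments can use the
 * interlocked ADD IMMEDIATE, here the doubleword form agsi.
 */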
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                asm volatile(
                        "agsi   %0,%1\n"
                        : "+Q" (v->counter)
                        : "i" (i)
                        : "cc", "memory");
        } else {
                atomic64_add_return(i, v);
        }
#else
        atomic64_add_return(i, v);
#endif
}

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
        long long c, old;

        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

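/*
 * Decrement *v unless the result would drop below zero; returns the
 * decremented value, or a negative value if *v was left unchanged.
 */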
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long c, old, dec;

        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg(v, c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}

#define atomic64_add_negative(_i, _v)   (atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)                atomic64_add(1, _v)
#define atomic64_inc_return(_v)         atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)       (atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)     atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)            atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)   (atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)                atomic64_sub(1, _v)
#define atomic64_dec_return(_v)         atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)       (atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)        atomic64_add_unless((v), 1, 0)

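/*
 * Barriers paired with the atomic inc/dec operations; defined
 * conservatively as full smp_mb() here (s390's serializing CS/CSG
 * would arguably permit weaker variants).
 */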
#define smp_mb__before_atomic_dec()     smp_mb()
#define smp_mb__after_atomic_dec()      smp_mb()
#define smp_mb__before_atomic_inc()     smp_mb()
#define smp_mb__after_atomic_inc()      smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */