#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

#define ATOMIC_INIT(i)		( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i)	( (atomic64_t) { (i) } )

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		((v)->counter = (i))
#define atomic64_set(v,i)	((v)->counter = (i))
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
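/*
 * Each operation below is a load-locked/store-conditional retry loop:
 * ldl_l/stl_c for the 32-bit ops and ldq_l/stq_c for the 64-bit ops.
 * A failed store-conditional leaves 0 in the temp register, and the
 * beq takes the out-of-line branch described above to retry.
 */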
static __inline__ void atomic_add(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	unsigned long temp;
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%2,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter)
	:"Ir" (i), "m" (v->counter));
}
/*
 * Same as above, but return the result value
 */
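/*
 * Note: the value-returning variants below are also full memory barriers;
 * an smp_mb() is issued both before and after the locked sequence.
 */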
static inline int atomic_add_return(int i, atomic_t *v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	addl %0,%3,%2\n"
	"	addl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	addq %0,%3,%2\n"
	"	addq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l %0,%1\n"
	"	subl %0,%3,%2\n"
	"	subl %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long temp, result;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l %0,%1\n"
	"	subq %0,%3,%2\n"
	"	subq %0,%3,%0\n"
	"	stq_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)
	:"Ir" (i), "m" (v->counter) : "memory");
	smp_mb();
	return result;
}
/*
 * Atomic exchange routines.
 */
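/*
 * asm/xchg.h is included twice.  The first pass (just below) generates the
 * barrier-free __xchg_local()/__cmpxchg_local() helpers; the second pass
 * further down redefines ____xchg()/____cmpxchg() and __ASM__MB to generate
 * the ordinary __xchg()/__cmpxchg() helpers, which issue "mb" on SMP.
 */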
#define __ASM__MB
#define ____xchg(type, args...)		__xchg ## type ## _local(args)
#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
#include <asm/xchg.h>
#define xchg_local(ptr,x)						\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	\
				       sizeof(*(ptr)));			\
  })
#define cmpxchg_local(ptr, o, n)					\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	\
					  (unsigned long)_n_,		\
					  sizeof(*(ptr)));		\
  })
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#ifdef CONFIG_SMP
#undef __ASM__MB
#define __ASM__MB	"\tmb\n"
#endif
#undef ____xchg
#undef ____cmpxchg
#define ____xchg(type, args...)		__xchg ##type(args)
#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
#include <asm/xchg.h>
#define xchg(ptr,x)							\
  ({									\
     __typeof__(*(ptr)) _x_ = (x);					\
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_,		\
				 sizeof(*(ptr)));			\
  })
#define cmpxchg(ptr, o, n)						\
  ({									\
     __typeof__(*(ptr)) _o_ = (o);					\
     __typeof__(*(ptr)) _n_ = (n);					\
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		\
				    (unsigned long)_n_, sizeof(*(ptr)));\
  })
#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#define __HAVE_ARCH_CMPXCHG 1

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
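/*
 * Hypothetical usage sketch ("obj" and "refcnt" are illustrative names,
 * not part of this header): take a reference only while the count is
 * still non-zero, which is what the generic atomic_inc_not_zero() does.
 * A return value of 0 means the old value was 0 and nothing was added:
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;
 */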
/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true iff @v was not @u.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))
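/*
 * The plain atomic_add()/atomic_sub() family above contains no memory
 * barrier, so the before/after hooks used around atomic_inc()/atomic_dec()
 * must be full smp_mb() barriers on Alpha.
 */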
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* _ALPHA_ATOMIC_H */