1 #ifndef __ASM_METAG_ATOMIC_LOCK1_H
2 #define __ASM_METAG_ATOMIC_LOCK1_H
4 #define ATOMIC_INIT(i) { (i) }
6 #include <linux/compiler.h>
8 #include <asm/barrier.h>
9 #include <asm/global_lock.h>
11 static inline int atomic_read(const atomic_t *v)
17 * atomic_set needs to be take the lock to protect atomic_add_unless from a
18 * possible race, as it reads the counter twice:
21 * atomic_add_unless(1, 0)
22 * ret = v->counter (non-zero)
23 * if (ret != u) v->counter = 0
24 * v->counter += 1 (counter set to 1)
26 * Making atomic_set take the lock ensures that ordering and logical
27 * consistency is preserved.
29 static inline int atomic_set(atomic_t *v, int i)
33 __global_lock1(flags);
36 __global_unlock1(flags);
40 static inline void atomic_add(int i, atomic_t *v)
44 __global_lock1(flags);
47 __global_unlock1(flags);
50 static inline void atomic_sub(int i, atomic_t *v)
54 __global_lock1(flags);
57 __global_unlock1(flags);
60 static inline int atomic_add_return(int i, atomic_t *v)
65 __global_lock1(flags);
70 __global_unlock1(flags);
75 static inline int atomic_sub_return(int i, atomic_t *v)
80 __global_lock1(flags);
85 __global_unlock1(flags);
90 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
94 __global_lock1(flags);
97 __global_unlock1(flags);
100 static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
104 __global_lock1(flags);
107 __global_unlock1(flags);
110 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
115 __global_lock1(flags);
121 __global_unlock1(flags);
/* Atomically exchange v->counter with new, returning the previous value. */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
128 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
133 __global_lock1(flags);
139 __global_unlock1(flags);
144 static inline int atomic_sub_if_positive(int i, atomic_t *v)
149 __global_lock1(flags);
150 ret = v->counter - 1;
155 __global_unlock1(flags);
160 #endif /* __ASM_METAG_ATOMIC_LOCK1_H */