#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		ACCESS_ONCE((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))
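
/*
 * A minimal usage sketch (the name "refs" and the values are
 * illustrative, not part of this file):
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 5);
 *	pr_info("refs = %d\n", atomic_read(&refs));
 */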

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}
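
/*
 * For example, ATOMIC_OP(add, +=, add) expands to:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 */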

#ifdef CONFIG_RMW_INSNS

/*
 * With CONFIG_RMW_INSNS the returning variants use a cas-based retry
 * loop: compute the new value in a scratch register and retry until
 * no other update has intervened.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}

#else

/*
 * Without the RMW instructions, fall back to briefly disabling
 * interrupts around a plain read-modify-write; since there are no
 * SMP m68k systems, this is sufficient.
 */
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
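
/*
 * The two ATOMIC_OPS() invocations above generate atomic_add(),
 * atomic_add_return(), atomic_sub() and atomic_sub_return().
 */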

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;

	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
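
/*
 * atomic_dec_and_test() supports the classic reference-count idiom;
 * a sketch (the structure and helper names are illustrative):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */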

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;

	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;

	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */
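
/*
 * Either flavour of atomic_cmpxchg() supports the usual lock-free
 * read-modify-write retry loop; a sketch (the doubling update is just
 * an example):
 *
 *	int old, new;
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old * 2;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 */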

#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;

	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;

	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
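
/*
 * Example (illustrative): set and then clear bit 0 of a plain word.
 *
 *	static unsigned long w;
 *
 *	atomic_set_mask(1UL << 0, &w);
 *	atomic_clear_mask(1UL << 0, &w);
 */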

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
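
/*
 * __atomic_add_unless() is the building block for helpers such as
 * atomic_inc_not_zero(), which the generic atomic headers define as
 * atomic_add_unless(v, 1, 0).
 */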

#endif /* __ARCH_M68K_ATOMIC__ */