/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS
#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_ARC_STAR_9000923308

#define SCOND_FAIL_RETRY_VAR_DEF						\
	unsigned int delay = 1, tmp;

#define SCOND_FAIL_RETRY_ASM							\
	"	bz	4f			\n"				\
	"   ; --- scond fail delay ---		\n"				\
	"	mov	%[tmp], %[delay]	\n"	/* tmp = delay */	\
	"2:	brne.d	%[tmp], 0, 2b		\n"	/* while (tmp != 0) */	\
	"	sub	%[tmp], %[tmp], 1	\n"	/* tmp-- */		\
	"	rol	%[delay], %[delay]	\n"	/* delay *= 2 */	\
	"	b	1b			\n"	/* start over */	\
	"4: ; --- success ---			\n"

#define SCOND_FAIL_RETRY_VARS							\
	,[delay] "+&r" (delay), [tmp] "=&r" (tmp)
#else	/* !CONFIG_ARC_STAR_9000923308 */

#define SCOND_FAIL_RETRY_VAR_DEF

#define SCOND_FAIL_RETRY_ASM							\
	"	bnz	1b			\n"

#define SCOND_FAIL_RETRY_VARS

#endif
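/*
 * Editor's sketch (not part of the original header): with the STAR
 * 9000923308 workaround enabled, the retry glue above turns the plain
 * LLOCK/SCOND loop into one with exponential backoff, roughly:
 *
 *	unsigned int delay = 1, tmp;
 *
 *	for (;;) {
 *		val = LLOCK(&v->counter);		// 1:
 *		val = OP(val, i);
 *		if (SCOND(val, &v->counter) == OK)	// bz 4f on success
 *			break;
 *		for (tmp = delay; tmp; tmp--)		// 2: busy-wait
 *			;
 *		delay <<= 1;				// rol: delay *= 2
 *	}
 *
 * LLOCK()/SCOND()/OP() are hypothetical C stand-ins for the emitted
 * instructions; only the inline assembly actually runs.
 */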
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),	/* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int val;						\
	SCOND_FAIL_RETRY_VAR_DEF					\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	SCOND_FAIL_RETRY_ASM						\
									\
	: [val]	"=&r"	(val)						\
	  SCOND_FAIL_RETRY_VARS						\
	: [ctr]	"r"	(&v->counter),					\
	  [i]	"ir"	(i)						\
	: "cc");							\
									\
	smp_mb();							\
									\
	return val;							\
}
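/*
 * Editor's note (not part of the original header): the smp_mb() pair
 * above makes atomic_##op##_return() a full barrier, whereas the plain
 * atomic_##op() above it implies no ordering. A minimal usage sketch,
 * with a hypothetical 'ready' flag published to another CPU:
 *
 *	atomic_add(1, &cnt);			// unordered RMW
 *	if (atomic_add_return(1, &cnt) == 2)	// fully ordered RMW
 *		WRITE_ONCE(ready, 1);		// ordered after the RMW
 */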
#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

/* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif
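/*
 * Editor's sketch (not part of the original header) of the race the
 * comment above describes, with hypothetical CPUs A and B:
 *
 *	CPU A: emulated atomic_add()	CPU B: unlocked atomic_set()
 *	atomic_ops_lock(flags);
 *	temp = v->counter;
 *					v->counter = i;
 *	v->counter = temp + 1;		// B's store silently clobbered
 *	atomic_ops_unlock(flags);
 *
 * Taking the lock in atomic_set() serializes B behind A, so its store
 * can never land in the middle of an emulated read-modify-write.
 */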
/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}

#endif /* !CONFIG_ARC_HAS_LLSC */
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
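/*
 * Editor's note (not part of the original header): each ATOMIC_OPS()
 * line above expands into a pair of functions; ATOMIC_OPS(add, +=, add)
 * yields roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v);
 *	static inline int atomic_add_return(int i, atomic_t *v);
 *
 * so callers can write, e.g.:
 *
 *	atomic_t cnt = ATOMIC_INIT(0);
 *	atomic_add(5, &cnt);
 *	BUG_ON(atomic_sub_return(5, &cnt) != 0);
 */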
#define atomic_andnot atomic_andnot

ATOMIC_OP(and, &=, and)
ATOMIC_OP(andnot, &= ~, bic)
ATOMIC_OP(or, |=, or)
ATOMIC_OP(xor, ^=, xor)

#undef SCOND_FAIL_RETRY_VAR_DEF
#undef SCOND_FAIL_RETRY_ASM
#undef SCOND_FAIL_RETRY_VARS
#else /* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
{
	int temp;

	__asm__ __volatile__(
	"	ld.di %0, [%1]"
	: "=r"(temp)
	: "r"(&v->counter)
	: "memory");
	return temp;
}

static inline void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__(
	"	st.di %0,[%1]"
	:
	: "r"(i), "r"(&v->counter)
	: "memory");
}
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	:								\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp = i;						\
									\
	/* Explicit full memory barrier needed before/after */		\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"	mov r2, %0\n"						\
	"	mov r3, %1\n"						\
	"	.word %2\n"						\
	"	mov %0, r2"						\
	: "+r"(temp)							\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\
									\
	smp_mb();							\
									\
	temp c_op i;							\
									\
	return temp;							\
}
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)
#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
ATOMIC_OP(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
#define atomic_andnot(mask, v) atomic_and(~(mask), (v))
ATOMIC_OP(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OP(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)
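/*
 * Editor's note (not part of the original header): on EZNPS the atomic
 * op is a custom CTOP instruction taking its operands in fixed
 * registers, so e.g. atomic_and(m, v) expands to roughly:
 *
 *	mov r2, m				// operand
 *	mov r3, &v->counter			// address
 *	.word CTOP_INST_AAND_DI_R2_R2_R3	// atomic op at [r3]; per
 *						// ATOMIC_OP_RETURN above,
 *						// the old value lands in r2
 *
 * which is why ATOMIC_OP() lists r2/r3 as clobbered.
 */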
#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
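/*
 * Editor's usage sketch (not part of the original header): this helper
 * underlies atomic_add_unless()/atomic_inc_not_zero() below, e.g. for a
 * hypothetical refcount that must not be revived once it hits zero:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object already dying; hands off
 */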
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)
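/*
 * Editor's usage sketch (not part of the original header): the derived
 * helpers above are the usual reference-counting building blocks:
 *
 *	atomic_inc(&obj->refcnt);		// take a reference
 *	if (atomic_dec_and_test(&obj->refcnt))	// drop it; true when last
 *		kfree(obj);
 */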
#define ATOMIC_INIT(i)			{ (i) }

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */