/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
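/*
 * Usage sketch (illustrative only, not part of this header): a simple
 * statistics counter. The variable name and limit are made up for the
 * example; the atomic_* calls are the real API.
 *
 *	static atomic_t nr_widgets = ATOMIC_INIT(0);
 *
 *	atomic_inc(&nr_widgets);
 *	if (atomic_read(&nr_widgets) > WIDGET_MAX)	// WIDGET_MAX: hypothetical
 *		atomic_set(&nr_widgets, 0);
 */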
#if __LINUX_ARM_ARCH__ >= 6
/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
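/*
 * For reference, the exclusive-monitor loop above is roughly this C-level
 * retry loop (a sketch, not compiled here; strex_failed() is a made-up
 * stand-in for the strex status check):
 *
 *	do {
 *		result = v->counter;		// ldrex: load, mark exclusive
 *		result += i;			// add
 *	} while (strex_failed(&v->counter, result));	// strex/teq/bne 1b
 */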
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
	smp_mb();

	return result;
}
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
	smp_mb();

	return result;
}
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();
	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);
	smp_mb();

	return oldval;
}
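/*
 * Usage sketch (illustrative only): the usual lock-free read/compute/retry
 * update built on atomic_cmpxchg(). The function name and the clamping
 * policy below are made up for the example.
 *
 *	static void clamped_add(atomic_t *v, int i, int max)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = min(old + i, max);
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *	}
 */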
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
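/*
 * Pre-ARMv6 cores have no ldrex/strex, so the fallbacks below make each
 * read-modify-write atomic by disabling interrupts around it. That is only
 * sufficient on uniprocessor systems, hence the CONFIG_SMP error above.
 */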
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}
#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
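/*
 * Usage sketch (illustrative only): __atomic_add_unless() is the backend
 * for atomic_add_unless() and atomic_inc_not_zero() in <linux/atomic.h>.
 * A common use is taking a reference only while an object is still live;
 * the structure and function names below are made up for the example.
 *
 *	static bool widget_tryget(struct widget *w)
 *	{
 *		return atomic_inc_not_zero(&w->refcnt);
 *	}
 */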
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
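/*
 * Usage sketch (illustrative only): the classic "drop a reference, free on
 * the final put" pattern built on atomic_dec_and_test(). widget_destroy()
 * is a made-up stand-in for whatever releases the object.
 *
 *	static void widget_put(struct widget *w)
 *	{
 *		if (atomic_dec_and_test(&w->refcnt))
 *			widget_destroy(w);
 *	}
 */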
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
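/*
 * Usage sketch (illustrative only): plain atomic_inc()/atomic_dec() are
 * unordered on ARM, so callers that publish data before the atomic op
 * bracket it explicitly. The fields below are made up for the example.
 *
 *	obj->status = DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 */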
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
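/*
 * With LPAE, single-copy atomicity is architecturally guaranteed for
 * ldrd/strd to 64-bit aligned addresses, so the plain doubleword load and
 * store above are enough and no exclusive-monitor loop is needed.
 */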
#else
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();
	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
	smp_mb();

	return result;
}
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();
	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
	smp_mb();

	return result;
}
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();
	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);
	smp_mb();

	return oldval;
}
static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();
	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");
	smp_mb();

	return result;
}
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();
	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");
	smp_mb();

	return result;
}
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}
#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
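/*
 * Usage sketch (illustrative only): atomic64_t is used like atomic_t for
 * counters that must not wrap at 32 bits, e.g. byte counts. The variable
 * names below are made up for the example.
 *
 *	static atomic64_t bytes_rx = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_rx);
 *	if (atomic64_read(&bytes_rx) > quota)
 *		...
 */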
#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif