/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __ARM64_IN_ATOMIC_IMPL
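
/*
 * Two implementations back these operations: when the kernel is built
 * with ARMv8.1 LSE atomics and the assembler can emit them
 * (CONFIG_ARM64_LSE_ATOMICS && CONFIG_AS_LSE), the LSE instructions are
 * used; otherwise the traditional load-exclusive/store-exclusive (LL/SC)
 * sequences from atomic_ll_sc.h are pulled in.
 */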
#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
#include <asm/atomic_lse.h>
#else
#include <asm/atomic_ll_sc.h>
#endif

#undef __ARM64_IN_ATOMIC_IMPL
/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	ACCESS_ONCE((v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
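
/*
 * Usage sketch (illustrative only; example_count is a hypothetical
 * variable, not part of this header):
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 5);
 *	old = atomic_xchg(&example_count, 10);	(old is now 5)
 *	val = atomic_read(&example_count);	(val is now 10)
 */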

/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
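
/*
 * Usage sketch (illustrative only): __atomic_add_unless() returns the
 * old value, so a hypothetical lookup path can take a reference only
 * while the count is still non-zero:
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	(refcount was 0: object already dead)
 */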

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v)	(atomic_add_return(i, v) < 0)
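
/*
 * Defining atomic_andnot to itself below lets the generic atomics code
 * in <linux/atomic.h> see (via #ifndef atomic_andnot) that this
 * architecture supplies its own atomic_andnot(), so no generic fallback
 * needs to be emitted.
 */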
#define atomic_andnot atomic_andnot

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

#define atomic64_xchg(v, new)	(xchg(&((v)->counter), new))

/*
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if the add happened, zero otherwise.
 */
static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}
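
/*
 * Note the asymmetry with __atomic_add_unless() above: this one returns
 * a success flag, not the old value. Illustrative sketch, with quota and
 * LIMIT hypothetical placeholders:
 *
 *	static atomic64_t quota = ATOMIC64_INIT(0);
 *
 *	if (!atomic64_add_unless(&quota, 1LL, LIMIT))
 *		(quota was already at LIMIT; nothing was added)
 */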

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
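
/*
 * atomic64_inc_not_zero() builds on atomic64_add_unless() and is the
 * classic "take a reference unless the object is already dead" primitive,
 * e.g. in a hypothetical lockless lookup:
 *
 *	if (!atomic64_inc_not_zero(&obj->refcount))
 *		return NULL;	(refcount already hit zero; do not revive)
 */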

#define atomic64_andnot	atomic64_andnot

#endif /* __ASM_ATOMIC_H */