/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/bug.h>
#include <linux/mmdebug.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

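/*
 * ARM64_LSE_ATOMIC_INSN(llsc, lse) emits the LL/SC sequence by default;
 * when the kernel is built with LSE support and the CPU implements the
 * ARMv8.1 atomic instructions, the alternatives framework patches in the
 * LSE encoding at boot.
 */
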
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, tmp;

	switch (size) {
	case 1:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpalb	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
		: "r" (x)
		: "memory");
		break;
	case 2:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpalh	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
		: "r" (x)
		: "memory");
		break;
	case 4:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpal	%w3, %w0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
		: "r" (x)
		: "memory");
		break;
	case 8:
		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC */
		"	prfm	pstl1strm, %2\n"
		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
		"	dmb	ish",
		/* LSE atomics */
		"	nop\n"
		"	nop\n"
		"	swpal	%3, %0, %2\n"
		"	nop\n"
		"	nop")
		: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
		: "r" (x)
		: "memory");
		break;
	default:
		BUILD_BUG();
	}

	return ret;
}

#define xchg(ptr,x) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret; \
})

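/*
 * Illustrative use; a sketch with hypothetical variable names, not code
 * from this header:
 *
 *	unsigned long seen;
 *
 *	seen = xchg(&pending_flags, 0UL);
 *
 * xchg() stores the new value and returns the value previously held at
 * *ptr, and the operation implies a full memory barrier.
 */
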
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_1(ptr, (u8)old, new);
	case 2:
		return __cmpxchg_case_2(ptr, (u16)old, new);
	case 4:
		return __cmpxchg_case_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

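/*
 * The __cmpxchg_case_*() and __cmpxchg_double*() helpers used here and
 * below come in via <asm/atomic.h>, which provides both LL/SC and LSE
 * implementations selected by the same alternatives mechanism as __xchg().
 */
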
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
	case 2:
		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
	case 4:
		return __cmpxchg_case_mb_4(ptr, old, new);
	case 8:
		return __cmpxchg_case_mb_8(ptr, old, new);
	default:
		BUILD_BUG();
	}

	unreachable();
}

#define cmpxchg(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
			     sizeof(*(ptr))); \
	__ret; \
})

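/*
 * Illustrative use; a sketch with hypothetical names:
 *
 *	if (cmpxchg(&owner, NULL, current) == NULL)
 *		...the caller won the race to claim ownership...
 *
 * cmpxchg() returns the value actually found at *ptr, so success is
 * detected by comparing the return value against the expected "o".
 */
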
#define cmpxchg_local(ptr, o, n) \
({ \
	__typeof__(*(ptr)) __ret; \
	__ret = (__typeof__(*(ptr))) \
		__cmpxchg((ptr), (unsigned long)(o), \
			  (unsigned long)(n), sizeof(*(ptr))); \
	__ret; \
})

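/*
 * Unlike cmpxchg(), cmpxchg_local() goes through the barrier-free
 * __cmpxchg() path and provides no memory-ordering guarantees; it is
 * only suitable for data that other CPUs do not access concurrently,
 * such as per-CPU state.
 */
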
#define system_has_cmpxchg_double()	1

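/*
 * cmpxchg_double() operates on a pair of adjacent 64-bit words. The
 * check below enforces that layout: each word must be eight bytes wide
 * (BUILD_BUG() otherwise) and ptr2 must immediately follow ptr1 in
 * memory.
 */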
#define __cmpxchg_double_check(ptr1, ptr2) \
({ \
	if (sizeof(*(ptr1)) != 8) \
		BUILD_BUG(); \
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
})

#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
				     (unsigned long)(n1), (unsigned long)(n2), \
				     ptr1); \
	__ret; \
})

#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
	int __ret;\
	__cmpxchg_double_check(ptr1, ptr2); \
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
				  (unsigned long)(n1), (unsigned long)(n2), \
				  ptr1); \
	__ret; \
})

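/*
 * Illustrative use; a sketch with hypothetical names (the SLUB allocator
 * uses this pattern for its freelist/counters pair):
 *
 *	struct {
 *		void *freelist;
 *		unsigned long counters;
 *	} __aligned(16) slot;
 *
 *	if (cmpxchg_double(&slot.freelist, &slot.counters,
 *			   old_list, old_cnt, new_list, new_cnt))
 *		...both words were updated atomically...
 *
 * Note the '!' above: the low-level helpers return 0 on success, so the
 * macros return 1 when both words were swapped and 0 otherwise.
 */
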
#define _protect_cmpxchg_local(pcp, o, n)			\
({								\
	typeof(*raw_cpu_ptr(&(pcp))) __ret;			\
	preempt_disable();					\
	__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);	\
	preempt_enable();					\
	__ret;							\
})

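/*
 * The preempt_disable()/preempt_enable() bracket pins the task to the
 * current CPU for the duration of the operation, which is what makes the
 * barrier-free cmpxchg_local() safe on this CPU's copy of the variable.
 */
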
#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

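/*
 * Illustrative use via the size-generic this_cpu_cmpxchg() wrapper; the
 * per-CPU variable and the IDLE/BUSY values are hypothetical:
 *
 *	DEFINE_PER_CPU(int, sample_state);
 *
 *	old = this_cpu_cmpxchg(sample_state, IDLE, BUSY);
 */
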
#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)		\
({									\
	int __ret;							\
	preempt_disable();						\
	__ret = cmpxchg_double_local(	raw_cpu_ptr(&(ptr1)),		\
					raw_cpu_ptr(&(ptr2)),		\
					o1, o2, n1, n2);		\
	preempt_enable();						\
	__ret;								\
})

#define cmpxchg64(ptr,o,n)		cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

#define cmpxchg64_relaxed(ptr,o,n)	cmpxchg_local((ptr),(o),(n))

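/*
 * arm64 is a 64-bit architecture, so the generic macros above already
 * handle 64-bit quantities and the cmpxchg64*() family maps straight
 * onto them. cmpxchg64_relaxed() reuses cmpxchg_local() since both name
 * the variant without barrier semantics.
 */
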
#endif	/* __ASM_CMPXCHG_H */