#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <asm/barrier.h>
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
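	/*
	 * ARMv6+: use the exclusive monitor.  ldrex reads the old value,
	 * strex attempts the store and writes 0 or 1 into %1; the loop
	 * retries until the store-exclusive succeeds.
	 */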
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
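	/*
	 * swp bypasses the cache on StrongARM, so emulate the exchange
	 * with interrupts disabled.  Only safe on UP (enforced above).
	 */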
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
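	/* Pre-ARMv6 with a working swp: use the native swap instructions. */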
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
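/*
 * Illustrative only (the names below are made up, not part of this header):
 *
 *	old_state = xchg(&adapter->state, STATE_RESETTING);
 *
 * atomically stores the new value and returns the previous contents.
 */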
#include <asm-generic/cmpxchg-local.h>
#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
#else	/* min ARCH >= ARMv6 */
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
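	/*
	 * Load-exclusive the current value; if it matches 'old', attempt a
	 * store-exclusive of 'new'.  res ends up 0 on success and 1 if the
	 * exclusive monitor was lost, in which case the loop retries.
	 */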
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}
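/*
 * __cmpxchg_mb() wraps __cmpxchg() with a full barrier on each side, so
 * the cmpxchg() macro below is fully ordered.
 */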
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}
#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))
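/*
 * Illustrative only (names are made up, not part of this header): the usual
 * lock-free update pattern built on cmpxchg() is
 *
 *	do {
 *		old = v->counter;
 *		new = old + 1;
 *	} while (cmpxchg(&v->counter, old, new) != old);
 */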
static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
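	/*
	 * Plain ARMv6 has no byte/halfword exclusives, so fall back to the
	 * generic (interrupt-disabling) local cmpxchg for those sizes.
	 */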
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}
#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
					     (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))
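/*
 * 64-bit cmpxchg is routed through atomic64_cmpxchg()/local64_cmpxchg();
 * the pointer is assumed to alias the 64-bit counter embedded in an
 * atomic64_t (resp. local64_t), which is what makes the container_of()
 * below legitimate.
 */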
#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
						atomic64_t,		\
						counter),		\
					      (unsigned long long)(o),	\
					      (unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n)					\
	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
						local64_t,		\
						a),			\
					     (unsigned long long)(o),	\
					     (unsigned long long)(n)))
#endif	/* __LINUX_ARM_ARCH__ >= 6 */
#endif /* __ASM_ARM_CMPXCHG_H */