#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <asm/processor.h>
/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)	__ALT_SMP_ASM(		\
	"it " cond "\n\t"			\
	"wfe" cond ".n",			\
						\
	"nop.w"					\
)
#else
#define WFE(cond)	__ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV		__ALT_SMP_ASM(WASM(sev), WASM(nop))
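
/*
 * Illustrative sketch only (not built): how the WFE()/SEV pair above is
 * used.  A waiter polls the lock word and parks in WFE while it is held;
 * the releasing CPU signals with SEV (via dsb_sev() below), and on UP
 * kernels the fixup patches both to NOPs.  The wait_sketch() name is
 * hypothetical and exists only for this example.
 */
#if 0
static inline void wait_sketch(arch_rwlock_t *rw)
{
	unsigned long tmp;

	/* loop: load the lock word, sleep in WFE while it is non-zero */
	__asm__ __volatile__(
"1:	ldr	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cc");
}
#endif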

static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
	__asm__ __volatile__ (
		"dsb ishst\n"
		SEV
	);
#else
	__asm__ __volatile__ (
		"mcr p15, 0, %0, c7, c10, 4\n"
		SEV
		: : "r" (0)
	);
#endif
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	u32 newval;
	arch_spinlock_t lockval;

	__asm__ __volatile__(
"1:	ldrex	%0, [%3]\n"
"	add	%1, %0, %4\n"
"	strex	%2, %1, [%3]\n"
"	teq	%2, #0\n"
"	bne	1b"
	: "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
	: "cc");

	while (lockval.tickets.next != lockval.tickets.owner) {
		wfe();
		lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
	}

	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long contended, res;
	u32 slock;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"
		"	mov	%2, #0\n"
		"	subs	%1, %0, %0, ror #16\n"
		"	addeq	%0, %0, %4\n"
		"	strexeq	%2, %0, [%3]"
		: "=&r" (slock), "=&r" (contended), "=&r" (res)
		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->tickets.owner++;
	dsb_sev();
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return tickets.owner != tickets.next;
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
	return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended	arch_spin_is_contended
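
/*
 * Illustrative sketch only (not built, not part of the kernel API): the
 * ticket algorithm used above, rewritten in plain C.  The hypothetical
 * ticket_*_sketch() names exist only for this example, and the GCC
 * __atomic_fetch_add() builtin stands in for the ldrex/add/strex loop in
 * arch_spin_lock().
 */
#if 0
static inline void ticket_lock_sketch(arch_spinlock_t *lock)
{
	/* atomically take a ticket and advance lock->tickets.next */
	u16 ticket = __atomic_fetch_add(&lock->tickets.next, 1, __ATOMIC_RELAXED);

	/* spin (sleeping in WFE) until the owner half reaches our ticket */
	while (ACCESS_ONCE(lock->tickets.owner) != ticket)
		wfe();

	smp_mb();			/* barrier after taking the lock */
}

static inline void ticket_unlock_sketch(arch_spinlock_t *lock)
{
	smp_mb();			/* barrier before releasing the lock */
	lock->tickets.owner++;		/* hand over to the next ticket holder */
	dsb_sev();			/* wake any CPUs waiting in WFE */
}
#endif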

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
	WFE("ne")
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	teq	%0, #0\n"
		"	strexeq	%1, %3, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock), "r" (0x80000000)
		: "cc");
	} while (res);

	if (!contended) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]\n"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");

	dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)		((x)->lock == 0)
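
/*
 * Illustrative sketch only (not built): the write side of the rwlock word.
 * 0 means unlocked, bit 31 (0x80000000) means write-held, and 1..N means N
 * readers.  Taking the write lock is a 0 -> 0x80000000 transition; the
 * hypothetical write_trylock_sketch() below uses the GCC
 * __sync_bool_compare_and_swap() builtin in place of the ldrex/teq/strexeq
 * sequence in arch_write_trylock().
 */
#if 0
static inline int write_trylock_sketch(arch_rwlock_t *rw)
{
	/* succeed only if no readers and no writer currently hold the lock */
	if (__sync_bool_compare_and_swap(&rw->lock, 0, 0x80000000)) {
		smp_mb();		/* barrier after taking the lock */
		return 1;
	}
	return 0;
}
#endif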

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
	WFE("mi")
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	if (tmp == 0)
		dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long contended, res;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	mov	%1, #0\n"
		"	adds	%0, %0, #1\n"
		"	strexpl	%1, %0, [%2]"
		: "=&r" (contended), "=&r" (res)
		: "r" (&rw->lock)
		: "cc");
	} while (res);

	/* If the lock is negative, then it is already held for write. */
	if (contended < 0x80000000) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
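
/*
 * Illustrative sketch only (not built): the read side just counts readers
 * in the low 31 bits.  A reader increments the word and may keep it only
 * while the result stays non-negative (no writer owns bit 31); that is
 * what the adds/strexpl pair above implements.  The hypothetical
 * read_trylock_sketch() below uses the GCC __atomic_compare_exchange_n()
 * builtin in place of ldrex/strex.
 */
#if 0
static inline int read_trylock_sketch(arch_rwlock_t *rw)
{
	u32 old = ACCESS_ONCE(rw->lock);

	while (!(old & 0x80000000)) {	/* fail once a writer holds bit 31 */
		if (__atomic_compare_exchange_n(&rw->lock, &old, old + 1, 0,
						__ATOMIC_RELAXED,
						__ATOMIC_RELAXED)) {
			smp_mb();	/* barrier after taking the lock */
			return 1;
		}
		/* CAS failed: "old" was refreshed, loop and retry */
	}
	return 0;
}
#endif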

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */