#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
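/*
 * For illustration only: callers normally reach these primitives through
 * the generic <linux/spinlock.h> wrappers rather than arch_spin_*()
 * directly.  "my_lock" below is just a made-up example lock:
 *
 *	static DEFINE_SPINLOCK(my_lock);
 *	unsigned long flags;
 *
 *	spin_lock(&my_lock);			 (IRQs left untouched)
 *	...
 *	spin_unlock(&my_lock);
 *
 *	spin_lock_irqsave(&my_lock, flags);	 (IRQs disabled on this CPU)
 *	...
 *	spin_unlock_irqrestore(&my_lock, flags);
 */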
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif
#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.tickets.head == lock.tickets.tail;
}
/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
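/*
 * For illustration, one possible sequence on two CPUs, starting from an
 * unlocked lock (head == tail == 0):
 *
 *	CPU A: xadd returns { head = 0, tail = 0 }, lock's tail becomes 1.
 *	       head == tail, so A owns the lock immediately.
 *	CPU B: xadd returns { head = 0, tail = 1 }, lock's tail becomes 2.
 *	       head != tail, so B spins until it observes head == 1.
 *	CPU A: unlock increments head to 1; B's spin sees it and B now
 *	       owns the lock, preserving FIFO order of arrival.
 */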
static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = 1 };

	inc = xadd(&lock->tickets, inc);

	for (;;) {
		if (inc.head == inc.tail)
			break;
		cpu_relax();
		inc.head = ACCESS_ONCE(lock->tickets.head);
	}
	barrier();		/* make sure nothing creeps before the lock is taken */
}
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = ACCESS_ONCE(lock->tickets);
	if (old.tickets.head != old.tickets.tail)
		return 0;

	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
}
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return tmp.tail != tmp.head;
}
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

	return (__ticket_t)(tmp.tail - tmp.head) > 1;
}
#ifndef CONFIG_PARAVIRT_SPINLOCKS

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						  unsigned long flags)
{
	arch_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	while (arch_spin_is_locked(lock))
		cpu_relax();
}
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
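/*
 * For illustration, in terms of the operations below (the exact constants
 * come from asm/rwlock.h):
 *
 *	unlocked:	counter == RW_LOCK_BIAS
 *	N readers:	counter == RW_LOCK_BIAS - N	(still positive)
 *	one writer:	counter == 0			(bias subtracted)
 *	contended:	a writer's subtraction while readers hold the lock,
 *			or a reader's decrement while a writer holds it,
 *			drives the counter negative, setting the sign bit.
 */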
/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return lock->lock > 0;
}
/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->write == WRITE_LOCK_CMP;
}
static inline void arch_read_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (&rw->write), "i" (RW_LOCK_BIAS)
		     : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	READ_LOCK_ATOMIC(t) *count = (READ_LOCK_ATOMIC(t) *)lock;

	if (READ_LOCK_ATOMIC(dec_return)(count) >= 0)
		return 1;
	READ_LOCK_ATOMIC(inc)(count);
	return 0;
}
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)&lock->write;

	if (atomic_sub_and_test(WRITE_LOCK_CMP, count))
		return 1;
	atomic_add(WRITE_LOCK_CMP, count);
	return 0;
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
		     :"+m" (rw->lock) : : "memory");
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
		     : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
}
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#undef READ_LOCK_SIZE
#undef READ_LOCK_ATOMIC
#undef WRITE_LOCK_ADD
#undef WRITE_LOCK_SUB
#undef WRITE_LOCK_CMP

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */