#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
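
/*
 * Usage sketch (illustrative only, compiled out): kernel code never calls
 * the __raw_* primitives below directly; it goes through the generic
 * spin_lock()/spin_lock_irqsave() wrappers from <linux/spinlock.h>, which
 * select between the IRQ-clearing and plain variants mentioned above.
 * "my_lock" and "my_flags" are hypothetical names.
 */
#if 0
static DEFINE_SPINLOCK(my_lock);

static void spinlock_usage_sketch(void)
{
	unsigned long my_flags;

	spin_lock(&my_lock);		/* plain variant: IRQs untouched */
	spin_unlock(&my_lock);

	spin_lock_irqsave(&my_lock, my_flags);	/* IRQ-clearing variant */
	spin_unlock_irqrestore(&my_lock, my_flags);
}
#endif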

/*
 * The lock byte is 1 when the lock is free; 0 or negative means locked.
 * These macros pick the byte-sized instruction forms that operate on it.
 */
typedef char _slock_t;
#define LOCK_INS_DEC "decb"
#define LOCK_INS_XCH "xchgb"
#define LOCK_INS_MOV "movb"
#define LOCK_INS_CMP "cmpb"
#define LOCK_PTR_REG "a"	/* rwlock slow paths expect the pointer in %eax */

static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return *(volatile _slock_t *)(&(lock)->slock) <= 0;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	/*
	 * Take the lock with a locked "decb"; if the result is negative,
	 * somebody else holds it, so spin read-only ("rep;nop") until the
	 * byte goes positive again, then retry the decrement.
	 */
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; " LOCK_INS_DEC " %0\n\t"
		"jns 3f\n"
		"2:\t"
		"rep;nop\n\t"
		LOCK_INS_CMP " $0,%0\n\t"
		"jle 2b\n\t"
		"jmp 1b\n"
		"3:\n\t"
		: "+m" (lock->slock) : : "memory");
}

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire. This is a performance feature anyway,
 * so we fall back to the old interrupt-dropping variant in that case.
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
					 unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; " LOCK_INS_DEC " %[slock]\n\t"
		"jns 5f\n"
		"testl $0x200, %[flags]\n\t"	/* EFLAGS.IF: were irqs on? */
		"jz 4f\n\t"
		STI_STRING "\n"			/* yes: spin with irqs enabled */
		"3:\t"
		"rep;nop\n\t"
		LOCK_INS_CMP " $0, %[slock]\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"		/* disable again before retrying */
		"jmp 1b\n"
		"4:\t"				/* no: spin with irqs disabled */
		"rep;nop\n\t"
		LOCK_INS_CMP " $0, %[slock]\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: [slock] "+m" (lock->slock)
		: [flags] "r" ((u32)flags)
		  CLI_STI_INPUT_ARGS
		: "memory" CLI_STI_CLOBBERS);
}
#endif

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	_slock_t oldval;

	/* xchg is implicitly locked, so no LOCK_PREFIX is needed here. */
	asm volatile(
		LOCK_INS_XCH " %0,%1"
		:"=q" (oldval), "+m" (lock->slock)
		:"0" (0) : "memory");

	return oldval > 0;
}

/*
 * __raw_spin_unlock based on writing $1 to the low byte.
 * This method works despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(LOCK_INS_MOV " $1,%0" : "=m" (lock->slock) :: "memory");
}

#else

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	unsigned char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
		     : "0" (oldval) : "memory");
}

#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
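
/*
 * Sketch of the irq-safe/non-irq-safe mix described above (illustrative
 * only, compiled out; "my_rwlock" is a hypothetical lock and the wrappers
 * are the generic ones from <linux/spinlock.h>):
 */
#if 0
static DEFINE_RWLOCK(my_rwlock);

static void reader(void)	/* safe even from an interrupt handler */
{
	read_lock(&my_rwlock);
	read_unlock(&my_rwlock);
}

static void writer(void)	/* writers must be irq-safe */
{
	unsigned long flags;

	write_lock_irqsave(&my_rwlock, flags);
	write_unlock_irqrestore(&my_rwlock, flags);
}
#endif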

/* Would read_trylock() succeed? */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/* Would write_trylock() succeed? */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);	/* raced with a writer: undo and fail */
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);	/* contended: restore the bias */
	return 0;
}
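
/*
 * Worked example of the bias arithmetic (assuming RW_LOCK_BIAS is
 * 0x01000000, as defined in asm/rwlock.h): an unlocked lock holds the
 * full bias, each reader subtracts 1, and a writer subtracts the whole
 * bias, so only an uncontended lock reaches exactly 0. Illustrative
 * only, compiled out:
 */
#if 0
static void rw_bias_example(void)
{
	raw_rwlock_t rw = { RW_LOCK_BIAS };	/* 0x01000000: unlocked */

	__raw_read_lock(&rw);	/* counter: 0x00ffffff */
	/* still > 0, so more readers may enter ... */
	/* ... but != RW_LOCK_BIAS, so a writer would have to wait */
	__raw_read_unlock(&rw);	/* counter: 0x01000000 again */

	__raw_write_lock(&rw);	/* counter: 0, no readers, writer owns it */
	__raw_write_unlock(&rw);
}
#endif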

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */