cmpxchg_local((ptr), (o), (n)); \
})
-#define xadd(ptr, inc) \
- do { \
- switch (sizeof(*(ptr))) { \
- case 1: \
- asm volatile (LOCK_PREFIX "xaddb %b0, %1\n" \
- : "+r" (inc), "+m" (*(ptr)) \
- : : "memory", "cc"); \
- break; \
- case 2: \
- asm volatile (LOCK_PREFIX "xaddw %w0, %1\n" \
- : "+r" (inc), "+m" (*(ptr)) \
- : : "memory", "cc"); \
- break; \
- case 4: \
- asm volatile (LOCK_PREFIX "xaddl %0, %1\n" \
- : "+r" (inc), "+m" (*(ptr)) \
- : : "memory", "cc"); \
- break; \
- case 8: \
- asm volatile (LOCK_PREFIX "xaddq %q0, %1\n" \
- : "+r" (inc), "+m" (*(ptr)) \
- : : "memory", "cc"); \
- break; \
- } \
- } while(0)
-
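The xadd() macro being removed here is an atomic fetch-and-add: LOCK XADD adds inc into *ptr and hands the old value back through the "+r" (inc) operand, and the size switch merely selects the operand-width suffix (b/w/l/q). As an illustrative sketch only, not part of the patch, the 16-bit case expressed with the GCC/Clang __atomic builtins instead of inline asm:

/* Sketch only: semantics of xadd(ptr, inc) for sizeof(*ptr) == 2.
 * Atomically does { old = *ptr; *ptr += inc; } and returns old; the
 * macro instead writes the old value back into its inc argument. */
static inline unsigned short xadd_u16_sketch(unsigned short *ptr,
					     unsigned short inc)
{
	/* __ATOMIC_SEQ_CST stands in for the LOCK prefix plus the
	 * "memory" clobber of the asm version. */
	return __atomic_fetch_add(ptr, inc, __ATOMIC_SEQ_CST);
}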
#define cmpxchg16b(ptr, o1, o2, n1, n2) \
({ \
char __ret; \
* On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
* (PPro errata 66, 92)
*/
-static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
-{
- if (sizeof(lock->tickets.head) == sizeof(u8))
- asm volatile(LOCK_PREFIX "incb %0"
- : "+m" (lock->tickets.head) : : "memory");
- else
- asm volatile(LOCK_PREFIX "incw %0"
- : "+m" (lock->tickets.head) : : "memory");
-
-}
+# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
-static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
-{
- lock->tickets.head++;
-}
+# define UNLOCK_LOCK_PREFIX
#endif
/*
* save some instructions and make the code more elegant. There really isn't
* much between them in performance though, especially as locks are out of line.
*/
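The comment above (cut short at the hunk boundary) describes the ticket lock's two halves: a "head" holding the ticket now being served and a "tail" holding the next ticket to hand out. The asm implementations added below pack both into the single lock->slock word, one byte each while NR_CPUS < 256, so the x86 partial registers %b0/%h0 can address them directly. An illustrative view of that layout, with my own naming and assuming x86's little-endian byte order (it mirrors the tickets.head/tickets.tail struct the patch removes):

/* Sketch only: how the TICKET_SHIFT == 8 asm views lock->slock.
 * Little-endian: the low byte sits at the lower address. */
union ticket_sketch {
	unsigned short slock;		/* the word the asm xadds */
	struct {
		unsigned char head;	/* ticket now being served */
		unsigned char tail;	/* next ticket to hand out */
	};
};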
-static __always_inline struct __raw_tickets __ticket_spin_claim(struct arch_spinlock *lock)
-{
- register struct __raw_tickets tickets = { .tail = 1 };
-
- xadd(&lock->tickets, tickets);
+#if (NR_CPUS < 256)
+#define TICKET_SHIFT 8
- return tickets;
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+{
+ short inc = 0x0100;
+
+ asm volatile (
+ LOCK_PREFIX "xaddw %w0, %1\n"
+ "1:\t"
+ "cmpb %h0, %b0\n\t"
+ "je 2f\n\t"
+ "rep ; nop\n\t"
+ "movb %1, %b0\n\t"
+ /* don't need lfence here, because loads are in-order */
+ "jmp 1b\n"
+ "2:"
+ : "+Q" (inc), "+m" (lock->slock)
+ :
+ : "memory", "cc");
}
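Restated in C, the fast path above claims a ticket with a single xaddw and then spins until the head byte catches up with it. A sketch only, reusing the hypothetical union above and compiler builtins; the real code wants the asm so it can work on the two halves via %b0/%h0 without extra shifts:

/* Sketch only: the TICKET_SHIFT == 8 lock path.  xaddw returns the old
 * (head, tail) pair in inc: %h0 is our ticket, %b0 the head we saw. */
static inline void ticket_lock_sketch(union ticket_sketch *lock)
{
	unsigned short old = __atomic_fetch_add(&lock->slock, 0x0100,
						__ATOMIC_SEQ_CST);
	unsigned char ticket = old >> 8;
	unsigned char head = old & 0xff;

	while (head != ticket) {			/* cmpb %h0, %b0 */
		__builtin_ia32_pause();			/* rep ; nop */
		head = *(volatile unsigned char *)&lock->head;
	}						/* movb %1, %b0 */
}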
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- register struct __raw_tickets inc;
+ int tmp, new;
+
+ asm volatile("movzwl %2, %0\n\t"
+ "cmpb %h0,%b0\n\t"
+ "leal 0x100(%" REG_PTR_MODE "0), %1\n\t"
+ "jne 1f\n\t"
+ LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
+ "1:"
+ "sete %b1\n\t"
+ "movzbl %b1,%0\n\t"
+ : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
+ :
+ : "memory", "cc");
+
+ return tmp;
+}
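The trylock above loads both bytes at once (movzwl), precomputes head-plus-one-in-the-tail with lea, which usefully leaves the flags from the cmpb intact, and only attempts the locked cmpxchgw when the lock looked free; sete/movzbl then turn ZF into the return value, covering both the "was busy" and the "cmpxchg raced" cases. Sketch only, same assumptions as above:

/* Sketch only: TICKET_SHIFT == 8 trylock.  Succeeds iff the lock looked
 * free (head == tail) and the cmpxchg then landed unopposed. */
static inline int ticket_trylock_sketch(unsigned short *slock)
{
	unsigned short old = *(volatile unsigned short *)slock;

	if ((old & 0xff) != (old >> 8))		/* cmpb %h0,%b0; jne */
		return 0;

	/* leal 0x100(%0), %1: same head, tail bumped by one */
	return __atomic_compare_exchange_n(slock, &old,
					   (unsigned short)(old + 0x100),
					   0, __ATOMIC_SEQ_CST,
					   __ATOMIC_RELAXED);
}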
- inc = __ticket_spin_claim(lock);
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+{
+ asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+ : "+m" (lock->slock)
+ :
+ : "memory", "cc");
+}
+#else
+#define TICKET_SHIFT 16
- for (;;) {
- if (inc.head == inc.tail)
- goto out;
- cpu_relax();
- inc.head = ACCESS_ONCE(lock->tickets.head);
- }
-out: barrier(); /* make sure nothing creeps before the lock is taken */
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+{
+ int inc = 0x00010000;
+ int tmp;
+
+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
+ "movzwl %w0, %2\n\t"
+ "shrl $16, %0\n\t"
+ "1:\t"
+ "cmpl %0, %2\n\t"
+ "je 2f\n\t"
+ "rep ; nop\n\t"
+ "movzwl %1, %2\n\t"
+ /* don't need lfence here, because loads are in-order */
+ "jmp 1b\n"
+ "2:"
+ : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
+ :
+ : "memory", "cc");
}
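The NR_CPUS >= 256 variant above is the same algorithm with 16-bit halves in a 32-bit slock, so the fields come out with shifts and zero-extends (shrl, movzwl) rather than partial registers. The same sketch, adjusted accordingly:

/* Sketch only: the TICKET_SHIFT == 16 lock path. */
static inline void ticket_lock16_sketch(unsigned int *slock)
{
	unsigned int old = __atomic_fetch_add(slock, 0x00010000,
					      __ATOMIC_SEQ_CST); /* xaddl */
	unsigned int ticket = old >> 16;		/* shrl $16, %0 */
	unsigned int head = old & 0xffff;		/* movzwl %w0, %2 */

	while (head != ticket) {
		__builtin_ia32_pause();			/* rep ; nop */
		head = *(volatile unsigned short *)slock; /* movzwl %1, %2 */
	}
}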
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- arch_spinlock_t old, new;
-
- old.tickets = ACCESS_ONCE(lock->tickets);
- if (old.tickets.head != old.tickets.tail)
- return 0;
-
- new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
-
- /* cmpxchg is a full barrier, so nothing can move before it */
- return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
+ int tmp;
+ int new;
+
+ asm volatile("movl %2,%0\n\t"
+ "movl %0,%1\n\t"
+ "roll $16, %0\n\t"
+ "cmpl %0,%1\n\t"
+ "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t"
+ "jne 1f\n\t"
+ LOCK_PREFIX "cmpxchgl %1,%2\n\t"
+ "1:"
+ "sete %b1\n\t"
+ "movzbl %b1,%0\n\t"
+ : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
+ :
+ : "memory", "cc");
+
+ return tmp;
}
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
- barrier(); /* prevent reordering out of locked region */
- __ticket_unlock_release(lock);
- barrier(); /* prevent reordering into locked region */
+ asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+ : "+m" (lock->slock)
+ :
+ : "memory", "cc");
}
+#endif
static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ int tmp = ACCESS_ONCE(lock->slock);
- return !!(tmp.tail ^ tmp.head);
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}
static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ int tmp = ACCESS_ONCE(lock->slock);
- return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
+ return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
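A worked example makes the masking in these two helpers concrete; the slock value is invented, with TICKET_SHIFT == 8:

/*   tmp = 0x0503:  tail = 0x05, head = 0x03  (one holder, two waiters)
 *   is_locked:     ((tmp >> 8) ^ tmp) & 0xff = (0x05 ^ 0x0503) & 0xff
 *                  = 0x06, non-zero           -> lock is held
 *   is_contended:  ((tmp >> 8) - tmp) & 0xff = (0x05 - 0x0503) & 0xff
 *                  = 0x02, greater than 1     -> waiters are queued
 * The & ((1 << TICKET_SHIFT) - 1) mask discards the high bits that the
 * unshifted tmp drags into the XOR and the subtraction. */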
#ifndef CONFIG_PARAVIRT_SPINLOCKS