git.karo-electronics.de Git - karo-tx-linux.git/blobdiff - arch/x86/include/asm/spinlock.h
Merge branch 'x86-spinlocks-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[karo-tx-linux.git] / arch / x86 / include / asm / spinlock.h
index e0e668422c7533ffc2d3b7f08f156f1ea951e33b..bf156ded74b56006a76cc02b8917984117af8afd 100644 (file)
@@ -1,11 +1,14 @@
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
 #include <asm/paravirt.h>
+#include <asm/bitops.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
 # define UNLOCK_LOCK_PREFIX
 #endif
 
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+       set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else  /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+                                                       __ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+                                                       __ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
        return lock.tickets.head == lock.tickets.tail;
@@ -52,81 +80,101 @@ static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-       register struct __raw_tickets inc = { .tail = 1 };
+       register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
        inc = xadd(&lock->tickets, inc);
+       if (likely(inc.head == inc.tail))
+               goto out;
 
+       inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
-               if (inc.head == inc.tail)
-                       break;
-               cpu_relax();
-               inc.head = ACCESS_ONCE(lock->tickets.head);
+               unsigned count = SPIN_THRESHOLD;
+
+               do {
+                       if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+                               goto out;
+                       cpu_relax();
+               } while (--count);
+               __ticket_lock_spinning(lock, inc.tail);
        }
-       barrier();              /* make sure nothing creeps before the lock is taken */
+out:   barrier();      /* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
        arch_spinlock_t old, new;
 
        old.tickets = ACCESS_ONCE(lock->tickets);
-       if (old.tickets.head != old.tickets.tail)
+       if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
                return 0;
 
-       new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+       new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
 
        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+                                           arch_spinlock_t old)
 {
-       __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+       arch_spinlock_t new;
+
+       BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+       /* Perform the unlock on the "before" copy */
+       old.tickets.head += TICKET_LOCK_INC;
+
+       /* Clear the slowpath flag */
+       new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+       /*
+        * If the lock is uncontended, clear the flag - use cmpxchg in
+        * case it changes behind our back though.
+        */
+       if (new.tickets.head != new.tickets.tail ||
+           cmpxchg(&lock->head_tail, old.head_tail,
+                                       new.head_tail) != old.head_tail) {
+               /*
+                * Lock still has someone queued for it, so wake up an
+                * appropriate waiter.
+                */
+               __ticket_unlock_kick(lock, old.tickets.head);
+       }
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+       if (TICKET_SLOWPATH_FLAG &&
+           static_key_false(&paravirt_ticketlocks_enabled)) {
+               arch_spinlock_t prev;
 
-       return tmp.tail != tmp.head;
-}
+               prev = *lock;
+               add_smp(&lock->tickets.head, TICKET_LOCK_INC);
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
-       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+               /* add_smp() is a full mb() */
 
-       return (__ticket_t)(tmp.tail - tmp.head) > 1;
+               if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+                       __ticket_unlock_slowpath(lock, prev);
+       } else
+               __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-       return __ticket_spin_is_contended(lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
+       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-       __ticket_spin_lock(lock);
+       return tmp.tail != tmp.head;
 }
 
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-       return __ticket_spin_trylock(lock);
-}
+       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-       __ticket_spin_unlock(lock);
+       return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
+#define arch_spin_is_contended arch_spin_is_contended
 
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
                                                  unsigned long flags)
@@ -134,8 +182,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
        arch_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
        while (arch_spin_is_locked(lock))