s390/spinlock: Implement ticket locks (8 Bit tickets)
author    Philipp Hachtmann <phacht@linux.vnet.ibm.com>
          Mon, 7 Apr 2014 16:25:23 +0000 (18:25 +0200)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
          Thu, 17 Apr 2014 10:46:47 +0000 (12:46 +0200)
This patch adds a ticket lock spinlock implementation to s390.
It is controlled by CONFIG_S390_TICKET_SPINLOCK.

The size of arch_spinlock_t is kept at 32 bits, so each lock ticket can
only have 8 bits. As a consequence the lock implementation is no longer
fair once the lock is taken and more than 253 CPUs are waiting for it,
but this can be considered a rare case.
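
To make the size constraint concrete, the 32-bit lock word can be pictured
with the small userspace sketch below. This is illustrative only, not the
kernel definition (that follows in spinlock_types.h); the field order mirrors
the big-endian s390 layout and the comments describe the roles the patch
assigns to each field.

#include <stdint.h>

/* Sketch of the 32-bit lock word used by the ticket variant. */
typedef union {
	uint32_t lock;			/* whole word, updated by compare-and-swap */
	struct {
		uint16_t owner;		/* ~cpu of the current holder, 0 if unowned */
		uint8_t  tail;		/* last ticket handed out, 0 = none issued */
		uint8_t  head;		/* ticket allowed to take the lock next */
	} tickets;
} demo_spinlock_t;

int main(void)
{
	/* The union keeps the lock at its original 32-bit size. */
	return sizeof(demo_spinlock_t) == sizeof(uint32_t) ? 0 : 1;
}

Tickets use the values 1..255, with 0 reserved to mean "no ticket", so only a
bounded number of waiters can hold distinct tickets at a time; waiters beyond
that limit cannot obtain a ticket and fall back to unfair spinning, which is
where the figure of 253 CPUs above comes from.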

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann <phacht@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/Kconfig
arch/s390/include/asm/spinlock.h
arch/s390/include/asm/spinlock_types.h
arch/s390/lib/spinlock.c

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index d239e6afb9238ea6f073ec1cfd330ea6af2a3fe4..387a687833e241a3744089e29678ad537ba4d509 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -330,6 +330,16 @@ config SYSVIPC_COMPAT
 config KEYS_COMPAT
        def_bool y if COMPAT && KEYS
 
+config S390_TICKET_SPINLOCK
+       bool "Use ticket spinlocks"
+       depends on S390 && SMP
+       default n
+       help
+         This enables support for ticket spinlocks. Ticket spinlocks
+         are fairer in the sense that waiting CPUs will get the lock
+         in the order in which they tried to obtain it.
+         The tradeoff is more complex code that could impact performance.
+
 config SMP
        def_bool y
        prompt "Symmetric multi-processing support"
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index 5a0b2882ad485de9eff628ad0623bfa971678913..84faa13aa7aeb6f7a39297ecaab277008b3e0231 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -1,7 +1,8 @@
 /*
  *  S390 version
- *    Copyright IBM Corp. 1999
+ *    Copyright IBM Corp. 1999, 2014
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *              Philipp Hachtmann (phacht@linux.vnet.ibm.com)
  *
  *  Derived from "include/asm-i386/spinlock.h"
  */
@@ -32,14 +33,35 @@ _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
  * Simple spin lock operations.  There are two variants, one clears IRQ's
  * on the local processor, one does not.
  *
- * We make no fairness assumptions. They have a cost.
- *
  * (the type definitions are in asm/spinlock_types.h)
  */
 
 void arch_spin_lock_wait(arch_spinlock_t *);
 int arch_spin_trylock_retry(arch_spinlock_t *);
 void arch_spin_relax(arch_spinlock_t *);
+
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+void arch_spin_unlock_slow(arch_spinlock_t *lp);
+
+static inline u32 arch_spin_lockval(u32 cpu)
+{
+       arch_spinlock_t new;
+
+       new.tickets.owner = ~cpu;
+       new.tickets.head = 0;
+       new.tickets.tail = 0;
+       return new.lock;
+}
+
+static inline void arch_spin_lock_wait_flags(arch_spinlock_t *lp,
+                                            unsigned long flags)
+{
+       arch_spin_lock_wait(lp);
+}
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
 void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
 
 static inline u32 arch_spin_lockval(int cpu)
@@ -47,6 +69,12 @@ static inline u32 arch_spin_lockval(int cpu)
        return ~cpu;
 }
 
+static inline void arch_spin_unlock_slow(arch_spinlock_t *lp)
+{
+}
+
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
 static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
        return lock.lock == 0;
@@ -89,7 +117,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lp)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lp)
 {
-       arch_spin_tryrelease_once(lp);
+       if (unlikely(!arch_spin_tryrelease_once(lp)))
+               arch_spin_unlock_slow(lp);
 }
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h
index b2cd6ff7c2c5bcdad941bb91297fa64fb8908616..472e12254a5150b1025d764e5bb6f2d7a9be7c67 100644
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
@@ -5,10 +5,27 @@
 # error "please don't include this file directly"
 #endif
 
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+typedef struct arch_spinlock {
+       union {
+               unsigned int lock;
+               struct __raw_tickets {
+                       u16 owner;
+                       u8 tail;
+                       u8 head;
+               } tickets;
+       };
+} arch_spinlock_t;
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
 typedef struct {
        unsigned int lock;
 } __attribute__ ((aligned (4))) arch_spinlock_t;
 
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
 #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
 
 typedef struct {
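
The union above is what lets the ticket fields be read and written
individually while the update itself remains a single compare-and-swap on the
whole 32-bit word, as spinlock.c does with _raw_compare_and_swap(). A minimal
userspace sketch of the idea, substituting GCC's __atomic builtins for the
kernel primitive purely for illustration:

#include <stdint.h>
#include <stdbool.h>

typedef union {
	uint32_t lock;
	struct {
		uint16_t owner;
		uint8_t  tail;
		uint8_t  head;
	} tickets;
} demo_spinlock_t;

/* Stand-in for the kernel's _raw_compare_and_swap(): CAS the full word. */
static bool demo_cas(uint32_t *lock, uint32_t old, uint32_t new)
{
	return __atomic_compare_exchange_n(lock, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	demo_spinlock_t lp = { .lock = 0 }, cur, next;

	/* Hand out the first ticket: copy the word, bump tail, CAS it back. */
	next.lock = cur.lock = __atomic_load_n(&lp.lock, __ATOMIC_RELAXED);
	next.tickets.tail = (uint8_t)(next.tickets.tail + 1);
	if (!next.tickets.tail)
		next.tickets.tail = 1;	/* wrap 255 -> 1, 0 stays reserved */
	return demo_cas(&lp.lock, cur.lock, next.lock) ? 0 : 1;
}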
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 3ca9de4d9cb9880a04ba24ec37b8e7e2d06e141e..0946b99fe6c3653be529dafa99f7a40940fd343d 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -1,8 +1,9 @@
 /*
  *    Out of line spinlock code.
  *
- *    Copyright IBM Corp. 2004, 2006
+ *    Copyright IBM Corp. 2004, 2014
  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *              Philipp Hachtmann (phacht@linux.vnet.ibm.com)
  */
 
 #include <linux/types.h>
@@ -24,6 +25,89 @@ static int __init spin_retry_setup(char *str)
 }
 __setup("spin_retry=", spin_retry_setup);
 
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+       arch_spinlock_t cur, new;
+       int cpu, owner, count;
+       u8 ticket = 0;
+
+       cpu = smp_processor_id();
+       count = spin_retry;
+       while (1) {
+               new.lock = cur.lock = ACCESS_ONCE(lp->lock);
+               if (new.lock == 0) {
+                       /* The lock is free with no waiter, try to get it. */
+                       new.tickets.owner = (u16) ~cpu;
+               } else if (!ticket) {
+                       /* Try to get a ticket. */
+                       new.tickets.tail = (u8)(new.tickets.tail + 1) ? : 1;
+                       if (new.tickets.tail == new.tickets.head)
+                               /* Overflow, can't get a ticket. */
+                               new.tickets.tail = cur.tickets.tail;
+               } else if (new.tickets.head == ticket)
+                       new.tickets.owner = (u16) ~cpu;
+               /* Do the atomic update. */
+               if (cur.lock != new.lock &&
+                   _raw_compare_and_swap(&lp->lock, cur.lock, new.lock)) {
+                       /* Update successful. */
+                       if (new.tickets.owner == (u16) ~cpu)
+                               return;         /* Got the lock. */
+                       ticket = new.tickets.tail; /* Got a ticket. */
+                       count = 0;
+               }
+               /* Lock could not be acquired yet. */
+               if (count--)
+                       continue;
+               count = spin_retry;
+               owner = cur.tickets.owner;
+               if (ticket) {
+                       if (owner && smp_vcpu_scheduled(~owner)) {
+                               if (MACHINE_IS_LPAR)
+                                       continue;
+                       } else
+                               count = 0;
+               }
+               /* Yield the cpu. */
+               if (owner)
+                       smp_yield_cpu(~owner);
+               else
+                       smp_yield();
+       }
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
+
+void arch_spin_unlock_slow(arch_spinlock_t *lp)
+{
+       arch_spinlock_t cur, new;
+
+       do {
+               cur.lock = ACCESS_ONCE(lp->lock);
+               new.lock = 0;
+               if (cur.tickets.head != cur.tickets.tail) {
+                       new.tickets.tail = cur.tickets.tail;
+                       new.tickets.head = (u8)(cur.tickets.head + 1) ? : 1;
+                       new.tickets.owner = 0;
+               }
+       } while (!_raw_compare_and_swap(&lp->lock, cur.lock, new.lock));
+}
+EXPORT_SYMBOL(arch_spin_unlock_slow);
+
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+       unsigned int cpu = lp->tickets.owner;
+
+       if (cpu != 0) {
+               if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+                   !smp_vcpu_scheduled(~cpu))
+                       smp_yield_cpu(~cpu);
+       }
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
 void arch_spin_lock_wait(arch_spinlock_t *lp)
 {
        int count = spin_retry;
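
A concrete hand-off, traced against the ticket code above with illustrative
values: CPU A holds the lock (owner = ~A, head = tail = 0). CPU B misses the
fast path and takes ticket 1 (tail = 1); CPU C takes ticket 2 (tail = 2).
When A releases, the fast release is expected to fail because ticket bits are
set, so arch_spin_unlock_slow() advances head to 1 and clears the owner; B,
whose ticket now equals head, sets owner = ~B and holds the lock. B's release
advances head to 2 for C, and C's release finally finds head == tail and
resets the whole word to 0.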
@@ -94,6 +178,8 @@ void arch_spin_relax(arch_spinlock_t *lp)
 }
 EXPORT_SYMBOL(arch_spin_relax);
 
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
        int count;