/*
* S390 version
- * Copyright IBM Corp. 1999
+ * Copyright IBM Corp. 1999, 2014
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Philipp Hachtmann (phacht@linux.vnet.ibm.com)
*
* Derived from "include/asm-i386/spinlock.h"
*/
/*
* Simple spin lock operations.  There are two variants, one clears IRQs
* on the local processor, one does not.
*
- * We make no fairness assumptions. They have a cost.
- *
* (the type definitions are in asm/spinlock_types.h)
*/
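
/*
 * Illustrative sketch, not part of this patch: the casts used below
 * only make sense if asm/spinlock_types.h overlays the 32-bit lock
 * word with ticket fields along these lines (big-endian field order
 * on s390, so a non-zero owner also makes the whole word non-zero):
 */
typedef union {
	u32 lock;		/* whole lock word, 0 == free */
	struct {
		u16 owner;	/* ~cpu of the holder, 0 if unowned */
		u8 head;	/* ticket currently being served */
		u8 tail;	/* last ticket handed out, 0 == none */
	} tickets;
} arch_spinlock_t;		/* sketch only; see asm/spinlock_types.h */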
void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_relax(arch_spinlock_t *);
+
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+void arch_spin_unlock_slow(arch_spinlock_t *lp);
+
+static inline u32 arch_spin_lockval(u32 cpu)
+{
+ arch_spinlock_t new;
+
+ new.tickets.owner = ~cpu;
+ new.tickets.head = 0;
+ new.tickets.tail = 0;
+ return new.lock;
+}
+
+static inline void arch_spin_lock_wait_flags(arch_spinlock_t *lp,
+ unsigned long flags)
+{
+ arch_spin_lock_wait(lp);
+}
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags);
static inline u32 arch_spin_lockval(int cpu)
{
	return ~cpu;
}
+static inline void arch_spin_unlock_slow(arch_spinlock_t *lp)
+{
+}
+
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}
static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
- arch_spin_tryrelease_once(lp);
+ if (unlikely(!arch_spin_tryrelease_once(lp)))
+ arch_spin_unlock_slow(lp);
}
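
/*
 * Context sketch (elided from this hunk, shown here as an assumption):
 * the locking fast path presumably mirrors arch_spin_unlock() above,
 * trying one inline compare-and-swap before falling back to the
 * out-of-line wait loop.  arch_spin_trylock_once() is the assumed
 * inline helper analogous to arch_spin_tryrelease_once():
 */
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}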
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)

--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
/*
* Out of line spinlock code.
*
- * Copyright IBM Corp. 2004, 2006
+ * Copyright IBM Corp. 2004, 2014
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ * Philipp Hachtmann (phacht@linux.vnet.ibm.com)
*/
#include <linux/types.h>
}
__setup("spin_retry=", spin_retry_setup);
+#ifdef CONFIG_S390_TICKET_SPINLOCK
+
+void arch_spin_lock_wait(arch_spinlock_t *lp)
+{
+ arch_spinlock_t cur, new;
+ int cpu, owner, count;
+ u8 ticket = 0;
+
+ cpu = smp_processor_id();
+ count = spin_retry;
+ while (1) {
+ new.lock = cur.lock = ACCESS_ONCE(lp->lock);
+ if (new.lock == 0) {
+ /* The lock is free with no waiter, try to get it. */
+ new.tickets.owner = (u16) ~cpu;
+ } else if (!ticket) {
+ /* Try to get a ticket. */
+ new.tickets.tail = (u8)(new.tickets.tail + 1) ? : 1;
+ if (new.tickets.tail == new.tickets.head)
+ /* Overflow, can't get a ticket. */
+ new.tickets.tail = cur.tickets.tail;
+ } else if (new.tickets.head == ticket)
+ new.tickets.owner = (u16) ~cpu;
+ /* Do the atomic update. */
+ if (cur.lock != new.lock &&
+ _raw_compare_and_swap(&lp->lock, cur.lock, new.lock)) {
+ /* Update successful. */
+ if (new.tickets.owner == (u16) ~cpu)
+ return; /* Got the lock. */
+ ticket = new.tickets.tail; /* Got a ticket. */
+ count = 0;
+ }
+ /* Lock could not be acquired yet. */
+ if (count--)
+ continue;
+ count = spin_retry;
+ owner = cur.tickets.owner;
+ if (ticket) {
+ if (owner && smp_vcpu_scheduled(~owner)) {
+ if (MACHINE_IS_LPAR)
+ continue;
+ } else
+ count = 0;
+ }
+ /* Yield the cpu. */
+ if (owner)
+ smp_yield_cpu(~owner);
+ else
+ smp_yield();
+ }
+}
+EXPORT_SYMBOL(arch_spin_lock_wait);
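+
+/*
+ * Standalone userspace sketch (illustration only, not kernel code) of
+ * the ticket increment used above: ticket value 0 is reserved to mean
+ * "no ticket yet" in arch_spin_lock_wait(), so the u8 counter must
+ * wrap from 255 straight to 1.  The GNU "x ? : y" form returns x
+ * unless it is 0, in which case it yields y.
+ *
+ *	#include <assert.h>
+ *	#include <stdint.h>
+ *
+ *	static uint8_t next_ticket(uint8_t t)
+ *	{
+ *		return (uint8_t)(t + 1) ? : 1;
+ *	}
+ *
+ *	int main(void)
+ *	{
+ *		assert(next_ticket(0) == 1);
+ *		assert(next_ticket(254) == 255);
+ *		assert(next_ticket(255) == 1);	(wraps past 0)
+ *		return 0;
+ *	}
+ */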
+
+void arch_spin_unlock_slow(arch_spinlock_t *lp)
+{
+ arch_spinlock_t cur, new;
+
+ do {
+ cur.lock = ACCESS_ONCE(lp->lock);
+ new.lock = 0;
+ if (cur.tickets.head != cur.tickets.tail) {
+ new.tickets.tail = cur.tickets.tail;
+ new.tickets.head = (u8)(cur.tickets.head + 1) ? : 1;
+ new.tickets.owner = 0;
+ }
+ } while (!_raw_compare_and_swap(&lp->lock, cur.lock, new.lock));
+}
+EXPORT_SYMBOL(arch_spin_unlock_slow);
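+
+/*
+ * Standalone userspace trace (illustration only; field names and
+ * layout assumed as in the sketch near the top of the header) of one
+ * lock hand-off between arch_spin_lock_wait() and
+ * arch_spin_unlock_slow():
+ *
+ *	#include <assert.h>
+ *	#include <stdint.h>
+ *
+ *	struct tickets { uint16_t owner; uint8_t head, tail; };
+ *
+ *	int main(void)
+ *	{
+ *		struct tickets lp = { 0, 0, 0 };  (free, no waiters)
+ *
+ *		lp.owner = (uint16_t)~4;          (cpu 4 grabs the free lock)
+ *		lp.tail = (uint8_t)(lp.tail + 1) ? : 1;  (cpu 7 draws ticket 1)
+ *		assert(lp.head == 0 && lp.tail == 1);
+ *
+ *		(cpu 4 unlocks: head != tail, so the lock is passed on
+ *		 instead of being cleared -- the unlock_slow() path above)
+ *		lp.owner = 0;
+ *		lp.head = (uint8_t)(lp.head + 1) ? : 1;
+ *
+ *		assert(lp.head == 1);             (cpu 7's ticket is up)
+ *		lp.owner = (uint16_t)~7;          (cpu 7 becomes the owner)
+ *
+ *		(cpu 7 unlocks: head == tail, word resets to all zero)
+ *		return 0;
+ *	}
+ */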
+
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+ unsigned int cpu = lp->tickets.owner;
+
+ if (cpu != 0) {
+ if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+ !smp_vcpu_scheduled(~cpu))
+ smp_yield_cpu(~cpu);
+ }
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
+#else /* CONFIG_S390_TICKET_SPINLOCK */
+
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
int count = spin_retry;
}
EXPORT_SYMBOL(arch_spin_relax);
+#endif /* CONFIG_S390_TICKET_SPINLOCK */
+
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
int count;