/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
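
/*
 * Compare-and-delay: stop the CPU for a short while as long as the
 * lock word still contains the expected old value, instead of
 * re-reading the contended cache line in a tight loop. This is why
 * spin_retry defaults to only 10 iterations on machines that have
 * the facility (MACHINE_HAS_CAD) and to 1000 otherwise. The raw
 * .insn encoding is presumably used so the file also builds with
 * assemblers that do not know the mnemonic.
 */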
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
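
/*
 * The spinlock word is 0 when free and holds SPINLOCK_LOCKVAL of the
 * owner while taken. The lockval is the bitwise complement of the
 * owning CPU number, so 0 stays unambiguous for "unlocked" and
 * ~owner below recovers the CPU number for the directed yield
 * (smp_yield_cpu).
 */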
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
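
/*
 * Same algorithm as arch_spin_lock_wait, but entered with interrupts
 * disabled and their previous state in @flags. The saved state is
 * restored while busy waiting so pending interrupts can be handled,
 * and interrupts are disabled again around each compare-and-swap so
 * the lock is never acquired with interrupts enabled.
 */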
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
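
/*
 * Bounded trylock slow path: make at most spin_retry acquisition
 * attempts without yielding the CPU, then report failure so the
 * spin_trylock caller can back off.
 */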
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
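
/*
 * rwlock word layout: the most significant bit of rw->lock is the
 * writer bit, the lower 31 bits count the readers, so a negative
 * lock word means a writer owns or is acquiring the lock. rw->owner
 * caches the lockval of the current writer for directed yields.
 * With CONFIG_HAVE_MARCH_Z196_FEATURES the fast path uses the
 * interlocked-access instructions via __RAW_LOCK; a reader that lost
 * the race arrives below with its speculative increment still
 * applied and has to back it out first.
 */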
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/* Back out the speculative reader increment of the fast path. */
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
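
/*
 * Writer slow path, in two flavours. With the z196 interlocked-access
 * instructions the writer bit is set with an atomic OR and the
 * returned previous value shows whether readers are still active;
 * without them a compare-and-swap loop sets the bit. Either way the
 * lock is owned once the reader count has drained to zero and the
 * writer bit was set by this CPU ((int) prev >= 0).
 */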
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
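
/*
 * Directed yield helper, presumably invoked via the arch_*_relax()
 * lock relax macros: @cpu is the lockval of the current lock owner,
 * or 0 if the owner is unknown. As in the spinlock slow path, yield
 * unconditionally under multiple hypervisor layers and rely on the
 * sense running status on LPAR.
 */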
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);