/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>
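
/*
 * Number of loop iterations before the lock wait code gives up and
 * yields the CPU. With the compare-and-delay facility each iteration
 * already stalls the CPU for a while, which is presumably why far
 * fewer retries are used there (10 instead of 1000 by default).
 */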
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
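
/*
 * Compare-and-delay: stop the CPU for a short, model-dependent time
 * while the lock word still contains the expected old value. This is
 * cheaper than spinning on plain loads.
 */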
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}
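
/*
 * The lock value is the bitwise complement of the owning CPU number
 * (see SPINLOCK_LOCKVAL), hence the ~owner conversions below. A lock
 * owner counts as preempted if its CPU is neither in enabled wait nor
 * currently backed by a real CPU in the hypervisor.
 */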
static inline int cpu_is_preempted(int cpu)
{
	if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
		return 0;
	if (smp_vcpu_scheduled(cpu))
		return 0;
	return 1;
}
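
/*
 * Slow path of arch_spin_lock: retry compare-and-swap on the lock
 * word, use compare-and-delay in between and ask the hypervisor to
 * run the lock owner whenever it appears to be preempted.
 */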
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
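
/*
 * Same as arch_spin_lock_wait, but for spin_lock_irqsave callers:
 * interrupts stay enabled (with the caller's flags) while waiting and
 * are only disabled around the compare-and-swap attempt itself.
 */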
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
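
/*
 * Bounded trylock: make at most spin_retry attempts to grab the lock
 * and return without yielding the CPU.
 */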
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
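
/*
 * rwlock layout: bit 31 of the lock word is the writer bit, the lower
 * 31 bits hold the reader count. A negative lock value therefore
 * means a writer holds or is about to take the lock.
 */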
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
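
/*
 * With the z196 interlocked-access facility the writer bit can be set
 * by an atomic load-and-or (__RAW_LOCK with __RAW_OP_OR) that also
 * returns the previous lock value, avoiding a compare-and-swap loop.
 */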
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
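
/*
 * Fallback without the interlocked-access facility: set the writer
 * bit with compare-and-swap; prev records the pre-OR lock value once
 * this CPU has managed to set the bit.
 */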
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && cpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
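
/*
 * Yield to a preempted lock owner; cpu is the lock value, i.e. the
 * complement of the owning CPU number. On LPAR the yield is skipped
 * if the owner is known to be running.
 */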
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);