/*
 * Source: arch/s390/lib/spinlock.c from the Linux kernel tree
 * (karo-tx-linux.git), as of the commit
 * "s390/rwlock: add missing local_irq_restore calls".
 */
1 /*
2  *    Out of line spinlock code.
3  *
4  *    Copyright IBM Corp. 2004, 2006
5  *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6  */
7
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <linux/init.h>
12 #include <linux/smp.h>
13 #include <asm/io.h>
14
15 int spin_retry = 1000;
16
17 /**
18  * spin_retry= parameter
19  */
20 static int __init spin_retry_setup(char *str)
21 {
22         spin_retry = simple_strtoul(str, &str, 0);
23         return 1;
24 }
25 __setup("spin_retry=", spin_retry_setup);
26
/*
 * arch_spin_lock_wait - out-of-line slow path for taking a spinlock.
 * @lp: the contended spinlock
 *
 * The lock word is 0 when free; when held it identifies the owning cpu
 * (accessed bit-inverted via ~owner below — presumably the one's
 * complement of the cpu address; confirm against SPINLOCK_LOCKVAL's
 * definition).  Spins until _raw_compare_and_swap installs our value.
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	while (1) {
		owner = lp->lock;
		/* Only busy-wait while the owner vcpu is actually running
		 * (or the lock looks free). */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			count = spin_retry;
			do {
				if (arch_spin_is_locked(lp))
					continue; /* re-test; count-- still runs */
				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
					return; /* lock acquired */
			} while (count-- > 0);
			/* On LPAR there is no hypervisor to yield to, so
			 * just keep spinning. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Owner vcpu appears preempted: ask the hypervisor to
		 * schedule it, then try once more before looping. */
		owner = lp->lock;
		if (owner)
			smp_yield_cpu(~owner);
		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
			return;
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
54
/*
 * arch_spin_lock_wait_flags - slow path for spin_lock_irqsave.
 * @lp:    the contended spinlock
 * @flags: interrupt state saved by the caller before it disabled irqs
 *
 * Spins with interrupts enabled (restored from @flags) and disables them
 * only around each compare-and-swap attempt, so interrupts are not kept
 * off for the whole wait.  On success the function returns with
 * interrupts disabled, matching what spin_lock_irqsave callers expect.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	local_irq_restore(flags);
	while (1) {
		owner = lp->lock;
		/* Only busy-wait while the owner vcpu is actually running
		 * (or the lock looks free). */
		if (!owner || smp_vcpu_scheduled(~owner)) {
			count = spin_retry;
			do {
				if (arch_spin_is_locked(lp))
					continue; /* re-test; count-- still runs */
				local_irq_disable();
				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
					return; /* irqs stay disabled on success */
				local_irq_restore(flags);
			} while (count-- > 0);
			/* On LPAR there is no hypervisor to yield to. */
			if (MACHINE_IS_LPAR)
				continue;
		}
		/* Owner vcpu appears preempted: yield to it, then retry. */
		owner = lp->lock;
		if (owner)
			smp_yield_cpu(~owner);
		local_irq_disable();
		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
			return; /* irqs stay disabled on success */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
87
88 void arch_spin_relax(arch_spinlock_t *lp)
89 {
90         unsigned int cpu = lp->lock;
91         if (cpu != 0) {
92                 if (MACHINE_IS_VM || MACHINE_IS_KVM ||
93                     !smp_vcpu_scheduled(~cpu))
94                         smp_yield_cpu(~cpu);
95         }
96 }
97 EXPORT_SYMBOL(arch_spin_relax);
98
99 int arch_spin_trylock_retry(arch_spinlock_t *lp)
100 {
101         int count;
102
103         for (count = spin_retry; count > 0; count--)
104                 if (arch_spin_trylock_once(lp))
105                         return 1;
106         return 0;
107 }
108 EXPORT_SYMBOL(arch_spin_trylock_retry);
109
/*
 * _raw_read_lock_wait - slow path for acquiring a rwlock for reading.
 * @rw: the contended rwlock
 *
 * The lock word counts readers; a negative value (bit 31 set, cf. the
 * 0x80000000 writer bit used below in _raw_write_lock_wait) means a
 * writer is involved.  Spin until the reader count can be incremented
 * atomically, yielding every spin_retry iterations.
 */
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield(); /* retries exhausted: let other vcpus run */
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) /* write-locked; keep spinning */
			continue;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return; /* reader count bumped: lock acquired */
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
128
/*
 * _raw_read_lock_wait_flags - slow path for read_lock_irqsave.
 * @rw:    the contended rwlock
 * @flags: interrupt state saved by the caller before it disabled irqs
 *
 * Like _raw_read_lock_wait, but spins with interrupts enabled (restored
 * from @flags) and disables them only around each compare-and-swap
 * attempt.  Returns with interrupts disabled on success.
 */
void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield(); /* retries exhausted: let other vcpus run */
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) /* write-locked; keep spinning */
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return; /* irqs stay disabled on success */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait_flags);
150
151 int _raw_read_trylock_retry(arch_rwlock_t *rw)
152 {
153         unsigned int old;
154         int count = spin_retry;
155
156         while (count-- > 0) {
157                 old = ACCESS_ONCE(rw->lock);
158                 if ((int) old < 0)
159                         continue;
160                 if (_raw_compare_and_swap(&rw->lock, old, old + 1))
161                         return 1;
162         }
163         return 0;
164 }
165 EXPORT_SYMBOL(_raw_read_trylock_retry);
166
/*
 * _raw_write_lock_wait - slow path for acquiring a rwlock for writing.
 * @rw: the contended rwlock
 *
 * A writer needs the whole lock word: it swaps 0 -> 0x80000000 (the
 * writer bit), which only succeeds when there are no readers and no
 * writer.  Yields every spin_retry iterations.
 */
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		if (count-- <= 0) {
			smp_yield(); /* retries exhausted: let other vcpus run */
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old) /* readers or a writer present; keep spinning */
			continue;
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return; /* writer bit set: lock acquired */
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
185
/*
 * _raw_write_lock_wait_flags - slow path for write_lock_irqsave.
 * @rw:    the contended rwlock
 * @flags: interrupt state saved by the caller before it disabled irqs
 *
 * Like _raw_write_lock_wait, but spins with interrupts enabled (restored
 * from @flags) and disables them only around each compare-and-swap
 * attempt.  Returns with interrupts disabled on success.
 */
void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
{
	unsigned int old;
	int count = spin_retry;

	local_irq_restore(flags);
	while (1) {
		if (count-- <= 0) {
			smp_yield(); /* retries exhausted: let other vcpus run */
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		if (old) /* readers or a writer present; keep spinning */
			continue;
		local_irq_disable();
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return; /* irqs stay disabled on success */
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait_flags);
207
208 int _raw_write_trylock_retry(arch_rwlock_t *rw)
209 {
210         unsigned int old;
211         int count = spin_retry;
212
213         while (count-- > 0) {
214                 old = ACCESS_ONCE(rw->lock);
215                 if (old)
216                         continue;
217                 if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
218                         return 1;
219         }
220         return 0;
221 }
222 EXPORT_SYMBOL(_raw_write_trylock_retry);