/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
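/*
 * Overview (summarizing the code below): when a ticket spinlock stays
 * contended, xen_lock_spinning() publishes the (lock, want) pair this
 * CPU is waiting for and blocks in xen_poll_irq() on a per-CPU event
 * channel; xen_unlock_kick() scans waiting_cpus on release and sends
 * an IPI to the CPU whose ticket has come up.  Both hooks are wired
 * into pv_lock_ops in xen_init_spinlocks().
 */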
#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>

#include <asm/paravirt.h>

#include <xen/interface/xen.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "debugfs.h"

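/*
 * Contention statistics, exported via debugfs below: how often the
 * lock/unlock slowpaths were taken, how often the slowpath found the
 * lock already free (pickup), and how often xen_poll_irq() returned
 * without the kick event pending (spurious).
 */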
enum xen_contention_stat {
	TAKEN_SLOW,
	TAKEN_SLOW_PICKUP,
	TAKEN_SLOW_SPURIOUS,
	RELEASED_SLOW,
	RELEASED_SLOW_KICKED,
	NR_CONTENTION_STATS
};


#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS	30
static struct xen_spinlock_stats
{
	u32 contention_stats[NR_CONTENTION_STATS];
	u32 histo_spin_blocked[HISTO_BUCKETS+1];
	u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;

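/*
 * Writing a non-zero value to the debugfs "zero_stats" file requests a
 * reset; the next stats update claims the flag with cmpxchg() and
 * clears the counters, so the reset happens exactly once even if
 * several CPUs race here.
 */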
static inline void check_zero(void)
{
	u8 ret;
	u8 old = ACCESS_ONCE(zero_stats);
	if (unlikely(old)) {
		ret = cmpxchg(&zero_stats, old, 0);
		/* This ensures only one CPU resets the stats */
		if (ret == old)
			memset(&spinlock_stats, 0, sizeof(spinlock_stats));
	}
}

static inline void add_stats(enum xen_contention_stat var, u32 val)
{
	check_zero();
	spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
	return xen_clocksource_read();
}

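/*
 * Histogram helper: bucket a blocked-time delta by its ilog2(), so
 * bucket N covers roughly [2^N, 2^(N+1)).  The deltas come from
 * xen_clocksource_read(); assuming that counts nanoseconds, the
 * buckets are ns-scaled, with everything at or beyond 2^HISTO_BUCKETS
 * lumped into the final overflow bucket.
 */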
static void __spin_time_accum(u64 delta, u32 *array)
{
	unsigned index = ilog2(delta);

	check_zero();

	if (index < HISTO_BUCKETS)
		array[index]++;
	else
		array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
	u32 delta = xen_clocksource_read() - start;

	__spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
	spinlock_stats.time_blocked += delta;
}
#else  /* !CONFIG_XEN_DEBUG_FS */
static inline void add_stats(enum xen_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
	return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_XEN_DEBUG_FS */

struct xen_lock_waiting {
	struct arch_spinlock *lock;
	__ticket_t want;
};

static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;

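/*
 * Slowpath entry, reached when the ticket-lock fastpath has spun too
 * long without getting the lock: publish the (lock, want) pair in this
 * CPU's lock_waiting slot, mark the lock as having slowpath waiters,
 * re-check for a last-moment pickup, and otherwise block in
 * xen_poll_irq() until xen_unlock_kick() (or a spurious event) wakes
 * us.  Wrapped by PV_CALLEE_SAVE_REGS_THUNK() below so the fastpath
 * can call it without the usual C-call register clobbers.
 */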
static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);
	struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
	int cpu = smp_processor_id();
	u64 start;
	unsigned long flags;

	/* If the kicker interrupt is not yet initialized, just spin */
	if (irq == -1)
		return;

	start = spin_time_start();

	/*
	 * Make sure an interrupt handler can't upset things in a
	 * partially setup state.
	 */
	local_irq_save(flags);
	/*
	 * We don't really care if we're overwriting some other
	 * (lock,want) pair, as that would mean that we're currently
	 * in an interrupt context, and the outer context had
	 * interrupts enabled.  That has already kicked the VCPU out
	 * of xen_poll_irq(), so it will just return spuriously and
	 * retry with the newly set up (lock,want).
	 *
	 * The ordering protocol on this is that the "lock" pointer
	 * may only be set non-NULL if the "want" ticket is correct.
	 * If we're updating "want", we must first clear "lock".
	 */
	w->lock = NULL;
	smp_wmb();
	w->want = want;
	smp_wmb();
	w->lock = lock;

	/* This uses set_bit, which is atomic and therefore a barrier */
	cpumask_set_cpu(cpu, &waiting_cpus);
	add_stats(TAKEN_SLOW, 1);

	/* clear pending */
	xen_clear_irq_pending(irq);

	/* Only check the lock once the pending event is cleared */
	barrier();

	/*
	 * Mark entry to the slowpath before doing the pickup test to
	 * make sure we don't deadlock with an unlocker.
	 */
	__ticket_enter_slowpath(lock);

	/*
	 * Check again to make sure the lock didn't become free while
	 * we weren't looking.
	 */
	if (ACCESS_ONCE(lock->tickets.head) == want) {
		add_stats(TAKEN_SLOW_PICKUP, 1);
		goto out;
	}

	/* Allow interrupts while blocked */
	local_irq_restore(flags);

	/*
	 * If an interrupt happens here, it will leave the wakeup irq
	 * pending, which will cause xen_poll_irq() to return
	 * immediately.
	 */

	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
	xen_poll_irq(irq);
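	/*
	 * If the kick event isn't pending when we come back, the wakeup
	 * was spurious, e.g. a stale (lock, want) pair in our reused
	 * slot was kicked; count it.
	 */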
	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));

	local_irq_save(flags);

	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
out:
	cpumask_clear_cpu(cpu, &waiting_cpus);
	w->lock = NULL;

	local_irq_restore(flags);

	spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);

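/*
 * Release-side slowpath: called when an unlocked ticket lock still has
 * the slowpath flag set (see __ticket_enter_slowpath() above).  Scan
 * the waiting CPUs for one whose published (lock, want) matches the
 * ticket being handed over, and kick it; at most one CPU can hold any
 * given ticket, so the scan can stop at the first match.
 */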
static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
	int cpu;

	add_stats(RELEASED_SLOW, 1);

	for_each_cpu(cpu, &waiting_cpus) {
		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);

		/* Make sure we read lock before want */
		if (ACCESS_ONCE(w->lock) == lock &&
		    ACCESS_ONCE(w->want) == next) {
			add_stats(RELEASED_SLOW_KICKED, 1);
			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
			break;
		}
	}
}

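/*
 * The per-CPU kicker event channel is only ever polled with
 * xen_poll_irq() and is kept disabled (see disable_irq() in
 * xen_init_lock_cpu()), so this handler should never actually run.
 */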
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
	BUG();
	return IRQ_HANDLED;
}

void xen_init_lock_cpu(int cpu)
{
	int irq;
	char *name;

	WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
	     cpu, per_cpu(lock_kicker_irq, cpu));

	/*
	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
	 * (xen: disable PV spinlocks on HVM)
	 */
	if (xen_hvm_domain())
		return;

	name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
	irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
				     cpu,
				     dummy_handler,
				     IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				     name,
				     NULL);

	if (irq >= 0) {
		disable_irq(irq); /* make sure it's never delivered */
		per_cpu(lock_kicker_irq, cpu) = irq;
		per_cpu(irq_name, cpu) = name;
	}

	printk(KERN_DEBUG "cpu %d spinlock event irq %d\n", cpu, irq);
}

void xen_uninit_lock_cpu(int cpu)
{
	/*
	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
	 * (xen: disable PV spinlocks on HVM)
	 */
	if (xen_hvm_domain())
		return;

	unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL);
	per_cpu(lock_kicker_irq, cpu) = -1;
	kfree(per_cpu(irq_name, cpu));
	per_cpu(irq_name, cpu) = NULL;
}

static bool xen_pvspin __initdata = true;

void __init xen_init_spinlocks(void)
{
	/*
	 * See git commit f10cd522c5fbfec9ae3cc01967868c9c2401ed23
	 * (xen: disable PV spinlocks on HVM)
	 */
	if (xen_hvm_domain())
		return;

	if (!xen_pvspin) {
		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
		return;
	}

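	/*
	 * Flip the static key so the ticket-lock paths start checking
	 * for slowpath waiters; roughly (from the paravirt ticketlock
	 * series, not this file):
	 *
	 *	if (static_key_false(&paravirt_ticketlocks_enabled))
	 *		__ticket_unlock_slowpath(lock, prev);
	 */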
	static_key_slow_inc(&paravirt_ticketlocks_enabled);

	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
	pv_lock_ops.unlock_kick = xen_unlock_kick;
}

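/* Booting with "xen_nopvspin" on the command line leaves the native ticketlock paths in place. */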
static __init int xen_parse_nopvspin(char *arg)
{
	xen_pvspin = false;
	return 0;
}
early_param("xen_nopvspin", xen_parse_nopvspin);

#ifdef CONFIG_XEN_DEBUG_FS

static struct dentry *d_spin_debug;

static int __init xen_spinlock_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	if (d_xen == NULL)
		return -ENOMEM;

	d_spin_debug = debugfs_create_dir("spinlocks", d_xen);

	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);

	debugfs_create_u32("released_slow", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
			   &spinlock_stats.time_blocked);

	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

	return 0;
}
fs_initcall(xen_spinlock_debugfs);

#endif	/* CONFIG_XEN_DEBUG_FS */