/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

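/*
 * Kernel command-line knobs: "no-kvmapf", "no-steal-acc" and
 * "no-kvmclock-vsyscall" disable async page faults, steal time
 * accounting and the kvmclock vsyscall mapping, respectively.
 */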
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

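/*
 * Per-CPU memory shared with the host: apf_reason is registered through
 * MSR_KVM_ASYNC_PF_EN and steal_time through MSR_KVM_STEAL_TIME below;
 * both are 64-byte aligned so neither straddles a cache line.
 */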
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

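/* Look up the sleep node for @token in bucket @b; caller must hold b->lock. */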
static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

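/*
 * Invoked with interrupts disabled from the "page not present" async #PF
 * path: sleep (or, where scheduling is unsafe, halt) until the host
 * signals this token ready via kvm_async_pf_task_wake().
 */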
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        local_irq_disable();
                        rcu_irq_enter();
                }
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

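/* Release one waiter: kick a halted vCPU, otherwise wake the sleeping task. */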
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swait_active(&n->wq))
                swake_up(&n->wq);
}

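/* Wake every waiter parked on the current CPU (broadcast token or CPU offline). */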
static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

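/*
 * "Page ready" notification for @token. If it arrives before the
 * corresponding wait, leave a dummy node behind so that
 * kvm_async_pf_task_wait() can return immediately.
 */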
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * Async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another CPU
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

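/*
 * Read the reason the host wrote into the shared apf_reason slot and
 * clear it, so that a nested fault cannot observe a stale value.
 */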
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

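/*
 * Replacement #PF handler while async page faults are enabled: ordinary
 * faults fall through to the normal path, the two KVM reasons are routed
 * to the wait/wake helpers above.
 */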
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                trace_do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

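/*
 * Tell the host where this CPU's steal-time area lives by writing its
 * physical address, with the enable bit set, to MSR_KVM_STEAL_TIME.
 */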
static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for locks or memory barriers.
         * An optimization barrier is implied in the apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

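/*
 * Per-CPU setup: register the async #PF slot, the PV EOI flag and the
 * steal-time area with the host, for each feature it advertises.
 */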
static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                pa |= KVM_ASYNC_PF_ENABLED;

                /* Async page fault support for L1 hypervisor is optional */
                if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
                        (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
                        wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO "KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's
         * memory. The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

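/*
 * Fetch the host-maintained steal time for @cpu. The version field acts
 * as a seqcount: odd means an update is in flight, so retry until the
 * same even value is seen before and after reading 'steal'.
 */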
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

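/*
 * Undo kvm_guest_cpu_init() before this CPU goes down, then release
 * any async #PF waiters still parked on it.
 */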
static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif

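/* Route vector 14 (#PF) to the async_page_fault entry stub. */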
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}

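/*
 * Main guest-side init: install the paravirt hooks, the reboot notifier
 * and the CPU hotplug callbacks, keyed off the features the host
 * advertises via CPUID.
 */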
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

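/*
 * Locate KVM's CPUID leaf range by its "KVMKVMKVM" signature; zero means
 * we are not running on KVM. kvm_cpuid_base() caches the result.
 */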
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

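/*
 * Enable the steal-time static keys consumed by the scheduler's steal
 * time accounting once jump labels can be patched.
 */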
static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

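/*
 * PV spinlock wait: re-check the lock byte with interrupts disabled,
 * then halt until kvm_kick_cpu() wakes us; NMI context bails out early.
 */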
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it's our turn and we get kicked. Note that we do
         * a safe halt for the irqs-enabled case, to avoid a hang when
         * the lock info is overwritten in the irq spinlock slowpath
         * and no spurious interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64, to avoid saving and restoring
 * 8 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq   __per_cpu_offset(,%rdi,8), %rax;"
"cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne  %al;"
"ret;"
".popsection");

#endif

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_lock_ops.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */