diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 63b0ec8d3d4a4d8856ed21f7f9a174d7732c5ce0..8dc44662394bebadcbbcbb70e6e938adbb2e8ced 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/hardirq.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/hash.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/kprobes.h>
 #include <asm/timer.h>
+#include <asm/cpu.h>
+#include <asm/traps.h>
+#include <asm/desc.h>
+#include <asm/tlbflush.h>
 
 #define MMU_QUEUE_SIZE 1024
 
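+/*
+ * Async page fault handling is enabled by default; booting with
+ * "no-kvmapf" on the kernel command line turns it off.
+ */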
+static int kvmapf = 1;
+
+static int parse_no_kvmapf(char *arg)
+{
+        kvmapf = 0;
+        return 0;
+}
+
+early_param("no-kvmapf", parse_no_kvmapf);
+
 struct kvm_para_state {
        u8 mmu_queue[MMU_QUEUE_SIZE];
        int mmu_queue_len;
 };
 
 static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 
 static struct kvm_para_state *kvm_para_state(void)
 {
@@ -50,6 +71,195 @@ static void kvm_io_delay(void)
 {
 }
 
+#define KVM_TASK_SLEEP_HASHBITS 8
+#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)
+
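+/*
+ * Tasks waiting for a "page ready" notification sleep on a small hash
+ * table of wait queues, keyed by the token the host delivers in CR2.
+ */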
+struct kvm_task_sleep_node {
+       struct hlist_node link;
+       wait_queue_head_t wq;
+       u32 token;
+       int cpu;
+       bool halted;
+       struct mm_struct *mm;
+};
+
+static struct kvm_task_sleep_head {
+       spinlock_t lock;
+       struct hlist_head list;
+} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];
+
+static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
+                                                 u32 token)
+{
+       struct hlist_node *p;
+
+       hlist_for_each(p, &b->list) {
+               struct kvm_task_sleep_node *n =
+                       hlist_entry(p, typeof(*n), link);
+               if (n->token == token)
+                       return n;
+       }
+
+       return NULL;
+}
+
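+/*
+ * Called from the #PF handler on KVM_PV_REASON_PAGE_NOT_PRESENT.  If the
+ * "page ready" wake up already arrived, the dummy entry is dropped and we
+ * return immediately; otherwise the task sleeps (or halts, when it cannot
+ * schedule) until kvm_async_pf_task_wake() unhashes its node.
+ */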
+void kvm_async_pf_task_wait(u32 token)
+{
+       u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+       struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+       struct kvm_task_sleep_node n, *e;
+       DEFINE_WAIT(wait);
+       int cpu, idle;
+
+       cpu = get_cpu();
+       idle = idle_cpu(cpu);
+       put_cpu();
+
+       spin_lock(&b->lock);
+       e = _find_apf_task(b, token);
+       if (e) {
+               /* dummy entry exists -> wake up was delivered ahead of PF */
+               hlist_del(&e->link);
+               kfree(e);
+               spin_unlock(&b->lock);
+               return;
+       }
+
+       n.token = token;
+       n.cpu = smp_processor_id();
+       n.mm = current->active_mm;
+       n.halted = idle || preempt_count() > 1;
+       atomic_inc(&n.mm->mm_count);
+       init_waitqueue_head(&n.wq);
+       hlist_add_head(&n.link, &b->list);
+       spin_unlock(&b->lock);
+
+       for (;;) {
+               if (!n.halted)
+                       prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+               if (hlist_unhashed(&n.link))
+                       break;
+
+               if (!n.halted) {
+                       local_irq_enable();
+                       schedule();
+                       local_irq_disable();
+               } else {
+                       /*
+                        * We cannot reschedule. So halt.
+                        */
+                       native_safe_halt();
+                       local_irq_disable();
+               }
+       }
+       if (!n.halted)
+               finish_wait(&n.wq, &wait);
+
+       return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
+
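+/*
+ * Unhash one sleeper.  A dummy entry (no mm) has nobody to wake; otherwise
+ * drop the mm reference and wake the task, using a reschedule IPI if it
+ * went to sleep by halting.
+ */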
+static void apf_task_wake_one(struct kvm_task_sleep_node *n)
+{
+       hlist_del_init(&n->link);
+       if (!n->mm)
+               return;
+       mmdrop(n->mm);
+       if (n->halted)
+               smp_send_reschedule(n->cpu);
+       else if (waitqueue_active(&n->wq))
+               wake_up(&n->wq);
+}
+
+static void apf_task_wake_all(void)
+{
+       int i;
+
+       for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
+               struct hlist_node *p, *next;
+               struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
+               spin_lock(&b->lock);
+               hlist_for_each_safe(p, next, &b->list) {
+                       struct kvm_task_sleep_node *n =
+                               hlist_entry(p, typeof(*n), link);
+                       if (n->cpu == smp_processor_id())
+                               apf_task_wake_one(n);
+               }
+               spin_unlock(&b->lock);
+       }
+}
+
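+/*
+ * Called from the #PF handler on KVM_PV_REASON_PAGE_READY.  A token of ~0
+ * wakes every task that went to sleep on this CPU; otherwise wake the task
+ * waiting on the token, or leave a dummy entry behind if the notification
+ * raced ahead of the corresponding fault.
+ */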
+void kvm_async_pf_task_wake(u32 token)
+{
+       u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
+       struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
+       struct kvm_task_sleep_node *n;
+
+       if (token == ~0) {
+               apf_task_wake_all();
+               return;
+       }
+
+again:
+       spin_lock(&b->lock);
+       n = _find_apf_task(b, token);
+       if (!n) {
+               /*
+                * async PF was not yet handled.
+                * Add dummy entry for the token.
+                */
+               n = kmalloc(sizeof(*n), GFP_ATOMIC);
+               if (!n) {
+                       /*
+                        * Allocation failed! Busy wait while other cpu
+                        * handles async PF.
+                        */
+                       spin_unlock(&b->lock);
+                       cpu_relax();
+                       goto again;
+               }
+               n->token = token;
+               n->cpu = smp_processor_id();
+               n->mm = NULL;
+               init_waitqueue_head(&n->wq);
+               hlist_add_head(&n->link, &b->list);
+       } else
+               apf_task_wake_one(n);
+       spin_unlock(&b->lock);
+       return;
+}
+EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
+
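+/*
+ * Fetch the fault reason from the per-cpu area shared with the host and
+ * clear it, so a subsequent fault is not misread.  Returns 0 when async PF
+ * is not enabled on this CPU.
+ */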
+u32 kvm_read_and_reset_pf_reason(void)
+{
+       u32 reason = 0;
+
+       if (__get_cpu_var(apf_reason).enabled) {
+               reason = __get_cpu_var(apf_reason).reason;
+               __get_cpu_var(apf_reason).reason = 0;
+       }
+
+       return reason;
+}
+EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
+
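+/*
+ * Replacement #PF handler: a zero reason means an ordinary page fault and
+ * is passed on to do_page_fault(); the two paravirtual reasons carry their
+ * token in CR2 instead of a fault address.
+ */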
+dotraplinkage void __kprobes
+do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
+{
+       switch (kvm_read_and_reset_pf_reason()) {
+       default:
+               do_page_fault(regs, error_code);
+               break;
+       case KVM_PV_REASON_PAGE_NOT_PRESENT:
+               /* page is swapped out by the host. */
+               kvm_async_pf_task_wait((u32)read_cr2());
+               break;
+       case KVM_PV_REASON_PAGE_READY:
+               kvm_async_pf_task_wake((u32)read_cr2());
+               break;
+       }
+}
+
 static void kvm_mmu_op(void *buffer, unsigned len)
 {
        int r;
@@ -231,10 +441,117 @@ static void __init paravirt_ops_setup(void)
 #endif
 }
 
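+/*
+ * Enable async PF delivery for this CPU by handing the host the physical
+ * address of its apf_reason area via MSR_KVM_ASYNC_PF_EN.  On preemptible
+ * kernels, KVM_ASYNC_PF_SEND_ALWAYS asks for notifications even while the
+ * guest is running in kernel mode.
+ */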
+void __cpuinit kvm_guest_cpu_init(void)
+{
+       if (!kvm_para_available())
+               return;
+
+       if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
+               u64 pa = __pa(&__get_cpu_var(apf_reason));
+
+#ifdef CONFIG_PREEMPT
+               pa |= KVM_ASYNC_PF_SEND_ALWAYS;
+#endif
+               wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
+               __get_cpu_var(apf_reason).enabled = 1;
+               printk(KERN_INFO"KVM setup async PF for cpu %d\n",
+                      smp_processor_id());
+       }
+}
+
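+/*
+ * Tear down async PF on this CPU (reboot and CPU offline paths), so the
+ * host stops writing to the shared apf_reason area.
+ */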
+static void kvm_pv_disable_apf(void *unused)
+{
+       if (!__get_cpu_var(apf_reason).enabled)
+               return;
+
+       wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
+       __get_cpu_var(apf_reason).enabled = 0;
+
+       printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
+              smp_processor_id());
+}
+
+static int kvm_pv_reboot_notify(struct notifier_block *nb,
+                               unsigned long code, void *unused)
+{
+       if (code == SYS_RESTART)
+               on_each_cpu(kvm_pv_disable_apf, NULL, 1);
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block kvm_pv_reboot_nb = {
+       .notifier_call = kvm_pv_reboot_notify,
+};
+
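+/*
+ * On SMP, async PF has to be set up on every CPU as it comes online and
+ * torn down before it goes offline, hence the boot CPU hook and the CPU
+ * hotplug notifier below.
+ */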
+#ifdef CONFIG_SMP
+static void __init kvm_smp_prepare_boot_cpu(void)
+{
+#ifdef CONFIG_KVM_CLOCK
+       WARN_ON(kvm_register_clock("primary cpu clock"));
+#endif
+       kvm_guest_cpu_init();
+       native_smp_prepare_boot_cpu();
+}
+
+static void kvm_guest_cpu_online(void *dummy)
+{
+       kvm_guest_cpu_init();
+}
+
+static void kvm_guest_cpu_offline(void *dummy)
+{
+       kvm_pv_disable_apf(NULL);
+       apf_task_wake_all();
+}
+
+static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       int cpu = (unsigned long)hcpu;
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_DOWN_FAILED:
+       case CPU_ONLINE_FROZEN:
+               smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
+               break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+        .notifier_call  = kvm_cpu_notify,
+};
+#endif
+
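+/* Route the page fault vector (14) to the async-aware entry point. */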
+static void __init kvm_apf_trap_init(void)
+{
+       set_intr_gate(14, &async_page_fault);
+}
+
 void __init kvm_guest_init(void)
 {
+       int i;
+
        if (!kvm_para_available())
                return;
 
        paravirt_ops_setup();
+       register_reboot_notifier(&kvm_pv_reboot_nb);
+       for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
+               spin_lock_init(&async_pf_sleepers[i].lock);
+       if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
+               x86_init.irqs.trap_init = kvm_apf_trap_init;
+
+#ifdef CONFIG_SMP
+       smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
+       register_cpu_notifier(&kvm_cpu_notifier);
+#else
+       kvm_guest_cpu_init();
+#endif
 }