KVM: x86 emulator: Add Virtual-8086 mode of emulation
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 322c2c5f9bc4a6b2b28b6e322c2e9b123b6574d9..a28379507d30a44ddf0d8171b8cdab0c77ac4ea8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -430,12 +430,16 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        cr0 |= X86_CR0_ET;
 
-       if (cr0 & CR0_RESERVED_BITS) {
+#ifdef CONFIG_X86_64
+       if (cr0 & 0xffffffff00000000UL) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
                       cr0, kvm_read_cr0(vcpu));
                kvm_inject_gp(vcpu, 0);
                return;
        }
+#endif
+
+       cr0 &= ~CR0_RESERVED_BITS;
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
                printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
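Note: the reserved-bit policy changes in this hunk. On x86_64 a set bit in CR0[63:32] still injects #GP, but the architecturally reserved low bits are now silently masked off instead of faulting, which matches how real CPUs treat MOV to CR0. A minimal sketch of the effect (hypothetical guest value; bit 28 is one of the reserved low bits):

        cr0 = X86_CR0_PE | (1UL << 28); /* reserved low bit set */
        kvm_set_cr0(vcpu, cr0);         /* old: #GP injected; new: bit 28 silently dropped */
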
@@ -452,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-               if ((vcpu->arch.shadow_efer & EFER_LME)) {
+               if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
@@ -651,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        }
 
        if (is_paging(vcpu)
-           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
@@ -682,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
-       efer |= vcpu->arch.shadow_efer & EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
 
-       vcpu->arch.shadow_efer = efer;
+       vcpu->arch.efer = efer;
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
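Note: besides the mechanical shadow_efer -> efer rename that runs through the rest of this diff, this hunk keeps EFER.LMA under host control: the guest-supplied value is stripped of LMA and the current bit is carried over, since LMA is an output derived from LME plus paging state rather than something the guest may set directly. A hypothetical helper showing that derivation (illustration only, not part of this patch):

        static bool guest_efer_lma(struct kvm_vcpu *vcpu)
        {
                return (vcpu->arch.efer & EFER_LME) && is_paging(vcpu);
        }
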
@@ -1422,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
-               data = vcpu->arch.shadow_efer;
+               data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
@@ -1565,6 +1569,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_HYPERV:
        case KVM_CAP_HYPERV_VAPIC:
        case KVM_CAP_HYPERV_SPIN:
+       case KVM_CAP_PCI_SEGMENT:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
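Note: KVM_CAP_PCI_SEGMENT is now advertised, presumably so userspace can tell that assigned devices may live on PCI segments other than 0. Userspace discovers capabilities through KVM_CHECK_EXTENSION on the /dev/kvm descriptor; a minimal probe might look like this (illustrative, kvm_fd is an open /dev/kvm fd):

        #include <sys/ioctl.h>
        #include <linux/kvm.h>

        int has_pci_segment = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PCI_SEGMENT);
        /* > 0 means the capability is supported */
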
@@ -2766,6 +2771,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                if (vpic) {
                        r = kvm_ioapic_init(kvm);
                        if (r) {
+                               kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
+                                                         &vpic->dev);
                                kfree(vpic);
                                goto create_irqchip_unlock;
                        }
@@ -2777,10 +2784,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = kvm_setup_default_irq_routing(kvm);
                if (r) {
                        mutex_lock(&kvm->irq_lock);
-                       kfree(kvm->arch.vpic);
-                       kfree(kvm->arch.vioapic);
-                       kvm->arch.vpic = NULL;
-                       kvm->arch.vioapic = NULL;
+                       kvm_ioapic_destroy(kvm);
+                       kvm_destroy_pic(kvm);
                        mutex_unlock(&kvm->irq_lock);
                }
        create_irqchip_unlock:
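Note: both error paths fix teardown bugs. A failed kvm_ioapic_init() used to leak the PIC's registration on the PIO bus, and a failed kvm_setup_default_irq_routing() freed vpic/vioapic with bare kfree() while both were still registered on their buses; the dedicated destructors unwind that properly. The general pattern, sketched with hypothetical names:

        r = register_a();
        if (r)
                goto out;
        r = register_b();
        if (r)
                goto undo_a;
        return 0;
undo_a:
        unregister_a();         /* undo in reverse order of setup */
out:
        return r;
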
@@ -3265,34 +3270,20 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
        kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+       kvm_x86_ops->fpu_activate(vcpu);
        return X86EMUL_CONTINUE;
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-       switch (dr) {
-       case 0 ... 3:
-               *dest = kvm_x86_ops->get_dr(vcpu, dr);
-               return X86EMUL_CONTINUE;
-       default:
-               pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
-               return X86EMUL_UNHANDLEABLE;
-       }
+       return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
-       int exception;
 
-       kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
-       if (exception) {
-               /* FIXME: better handling */
-               return X86EMUL_UNHANDLEABLE;
-       }
-       return X86EMUL_CONTINUE;
+       return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
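Note: the debug-register wrappers collapse because the backend contract changed; kvm_x86_ops now reports success or failure itself instead of through an out-parameter. The signatures implied by these call sites are:

        /* sketch of the new ops, inferred from the calls above */
        int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest);
        int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value);

The mask in emulator_set_dr() still truncates debug-register writes to 32 bits outside 64-bit mode.
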
@@ -3357,8 +3348,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                vcpu->arch.emulate_ctxt.vcpu = vcpu;
                vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
                vcpu->arch.emulate_ctxt.mode =
+                       (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
                        (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
-                       ? X86EMUL_MODE_REAL : cs_l
+                       ? X86EMUL_MODE_VM86 : cs_l
                        ? X86EMUL_MODE_PROT64 : cs_db
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
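This hunk is the heart of the patch: with CR0.PE checked first, EFLAGS.VM now selects the new X86EMUL_MODE_VM86 instead of being folded into real mode. The chained conditionals unroll to this decision order (mode and eflags are illustrative locals; cs_l/cs_db as above):

        if (!is_protmode(vcpu))
                mode = X86EMUL_MODE_REAL;       /* CR0.PE = 0 */
        else if (eflags & X86_EFLAGS_VM)
                mode = X86EMUL_MODE_VM86;       /* PE = 1 and EFLAGS.VM = 1 */
        else if (cs_l)
                mode = X86EMUL_MODE_PROT64;     /* 64-bit code segment */
        else if (cs_db)
                mode = X86EMUL_MODE_PROT32;
        else
                mode = X86EMUL_MODE_PROT16;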
 
@@ -3560,8 +3552,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
        trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
                      size, 1);
 
-       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       memcpy(vcpu->arch.pio_data, &val, 4);
+       if (!vcpu->arch.pio.in) {
+               val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+               memcpy(vcpu->arch.pio_data, &val, 4);
+       }
 
        if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
                complete_pio(vcpu);
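Note: the new guard fixes a direction bug; RAX should seed the PIO buffer only for an OUT, whereas an IN previously clobbered pio_data with stale RAX before the device had filled it. The data flow, as a sketch:

        /* OUT: guest RAX -> pio_data -> device
         * IN : device    -> pio_data -> guest RAX (copied back in complete_pio) */
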
@@ -3796,8 +3790,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
         * hypercall generates UD from non zero cpl and real mode
         * per HYPER-V spec
         */
-       if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
-           !kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 0;
        }
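Note: several hunks in this diff replace open-coded PE tests with is_protmode(). Presumably the helper is the obvious wrapper over the removed expression (sketch; defined elsewhere in KVM):

        static inline int is_protmode(struct kvm_vcpu *vcpu)
        {
                return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
        }
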
@@ -3898,10 +3891,8 @@ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 {
        char instruction[3];
-       int ret = 0;
        unsigned long rip = kvm_rip_read(vcpu);
 
-
        /*
         * Blow out the MMU to ensure that no other VCPU has an active mapping
         * to ensure that the updated hypercall appears atomically across all
@@ -3910,11 +3901,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
        kvm_mmu_zap_all(vcpu->kvm);
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
-       if (emulator_write_emulated(rip, instruction, 3, vcpu)
-           != X86EMUL_CONTINUE)
-               ret = -EFAULT;
 
-       return ret;
+       return emulator_write_emulated(rip, instruction, 3, vcpu);
 }
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -4250,7 +4238,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       kvm_load_guest_fpu(vcpu);
+       if (vcpu->fpu_active)
+               kvm_load_guest_fpu(vcpu);
 
        local_irq_disable();
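Note: together with the emulate_clts() and kvm_put_guest_fpu() hunks, this makes FPU switching lazier; the fpu_active test moves out of kvm_load_guest_fpu() into the entry path, CLTS emulation re-activates the FPU, and deactivation is deferred through a request bit. The lifecycle these hunks imply, as a sketch:

        /* CLTS emulated       -> kvm_x86_ops->fpu_activate()  : fpu_active = 1
         * vcpu_enter_guest()  -> kvm_load_guest_fpu()         : only if fpu_active
         * kvm_put_guest_fpu() -> set KVM_REQ_DEACTIVATE_FPU   : disable deferred */
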
 
@@ -4579,7 +4568,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
-       sregs->efer = vcpu->arch.shadow_efer;
+       sregs->efer = vcpu->arch.efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
        memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4672,7 +4661,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
        if (dtable.limit < index * 8 + 7) {
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
-               return 1;
+               return X86EMUL_PROPAGATE_FAULT;
        }
        return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
 }
@@ -4707,18 +4696,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
        return kvm_seg.selector;
 }
 
-static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
-                                               u16 selector,
-                                               struct kvm_segment *kvm_seg)
-{
-       struct desc_struct seg_desc;
-
-       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
-               return 1;
-       seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
-       return 0;
-}
-
 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
        struct kvm_segment segvar = {
@@ -4759,11 +4736,14 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                int type_bits, int seg)
 {
        struct kvm_segment kvm_seg;
+       struct desc_struct seg_desc;
 
-       if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+       if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
                return kvm_load_realmode_segment(vcpu, selector, seg);
-       if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+
+       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
                return 1;
+       seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
 
        kvm_check_segment_descriptor(vcpu, seg, selector);
        kvm_seg.type |= type_bits;
@@ -4774,6 +4754,11 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                        kvm_seg.unusable = 1;
 
        kvm_set_segment(vcpu, &kvm_seg, seg);
+       if (selector && !kvm_seg.unusable && kvm_seg.s) {
+               /* mark segment as accessed */
+               seg_desc.type |= 1;
+               save_guest_segment_descriptor(vcpu, selector, &seg_desc);
+       }
        return 0;
 }
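Note: load_segment_descriptor_to_kvm_desct() is inlined so the raw desc_struct stays available for the new write-back, which sets the descriptor's accessed flag the way a real CPU does on a successful load, but only for usable code/data (S=1) segments with a non-null selector; load_guest_segment_descriptor() correspondingly returns X86EMUL_PROPAGATE_FAULT so the queued #GP can propagate. For reference, the field being modified:

        /* code/data descriptor (S=1) type field, low 4 bits:
         *   bit 3: code (1) / data (0)
         *   bit 2: conforming (code) / expand-down (data)
         *   bit 1: readable (code) / writable (data)
         *   bit 0: accessed  <- set by the hunk above */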
 
@@ -5069,7 +5054,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
-       mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+       mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
@@ -5112,7 +5097,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        /* Older userspace won't unhalt the vcpu on reset. */
        if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
-           !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+           !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
        vcpu_put(vcpu);
@@ -5296,14 +5281,14 @@ EXPORT_SYMBOL_GPL(fx_init);
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+       if (vcpu->guest_fpu_loaded)
                return;
 
        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
+       trace_kvm_fpu(1);
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
@@ -5315,8 +5300,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
        set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
+       trace_kvm_fpu(0);
 }
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {