KVM: x86 emulator: Add Virtual-8086 mode of emulation
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a4a7d1892f72e3737e63840fefa5e3bd473c5a15..a28379507d30a44ddf0d8171b8cdab0c77ac4ea8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -456,7 +456,7 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-               if ((vcpu->arch.shadow_efer & EFER_LME)) {
+               if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
@@ -655,7 +655,7 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        }
 
        if (is_paging(vcpu)
-           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
                printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
                kvm_inject_gp(vcpu, 0);
                return;
@@ -686,9 +686,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
-       efer |= vcpu->arch.shadow_efer & EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
 
-       vcpu->arch.shadow_efer = efer;
+       vcpu->arch.efer = efer;
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
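
Note: the shadow_efer -> efer changes scattered through this diff are a plain field rename; the interesting part of this hunk is the EFER_LMA handling. LMA is owned by the processor (it reports whether long mode is actually active), so a guest write to EFER must not be allowed to flip it. A minimal standalone sketch of that merge, with helper names of my own invention:

/*
 * Sketch only -- not kernel code. Shows why set_efer() masks EFER_LMA
 * out of the guest-written value and keeps the live LMA state instead.
 */
#include <stdint.h>

#define EFER_LMA (1ULL << 10)	/* "long mode active", read-only from the guest's view */

static uint64_t merge_efer(uint64_t current_efer, uint64_t new_efer)
{
	new_efer &= ~EFER_LMA;			/* discard whatever the guest wrote for LMA */
	new_efer |= current_efer & EFER_LMA;	/* preserve the state KVM/hardware maintains */
	return new_efer;
}
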
@@ -1426,7 +1426,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
-               data = vcpu->arch.shadow_efer;
+               data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
@@ -1569,6 +1569,7 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_HYPERV:
        case KVM_CAP_HYPERV_VAPIC:
        case KVM_CAP_HYPERV_SPIN:
+       case KVM_CAP_PCI_SEGMENT:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -2770,6 +2771,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                if (vpic) {
                        r = kvm_ioapic_init(kvm);
                        if (r) {
+                               kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
+                                                         &vpic->dev);
                                kfree(vpic);
                                goto create_irqchip_unlock;
                        }
@@ -2781,10 +2784,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = kvm_setup_default_irq_routing(kvm);
                if (r) {
                        mutex_lock(&kvm->irq_lock);
-                       kfree(kvm->arch.vpic);
-                       kfree(kvm->arch.vioapic);
-                       kvm->arch.vpic = NULL;
-                       kvm->arch.vioapic = NULL;
+                       kvm_ioapic_destroy(kvm);
+                       kvm_destroy_pic(kvm);
                        mutex_unlock(&kvm->irq_lock);
                }
        create_irqchip_unlock:
@@ -3347,8 +3348,9 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                vcpu->arch.emulate_ctxt.vcpu = vcpu;
                vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
                vcpu->arch.emulate_ctxt.mode =
+                       (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
                        (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
-                       ? X86EMUL_MODE_REAL : cs_l
+                       ? X86EMUL_MODE_VM86 : cs_l
                        ? X86EMUL_MODE_PROT64 : cs_db
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
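
This hunk is the heart of the Virtual-8086 addition: previously both real mode and VM86 mode fell through to X86EMUL_MODE_REAL, whereas now EFLAGS.VM selects a dedicated X86EMUL_MODE_VM86. A hedged sketch of the resulting decision chain as a standalone helper (enum and function names are mine; the real constants live in the emulator headers):

/* Sketch: same priority order as the conditional expression above. */
enum emul_mode {
	EMUL_MODE_REAL,		/* CR0.PE == 0 */
	EMUL_MODE_VM86,		/* protected mode with EFLAGS.VM set */
	EMUL_MODE_PROT16,
	EMUL_MODE_PROT32,
	EMUL_MODE_PROT64,	/* 64-bit code segment (CS.L == 1) */
};

#define EFLAGS_VM (1UL << 17)	/* X86_EFLAGS_VM */

static enum emul_mode pick_emul_mode(int protmode, unsigned long eflags,
				     int cs_l, int cs_db)
{
	if (!protmode)
		return EMUL_MODE_REAL;
	if (eflags & EFLAGS_VM)
		return EMUL_MODE_VM86;
	if (cs_l)
		return EMUL_MODE_PROT64;
	return cs_db ? EMUL_MODE_PROT32 : EMUL_MODE_PROT16;
}
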
 
@@ -3550,8 +3552,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
        trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
                      size, 1);
 
-       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       memcpy(vcpu->arch.pio_data, &val, 4);
+       if (!vcpu->arch.pio.in) {
+               val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+               memcpy(vcpu->arch.pio_data, &val, 4);
+       }
 
        if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
                complete_pio(vcpu);
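
The new guard copies RAX into the staging buffer only for OUT. For IN, the buffer is filled by the device model and later copied into RAX by complete_pio(), so pre-loading it from RAX was unnecessary and could leak stale register bytes into the exit payload. A small sketch of the intended data flow, with hypothetical names and a 4-byte operand as in the code above:

/* Sketch, not kernel code: data flow of one emulated PIO access. */
#include <stdint.h>
#include <string.h>

struct pio_state {
	int in;			/* 1 = IN (device -> RAX), 0 = OUT (RAX -> device) */
	uint8_t data[4];	/* staging buffer shared with the device model */
};

static void pio_stage(struct pio_state *p, const uint64_t *rax)
{
	if (!p->in)
		memcpy(p->data, rax, 4);	/* OUT: register value feeds the device */
}

static void pio_finish(struct pio_state *p, uint64_t *rax)
{
	if (p->in)
		memcpy(rax, p->data, 4);	/* IN: device result lands in RAX */
}
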
@@ -3887,10 +3891,8 @@ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 {
        char instruction[3];
-       int ret = 0;
        unsigned long rip = kvm_rip_read(vcpu);
 
-
        /*
         * Blow out the MMU to ensure that no other VCPU has an active mapping
         * to ensure that the updated hypercall appears atomically across all
@@ -3899,11 +3901,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
        kvm_mmu_zap_all(vcpu->kvm);
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
-       if (emulator_write_emulated(rip, instruction, 3, vcpu)
-           != X86EMUL_CONTINUE)
-               ret = -EFAULT;
 
-       return ret;
+       return emulator_write_emulated(rip, instruction, 3, vcpu);
 }
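
Two things happen here: ->patch_hypercall() emits the vendor-specific hypercall opcode at the guest RIP, and the write's emulator status is now returned as-is instead of being collapsed into -EFAULT, so the caller sees the real X86EMUL_* code. For context on why the buffer is exactly three bytes (opcode values per the Intel/AMD manuals; array names are mine):

/* Sketch: both vendors' hypercall instructions are 3-byte opcodes. */
static const unsigned char vmcall_insn[3]  = { 0x0f, 0x01, 0xc1 };	/* Intel VMCALL */
static const unsigned char vmmcall_insn[3] = { 0x0f, 0x01, 0xd9 };	/* AMD VMMCALL  */
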
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -4569,7 +4568,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
-       sregs->efer = vcpu->arch.shadow_efer;
+       sregs->efer = vcpu->arch.efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
        memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4662,7 +4661,7 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
        if (dtable.limit < index * 8 + 7) {
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
-               return 1;
+               return X86EMUL_PROPAGATE_FAULT;
        }
        return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
 }
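
Returning X86EMUL_PROPAGATE_FAULT instead of a bare 1 matters because the emulator's status codes are not interchangeable: a literal 1 reads as "unhandleable, punt to userspace", while the #GP queued just above should propagate as a guest fault. As an assumption to be checked against kvm_emulate.h of this era, the values are roughly:

/* Assumed values, mirroring the emulator header of this period. */
#define X86EMUL_CONTINUE	0	/* keep emulating                */
#define X86EMUL_UNHANDLEABLE	1	/* bail out to userspace         */
#define X86EMUL_PROPAGATE_FAULT	2	/* an exception has been queued  */
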
@@ -4697,18 +4696,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
        return kvm_seg.selector;
 }
 
-static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
-                                               u16 selector,
-                                               struct kvm_segment *kvm_seg)
-{
-       struct desc_struct seg_desc;
-
-       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
-               return 1;
-       seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
-       return 0;
-}
-
 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
        struct kvm_segment segvar = {
@@ -4749,11 +4736,14 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                int type_bits, int seg)
 {
        struct kvm_segment kvm_seg;
+       struct desc_struct seg_desc;
 
        if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
                return kvm_load_realmode_segment(vcpu, selector, seg);
-       if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+
+       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
                return 1;
+       seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
 
        kvm_check_segment_descriptor(vcpu, seg, selector);
        kvm_seg.type |= type_bits;
@@ -4764,6 +4754,11 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                        kvm_seg.unusable = 1;
 
        kvm_set_segment(vcpu, &kvm_seg, seg);
+       if (selector && !kvm_seg.unusable && kvm_seg.s) {
+               /* mark segment as accessed */
+               seg_desc.type |= 1;
+               save_guest_segment_descriptor(vcpu, selector, &seg_desc);
+       }
        return 0;
 }
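
The new tail of kvm_load_segment_descriptor() reproduces an architectural side effect: successfully loading a non-system (S=1) segment sets the Accessed bit, which is bit 0 of the descriptor's type field, and the updated descriptor is written back to the guest's GDT/LDT. A tiny hedged sketch of just that bit operation (struct trimmed to the one field used):

/* Sketch: mark a (heavily simplified) segment descriptor as accessed. */
struct tiny_desc {
	unsigned int type : 4;	/* for code/data segments, bit 0 == Accessed */
};

static void mark_accessed(struct tiny_desc *d)
{
	d->type |= 1;		/* what the CPU does on a successful segment load */
}
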
 
@@ -5059,7 +5054,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
-       mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+       mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
@@ -5292,6 +5287,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
+       trace_kvm_fpu(1);
 }
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
@@ -5304,6 +5300,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
        set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
+       trace_kvm_fpu(0);
 }
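
trace_kvm_fpu(1) and trace_kvm_fpu(0) bracket guest-FPU load and unload so the transitions show up in the kvm trace stream. A hedged sketch of what the matching definition in arch/x86/kvm/trace.h plausibly looks like, written from the usual TRACE_EVENT idiom rather than copied from the tree:

/* Sketch of a TRACE_EVENT definition; the in-tree one may differ in detail. */
TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(u32, load)
	),

	TP_fast_assign(
		__entry->load = load;
	),

	TP_printk("%s", __entry->load ? "load" : "unload")
);
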
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)