git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
KVM: nVMX: generate MSR_IA32_CR{0,4}_FIXED1 from guest CPUID
authorDavid Matlack <dmatlack@google.com>
Wed, 30 Nov 2016 02:14:09 +0000 (18:14 -0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Thu, 8 Dec 2016 14:31:08 +0000 (15:31 +0100)
MSR_IA32_CR{0,4}_FIXED1 define which bits in CR0 and CR4 are allowed to
be 1 during VMX operation. Since the set of allowed-1 bits is the same
in and out of VMX operation, we can generate these MSRs entirely from
the guest's CPUID. This lets userspace avoid having to save/restore
these MSRs.

This patch also initializes MSR_IA32_CR{0,4}_FIXED1 from the CPU's MSRs
by default. This is saner than the current default of -1ull, which
includes bits that the host CPU does not support.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
arch/x86/kvm/vmx.c

index 2b8d4f5ac246fa3a38a019c6b714b92b176af0d0..fa29585c63ff42470182c44f1f6eb474ad638012 100644 (file)
@@ -2877,16 +2877,18 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
                vmx->nested.nested_vmx_basic |= VMX_BASIC_INOUT;
 
        /*
-        * These MSRs specify bits which the guest must keep fixed (on or off)
+        * These MSRs specify bits which the guest must keep fixed on
         * while L1 is in VMXON mode (in L1's root mode, or running an L2).
         * We picked the standard core2 setting.
         */
 #define VMXON_CR0_ALWAYSON     (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
 #define VMXON_CR4_ALWAYSON     X86_CR4_VMXE
        vmx->nested.nested_vmx_cr0_fixed0 = VMXON_CR0_ALWAYSON;
-       vmx->nested.nested_vmx_cr0_fixed1 = -1ULL;
        vmx->nested.nested_vmx_cr4_fixed0 = VMXON_CR4_ALWAYSON;
-       vmx->nested.nested_vmx_cr4_fixed1 = -1ULL;
+
+       /* These MSRs specify bits which the guest must keep fixed off. */
+       rdmsrl(MSR_IA32_VMX_CR0_FIXED1, vmx->nested.nested_vmx_cr0_fixed1);
+       rdmsrl(MSR_IA32_VMX_CR4_FIXED1, vmx->nested.nested_vmx_cr4_fixed1);
 
        /* highest index: VMX_PREEMPTION_TIMER_VALUE */
        vmx->nested.nested_vmx_vmcs_enum = 0x2e;
@@ -9424,6 +9426,50 @@ static void vmcs_set_secondary_exec_control(u32 new_ctl)
                     (new_ctl & ~mask) | (cur_ctl & mask));
 }
 
+/*
+ * Generate MSR_IA32_VMX_CR{0,4}_FIXED1 according to CPUID. Only set bits
+ * (indicating "allowed-1") if they are supported in the guest's CPUID.
+ */
+static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_cpuid_entry2 *entry;
+
+       vmx->nested.nested_vmx_cr0_fixed1 = 0xffffffff;
+       vmx->nested.nested_vmx_cr4_fixed1 = X86_CR4_PCE;
+
+#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask) do {           \
+       if (entry && (entry->_reg & (_cpuid_mask)))                     \
+               vmx->nested.nested_vmx_cr4_fixed1 |= (_cr4_mask);       \
+} while (0)
+
+       entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+       cr4_fixed1_update(X86_CR4_VME,        edx, bit(X86_FEATURE_VME));
+       cr4_fixed1_update(X86_CR4_PVI,        edx, bit(X86_FEATURE_VME));
+       cr4_fixed1_update(X86_CR4_TSD,        edx, bit(X86_FEATURE_TSC));
+       cr4_fixed1_update(X86_CR4_DE,         edx, bit(X86_FEATURE_DE));
+       cr4_fixed1_update(X86_CR4_PSE,        edx, bit(X86_FEATURE_PSE));
+       cr4_fixed1_update(X86_CR4_PAE,        edx, bit(X86_FEATURE_PAE));
+       cr4_fixed1_update(X86_CR4_MCE,        edx, bit(X86_FEATURE_MCE));
+       cr4_fixed1_update(X86_CR4_PGE,        edx, bit(X86_FEATURE_PGE));
+       cr4_fixed1_update(X86_CR4_OSFXSR,     edx, bit(X86_FEATURE_FXSR));
+       cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
+       cr4_fixed1_update(X86_CR4_VMXE,       ecx, bit(X86_FEATURE_VMX));
+       cr4_fixed1_update(X86_CR4_SMXE,       ecx, bit(X86_FEATURE_SMX));
+       cr4_fixed1_update(X86_CR4_PCIDE,      ecx, bit(X86_FEATURE_PCID));
+       cr4_fixed1_update(X86_CR4_OSXSAVE,    ecx, bit(X86_FEATURE_XSAVE));
+
+       entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
+       cr4_fixed1_update(X86_CR4_FSGSBASE,   ebx, bit(X86_FEATURE_FSGSBASE));
+       cr4_fixed1_update(X86_CR4_SMEP,       ebx, bit(X86_FEATURE_SMEP));
+       cr4_fixed1_update(X86_CR4_SMAP,       ebx, bit(X86_FEATURE_SMAP));
+       cr4_fixed1_update(X86_CR4_PKE,        ecx, bit(X86_FEATURE_PKU));
+       /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
+       cr4_fixed1_update(bit(11),            ecx, bit(2));
+
+#undef cr4_fixed1_update
+}
+
 static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -9465,6 +9511,9 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        else
                to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
                        ~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+
+       if (nested_vmx_allowed(vcpu))
+               nested_vmx_cr_fixed1_bits_update(vcpu);
 }
 
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)