return result;
}
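+/*
+ * Return the vector of the highest-priority interrupt pending in the
+ * IRR, or -1 if the IRR is empty.  Note the asymmetry: with no
+ * in-kernel APIC this returns 0, so callers are expected to check
+ * kvm_lapic_enabled() first.
+ */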
+int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
+{
+ struct kvm_lapic *apic = vcpu->apic;
+ int highest_irr;
+
+ if (!apic)
+ return 0;
+ highest_irr = apic_find_highest_irr(apic);
+
+ return highest_irr;
+}
+EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
+
int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig)
{
if (!apic_test_and_set_irr(vec, apic)) {
break;
default:
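+ /*
+ * With the hardware TPR shadow, guest CR8 writes no longer trap,
+ * so recompute the PPR before handing back register contents that
+ * depend on it (notably APIC_PROCPRI).
+ */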
+ apic_update_ppr(apic);
val = apic_get_reg(apic, offset);
break;
}
return (tpr & 0xf0) >> 4;
}
+EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
return ret;
}
+EXPORT_SYMBOL_GPL(kvm_lapic_enabled);
/*
*----------------------------------------------------------------------
if (!apic || !apic_enabled(apic))
return -1;
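+ /* Likewise, refresh the PPR before comparing it against the IRR below. */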
+ apic_update_ppr(apic);
highest_irr = apic_find_highest_irr(apic);
if ((highest_irr == -1) ||
((highest_irr & 0xF0) <= apic_get_reg(apic, APIC_PROCPRI)))
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
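+/*
+ * Whether the CPU supports the "use TPR shadow" execution control, and
+ * whether this VM can use it.  The shadow requires the in-kernel local
+ * APIC, whose register page serves as the virtual-APIC page.
+ */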
+static inline int cpu_has_vmx_tpr_shadow(void)
+{
+ return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
+}
+
+static inline int vm_need_tpr_shadow(struct kvm *kvm)
+{
+ return cpu_has_vmx_tpr_shadow() && irqchip_in_kernel(kvm);
+}
+
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING;
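+ /*
+ * Ask for the TPR shadow as an optional control.  This patch only
+ * enables it for 64-bit builds, where the guest can touch the TPR
+ * through CR8.
+ */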
+#ifdef CONFIG_X86_64
+ opt = CPU_BASED_TPR_SHADOW;
+#else
opt = 0;
+#endif
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
&_cpu_based_exec_control) < 0)
return -EIO;
+#ifdef CONFIG_X86_64
+ /* With the TPR shadow active, CR8 accesses need not cause exits. */
+ if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
+ _cpu_based_exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING);
+#endif
min = 0;
#ifdef CONFIG_X86_64
int ret = 0;
unsigned long kvm_vmx_return;
u64 msr;
+ u32 exec_control;
if (!init_rmode_tss(vmx->vcpu.kvm)) {
ret = -ENOMEM;
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
vmcs_config.pin_based_exec_ctrl);
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
- vmcs_config.cpu_based_exec_ctrl);
+
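+ /*
+ * If this VM cannot use the TPR shadow (no in-kernel irqchip, or no
+ * hardware support), fall back to exiting on every CR8 access.
+ */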
+ exec_control = vmcs_config.cpu_based_exec_ctrl;
+ if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_STORE_EXITING |
+ CPU_BASED_CR8_LOAD_EXITING;
+#endif
+ }
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
#ifdef CONFIG_X86_64
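+ /*
+ * VIRTUAL_APIC_PAGE_ADDR is a 64-bit field, hence vmcs_write64().
+ * When the shadow is usable, point it at the in-kernel LAPIC's
+ * register page, which the CPU uses to virtualize TPR accesses.
+ */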
- vmcs_writel(VIRTUAL_APIC_PAGE_ADDR, 0);
- vmcs_writel(TPR_THRESHOLD, 0);
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
+ if (vm_need_tpr_shadow(vmx->vcpu.kvm))
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
+ page_to_phys(vmx->vcpu.apic->regs_page));
+ vmcs_write32(TPR_THRESHOLD, 0);
#endif
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
return 1;
}
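+/*
+ * A TPR-below-threshold exit needs no work of its own.  Returning 1
+ * resumes the guest, and on the way back in vmx_intr_assist() will
+ * recompute the threshold and inject any now-unmasked interrupt.
+ */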
+static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
+ struct kvm_run *kvm_run)
+{
+ return 1;
+}
+
static void post_kvm_run_save(struct kvm_vcpu *vcpu,
struct kvm_run *kvm_run)
{
[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
};
static const int kvm_vmx_max_exit_handlers =
{
}
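+/*
+ * Program TPR_THRESHOLD to the lower of the current TPR class (cr8)
+ * and the priority class of the highest pending vector.  The CPU then
+ * exits only when a guest CR8 write drops the TPR far enough to make
+ * a pending interrupt deliverable.
+ */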
+static void update_tpr_threshold(struct kvm_vcpu *vcpu)
+{
+ int max_irr, tpr;
+
+ if (!vm_need_tpr_shadow(vcpu->kvm))
+ return;
+
+ if (!kvm_lapic_enabled(vcpu) ||
+ ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
+ vmcs_write32(TPR_THRESHOLD, 0);
+ return;
+ }
+
+ tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
+ vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
+}
+
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
u32 idtv_info_field, intr_info_field;
int has_ext_irq, interrupt_window_open;
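+ /* Refresh the TPR threshold on every entry, before injecting anything. */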
+ update_tpr_threshold(vcpu);
+
has_ext_irq = kvm_cpu_has_interrupt(vcpu);
intr_info_field = vmcs_read32(VM_ENTRY_INTR_INFO_FIELD);
idtv_info_field = vmcs_read32(IDT_VECTORING_INFO_FIELD);