When a vcpu is migrated to another physical cpu, re-arm its lapic
hrtimer there. This reduces the overhead of accessing cachelines from
the wrong node, and simplifies locking.
[Qing: fix for inactive or expired one-shot timer]
Signed-off-by: Yaozu (Eddie) Dong <Eddie.Dong@intel.com>
Signed-off-by: Qing He <qing.he@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
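The migration relies on a property of the hrtimer API: hrtimer_start()
queues a timer on the cpu it is called from, so cancelling a pending
timer and restarting it with its original absolute expiry re-queues it
on the current cpu. A minimal standalone sketch of that idiom (the
helper name is illustrative and not part of this patch; it uses this
era's API, where a timer's expiry is read directly from ->expires,
whereas later kernels use hrtimer_get_expires()):

	#include <linux/hrtimer.h>

	/* Re-queue @timer on the calling cpu, preserving its absolute expiry. */
	static void requeue_hrtimer_on_this_cpu(struct hrtimer *timer)
	{
		/*
		 * hrtimer_cancel() returns nonzero only if the timer was
		 * queued; an inactive or already-expired one-shot timer is
		 * left untouched.
		 */
		if (hrtimer_cancel(timer))
			hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
	}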
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
+void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
#endif
	update_divide_count(apic);
	start_apic_timer(apic);
}
+
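+/*
+ * Re-queue the lapic hrtimer on the cpu this vcpu now runs on.
+ * hrtimer_start() queues a timer on the calling cpu, so a
+ * cancel/restart with the original absolute expiry moves the timer;
+ * hrtimer_cancel() returns 0 for an inactive or already-expired
+ * one-shot timer, which is then left alone.
+ */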
+void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->apic;
+	struct hrtimer *timer;
+
+	if (!apic)
+		return;
+
+	timer = &apic->timer.dev;
+	if (hrtimer_cancel(timer))
+		hrtimer_start(timer, timer->expires, HRTIMER_MODE_ABS);
+}
+EXPORT_SYMBOL_GPL(kvm_migrate_apic_timer);
		delta = vcpu->host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
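+		/* vcpu moved to a new physical cpu: re-arm its apic timer here */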
+		kvm_migrate_apic_timer(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
	u64 phys_addr = __pa(vmx->vmcs);
	u64 tsc_this, delta;

-	if (vcpu->cpu != cpu)
+	if (vcpu->cpu != cpu) {
		vcpu_clear(vmx);
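+		/* the vcpu changed physical cpus: re-queue its apic timer locally */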
+		kvm_migrate_apic_timer(vcpu);
+	}
	if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
		u8 error;