KVM: X86: Delegate tsc-offset calculation to architecture code
Author:     Joerg Roedel <joerg.roedel@amd.com>
AuthorDate: Fri, 25 Mar 2011 08:44:50 +0000 (09:44 +0100)
Commit:     Avi Kivity <avi@redhat.com>
CommitDate: Wed, 11 May 2011 11:57:05 +0000 (07:57 -0400)
With TSC scaling in SVM, the tsc-offset needs to be
calculated differently. This patch propagates the
calculation into the architecture-specific modules so
that this complexity can be handled there.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
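
Before the per-file hunks, a minimal standalone sketch of the delegation pattern this patch introduces: generic x86 code obtains the TSC offset through a per-vendor callback, so SVM can fold TSC scaling into the result while VMX keeps the plain subtraction. This is userspace C with hypothetical values; everything suffixed _demo is invented here, and only the callback shape mirrors the real kvm_x86_ops change.

/*
 * Illustration only: generic code asks a per-vendor callback for the
 * TSC offset instead of computing "data - native_read_tsc()" itself.
 */
#include <stdint.h>
#include <stdio.h>

/* Stand-in for native_read_tsc(): a fixed host TSC reading. */
static uint64_t host_tsc_demo(void)
{
	return 1000000ULL;
}

/* Excerpt of an ops table, analogous to the new kvm_x86_ops member:
 * u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc); */
struct vendor_ops_demo {
	uint64_t (*compute_tsc_offset)(uint64_t target_tsc);
};

/* "VMX-like" backend: plain subtraction, as before the patch. */
static uint64_t plain_offset_demo(uint64_t target_tsc)
{
	return target_tsc - host_tsc_demo();
}

/* "SVM-like" backend: scale the host TSC first (here the guest TSC is
 * pretended to run at twice the host rate), analogous to what
 * svm_scale_tsc() does in the SVM hunk below. */
static uint64_t scaled_offset_demo(uint64_t target_tsc)
{
	return target_tsc - 2 * host_tsc_demo();
}

/* Generic path, analogous to kvm_write_tsc() after the patch. */
static uint64_t write_tsc_demo(const struct vendor_ops_demo *ops, uint64_t data)
{
	return ops->compute_tsc_offset(data);	/* was: data - host TSC */
}

int main(void)
{
	struct vendor_ops_demo vmx_like = { plain_offset_demo };
	struct vendor_ops_demo svm_like = { scaled_offset_demo };

	printf("plain offset:  %llu\n",
	       (unsigned long long)write_tsc_demo(&vmx_like, 5000000ULL));
	printf("scaled offset: %llu\n",
	       (unsigned long long)write_tsc_demo(&svm_like, 5000000ULL));
	return 0;
}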

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f3a7116f802f8c4c05bc6bf69f1f3ba7c2381d99..da0a8ce3a139244cb2f661b0e993bad0dbc3a0a7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -609,6 +609,8 @@ struct kvm_x86_ops {
        void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
        void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+       u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
        int (*check_intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a39fde4f5fe86a2c54b2621da640414d96cd9b8c..8c4549bef4edc8fa048e865c6eebfa90cdbaf27b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -943,6 +943,15 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
+static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+       u64 tsc;
+
+       tsc = svm_scale_tsc(vcpu, native_read_tsc());
+
+       return target_tsc - tsc;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
        struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4194,6 +4203,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .set_tsc_khz = svm_set_tsc_khz,
        .write_tsc_offset = svm_write_tsc_offset,
        .adjust_tsc_offset = svm_adjust_tsc_offset,
+       .compute_tsc_offset = svm_compute_tsc_offset,
 
        .set_tdp_cr3 = set_tdp_cr3,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e19c7a5473d5839c2a5762afebf282f93ae0473a..aabe3334d06454514eaf60155befdbe0891551df 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1184,6 +1184,11 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
        vmcs_write64(TSC_OFFSET, offset + adjustment);
 }
 
+static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+       return target_tsc - native_read_tsc();
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4510,6 +4515,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_tsc_khz = vmx_set_tsc_khz,
        .write_tsc_offset = vmx_write_tsc_offset,
        .adjust_tsc_offset = vmx_adjust_tsc_offset,
+       .compute_tsc_offset = vmx_compute_tsc_offset,
 
        .set_tdp_cr3 = vmx_set_cr3,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fcce29b7b6faef040c1e0792849b830d81e13cb7..579ce34e7904605fea795a63b08b7c7ddb6e2e74 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -977,7 +977,7 @@ static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
                return __this_cpu_read(cpu_tsc_khz);
 }
 
-static inline u64 nsec_to_cycles(u64 nsec)
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
        u64 ret;
 
@@ -985,7 +985,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
        if (kvm_tsc_changes_freq())
                printk_once(KERN_WARNING
                 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-       ret = nsec * __this_cpu_read(cpu_tsc_khz);
+       ret = nsec * vcpu_tsc_khz(vcpu);
        do_div(ret, USEC_PER_SEC);
        return ret;
 }
@@ -1015,7 +1015,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
        s64 sdiff;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-       offset = data - native_read_tsc();
+       offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
        sdiff = data - kvm->arch.last_tsc_write;
@@ -1031,13 +1031,13 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
         * In that case, for a reliable TSC, we can match TSC offsets,
         * or make a best guest using elapsed value.
         */
-       if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+       if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
            elapsed < 5ULL * NSEC_PER_SEC) {
                if (!check_tsc_unstable()) {
                        offset = kvm->arch.last_tsc_offset;
                        pr_debug("kvm: matched tsc offset for %llu\n", data);
                } else {
-                       u64 delta = nsec_to_cycles(elapsed);
+                       u64 delta = nsec_to_cycles(vcpu, elapsed);
                        offset += delta;
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
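
A minimal sanity check of the nsec_to_cycles() arithmetic behind the 5-second match window in the x86.c hunk, which the patch switches to the per-vcpu TSC rate. This is standalone userspace C; the 2,800,000 kHz guest rate is an assumed example, not a value from the patch.

/*
 * cycles = nsec * tsc_khz / USEC_PER_SEC
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC_DEMO 1000000000ULL
#define USEC_PER_SEC_DEMO 1000000ULL

static uint64_t nsec_to_cycles_demo(uint64_t vcpu_tsc_khz, uint64_t nsec)
{
	/* mirrors: ret = nsec * vcpu_tsc_khz(vcpu); do_div(ret, USEC_PER_SEC); */
	return nsec * vcpu_tsc_khz / USEC_PER_SEC_DEMO;
}

int main(void)
{
	/* 5 s at a 2.8 GHz guest TSC -> 14,000,000,000 cycles */
	printf("%llu\n", (unsigned long long)
	       nsec_to_cycles_demo(2800000ULL, 5ULL * NSEC_PER_SEC_DEMO));
	return 0;
}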