KVM: x86: remaster kvm_write_tsc code
author     Denis Plotnikov <dplotnikov@virtuozzo.com>
           Fri, 7 Apr 2017 09:09:52 +0000 (12:09 +0300)
committer  Radim Krčmář <rkrcmar@redhat.com>
           Wed, 12 Apr 2017 18:17:15 +0000 (20:17 +0200)
Reuse existing code instead of open-coded inline asm, and make the
TSC synchronization check more concise and clear.

Signed-off-by: Denis Plotnikov <dplotnikov@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
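
For reference, the new test reduces to a one-second window check around the
TSC value the guest is expected to have at this point in time. Below is a
minimal standalone sketch of that check under assumed, illustrative names
(tsc_write_is_synchronizing, tsc_exp and tsc_khz are not kernel identifiers;
in the patch the expected value is kvm->arch.last_tsc_write +
nsec_to_cycles(vcpu, elapsed) and the rate is vcpu->arch.virtual_tsc_khz):

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only: is the TSC value written by the guest within one second
 * (measured in virtual TSC cycles) of the value we expect right now?
 */
static bool tsc_write_is_synchronizing(uint64_t data, uint64_t tsc_exp,
                                       uint64_t tsc_khz)
{
	uint64_t tsc_hz = tsc_khz * 1000ULL;	/* one second of cycles */

	/*
	 * Equivalent to |data - tsc_exp| < tsc_hz (ignoring wraparound at
	 * the 64-bit boundary), written as two comparisons so it stays
	 * correct under unsigned arithmetic.
	 */
	return data < tsc_exp + tsc_hz && data + tsc_hz > tsc_exp;
}

Expressed this way, the check needs no signed 64/32 division, so the idivl
overflow fixup that the old usdiff computation required on 32-bit hosts
goes away.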
arch/x86/kvm/x86.c

index 3046b7f278aedd192bf6879e2166135bcc40fc5f..422f803fa3659603bebec2a6af7deb7d7124d34d 100644
@@ -1443,10 +1443,10 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        struct kvm *kvm = vcpu->kvm;
        u64 offset, ns, elapsed;
        unsigned long flags;
-       s64 usdiff;
        bool matched;
        bool already_matched;
        u64 data = msr->data;
+       bool synchronizing = false;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
        offset = kvm_compute_tsc_offset(vcpu, data);
@@ -1454,51 +1454,25 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
        if (vcpu->arch.virtual_tsc_khz) {
-               int faulted = 0;
-
-               /* n.b - signed multiplication and division required */
-               usdiff = data - kvm->arch.last_tsc_write;
-#ifdef CONFIG_X86_64
-               usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
-#else
-               /* do_div() only does unsigned */
-               asm("1: idivl %[divisor]\n"
-                   "2: xor %%edx, %%edx\n"
-                   "   movl $0, %[faulted]\n"
-                   "3:\n"
-                   ".section .fixup,\"ax\"\n"
-                   "4: movl $1, %[faulted]\n"
-                   "   jmp  3b\n"
-                   ".previous\n"
-
-               _ASM_EXTABLE(1b, 4b)
-
-               : "=A"(usdiff), [faulted] "=r" (faulted)
-               : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz));
-
-#endif
-               do_div(elapsed, 1000);
-               usdiff -= elapsed;
-               if (usdiff < 0)
-                       usdiff = -usdiff;
-
-               /* idivl overflow => difference is larger than USEC_PER_SEC */
-               if (faulted)
-                       usdiff = USEC_PER_SEC;
-       } else
-               usdiff = USEC_PER_SEC; /* disable TSC match window below */
+               u64 tsc_exp = kvm->arch.last_tsc_write +
+                                       nsec_to_cycles(vcpu, elapsed);
+               u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
+               /*
+                * Special case: TSC write with a small delta (1 second)
+                * of virtual cycle time against real time is
+                * interpreted as an attempt to synchronize the CPU.
+                */
+               synchronizing = data < tsc_exp + tsc_hz &&
+                               data + tsc_hz > tsc_exp;
+       }
 
        /*
-        * Special case: TSC write with a small delta (1 second) of virtual
-        * cycle time against real time is interpreted as an attempt to
-        * synchronize the CPU.
-         *
         * For a reliable TSC, we can match TSC offsets, and for an unstable
         * TSC, we add elapsed time in this computation.  We could let the
         * compensation code attempt to catch up if we fall behind, but
         * it's better to try to match offsets from the beginning.
          */
-       if (usdiff < USEC_PER_SEC &&
+       if (synchronizing &&
            vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
                if (!check_tsc_unstable()) {
                        offset = kvm->arch.cur_tsc_offset;