KVM: hyperv: fix locking of struct kvm_hv fields
author    Paolo Bonzini <pbonzini@redhat.com>
          Mon, 12 Dec 2016 09:12:53 +0000 (10:12 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
          Fri, 16 Dec 2016 16:53:38 +0000 (17:53 +0100)
Introduce a new mutex to avoid an AB-BA deadlock between kvm->lock and
vcpu->mutex.  Protect accesses in kvm_hv_setup_tsc_page too, as suggested
by Roman.

Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
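
For context: an AB-BA deadlock arises when two threads acquire the same pair
of locks in opposite orders, so each ends up waiting for the lock the other
holds.  Here, the partition-wide Hyper-V MSR handlers ran with vcpu->mutex
already held (they are reached from vcpu ioctls) and then took kvm->lock,
while other paths take kvm->lock before vcpu->mutex.  The stand-alone pthread
program below is a minimal, hypothetical illustration of the pattern only; it
is not part of the patch, and lock_a/lock_b merely stand in for kvm->lock and
vcpu->mutex.

/* deadlock.c - AB-BA deadlock sketch.  Build: gcc -pthread deadlock.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *thread_ab(void *arg)
{
	pthread_mutex_lock(&lock_a);	/* A first ... */
	pthread_mutex_lock(&lock_b);	/* ... then B */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

static void *thread_ba(void *arg)
{
	pthread_mutex_lock(&lock_b);	/* B first ... */
	pthread_mutex_lock(&lock_a);	/* ... then A: deadlocks if thread_ab
					 * holds A and is waiting for B */
	pthread_mutex_unlock(&lock_a);
	pthread_mutex_unlock(&lock_b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, thread_ab, NULL);
	pthread_create(&t2, NULL, thread_ba, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("no deadlock this run\n");
	return 0;
}

The patch breaks the cycle by giving the partition-wide Hyper-V state its own
mutex, hv_lock, which nests inside vcpu->mutex per the documentation update
below.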
Documentation/virtual/kvm/locking.txt
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/x86.c

diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index e5dd9f4d61008ad6431e067b900608788e573020..fd013bf4115be70bafdb6489dc4caedf61e7600d 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -13,8 +13,12 @@ The acquisition orders for mutexes are as follows:
 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
   them together is quite rare.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.  Everything
-else is a leaf: no other lock is taken inside the critical sections.
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
 
 2: Exception
 ------------
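
Restated as code, the new ordering rule means that a path which needs both
mutexes must always nest them like this (a sketch written against the
documentation above, not a hunk from this patch):

	mutex_lock(&vcpu->mutex);			/* outer */
	mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);	/* inner */
	/* ... touch partition-wide Hyper-V state ... */
	mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
	mutex_unlock(&vcpu->mutex);

In practice the nesting is implicit: vcpu->mutex is taken by the generic vcpu
ioctl code, and hv_lock is taken further down in the Hyper-V MSR handlers.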
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7892530cbacfb38e81684b1c5e60f90662e0d872..2e25038dbd932cfec9381eccbc1f4c30fe0c7b2e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -704,6 +704,7 @@ struct kvm_apic_map {
 
 /* Hyper-V emulation context */
 struct kvm_hv {
+       struct mutex hv_lock;
        u64 hv_guest_os_id;
        u64 hv_hypercall;
        u64 hv_tsc_page;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 99cde5220e0799fd84aca5a6774dce47e289a73c..1572c35b4f1a637b2ebb622ae88c5e7e14eafd63 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -852,6 +852,10 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;
 
+       mutex_lock(&kvm->arch.hyperv.hv_lock);
+       if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+               goto out_unlock;
+
        gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
        /*
         * Because the TSC parameters only vary when there is a
@@ -859,7 +863,7 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
         */
        if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
                                    &tsc_seq, sizeof(tsc_seq))))
-               return;
+               goto out_unlock;
 
        /*
         * While we're computing and writing the parameters, force the
@@ -868,15 +872,15 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        hv->tsc_ref.tsc_sequence = 0;
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
                            &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
-               return;
+               goto out_unlock;
 
        if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
-               return;
+               goto out_unlock;
 
        /* Ensure sequence is zero before writing the rest of the struct.  */
        smp_wmb();
        if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
-               return;
+               goto out_unlock;
 
        /*
         * Now switch to the TSC page mechanism by writing the sequence.
@@ -891,6 +895,8 @@ void kvm_hv_setup_tsc_page(struct kvm *kvm,
        hv->tsc_ref.tsc_sequence = tsc_seq;
        kvm_write_guest(kvm, gfn_to_gpa(gfn),
                        &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence));
+out_unlock:
+       mutex_unlock(&kvm->arch.hyperv.hv_lock);
 }
 
 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
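
Two details of the hunks above deserve a note.  The pre-existing unlocked test
of HV_X64_MSR_TSC_REFERENCE_ENABLE is kept so that the common case (TSC page
not enabled) never takes the mutex; the test is then repeated under hv_lock
because a concurrent MSR write may have cleared the bit in the window before
the lock was acquired.  Condensed, this is the classic check, lock, re-check
pattern (restated for illustration, simplified from the code above):

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		return;				/* fast path, lock never taken */

	mutex_lock(&kvm->arch.hyperv.hv_lock);
	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;		/* lost a race with a disable */
	/* ... update the guest's TSC reference page under the lock ... */
out_unlock:
	mutex_unlock(&kvm->arch.hyperv.hv_lock);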
@@ -1142,9 +1148,9 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_set_msr(vcpu, msr, data, host);
@@ -1155,9 +1161,9 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        if (kvm_hv_msr_partition_wide(msr)) {
                int r;
 
-               mutex_lock(&vcpu->kvm->lock);
+               mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
                r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
-               mutex_unlock(&vcpu->kvm->lock);
+               mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
                return kvm_hv_get_msr(vcpu, msr, pdata);
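
Note that hv_lock is taken only for partition-wide MSRs.  Per-vcpu Hyper-V
MSRs touch only the issuing vcpu's state, which is already serialized by
vcpu->mutex, so the kvm_hv_set_msr()/kvm_hv_get_msr() paths need no extra
lock.  The dispatch shape, with the rationale spelled out as comments
(paraphrasing the code above, not new logic):

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		/* VM-wide state: vcpus can race here, so serialize. */
		mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
		return r;
	}
	/* Per-vcpu state: already serialized by vcpu->mutex. */
	return kvm_hv_set_msr(vcpu, msr, data, host);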
@@ -1165,7 +1171,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 
 bool kvm_hv_hypercall_enabled(struct kvm *kvm)
 {
-       return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+       return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
 }
 
 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
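
kvm_hv_hypercall_enabled() runs on the hypercall path without taking hv_lock,
so the READ_ONCE() added above turns the read of hv_hypercall into a single
marked load that the compiler may not tear, refetch, or hoist.  It pairs with
the writers, which update the field while holding hv_lock.  Schematically (a
generic sketch of the idiom, not an additional hunk):

	/* Writer: serialized against other writers by hv_lock. */
	mutex_lock(&kvm->arch.hyperv.hv_lock);
	kvm->arch.hyperv.hv_hypercall = data;
	mutex_unlock(&kvm->arch.hyperv.hv_lock);

	/* Reader: lockless; READ_ONCE() makes the racy load deliberate
	 * and keeps it a single access. */
	if (READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE)
		/* hypercalls are enabled */;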
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1f0d2383f5ee6e273751949a77882224947a7861..49da1064ef5073736ba712a6cdce0146414abd20 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7881,6 +7881,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        raw_spin_lock_init(&kvm->arch.tsc_write_lock);
        mutex_init(&kvm->arch.apic_map_lock);
+       mutex_init(&kvm->arch.hyperv.hv_lock);
        spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
 
        kvm->arch.kvmclock_offset = -ktime_get_boot_ns();