KVM: MMU: let page fault handler be aware of tracked pages
author     Xiao Guangrong <guangrong.xiao@linux.intel.com>
           Wed, 24 Feb 2016 09:51:11 +0000 (17:51 +0800)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Thu, 3 Mar 2016 13:36:21 +0000 (14:36 +0100)
A page fault caused by a write access to a write-tracked page cannot be
fixed; it always needs to be emulated. page_fault_handle_page_track()
is the fast path we introduce here to skip holding mmu-lock and walking
the shadow page table.

However, if the page table entry is not present, it is worth making it
present and read-only so that read accesses can still be satisfied.

mmu_need_write_protect() needs to be adjusted so that the page does not
become writable when a page table entry is made present or when shadow
page table entries are synced/prefetched.
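
As a rough illustration of the flow described above, here is a standalone
model, not the kernel code; the function and array names are made up for the
example, and the PFERR_* values follow the x86 page-fault error code layout.
A present write fault on a tracked gfn is diverted to emulation before taking
mmu-lock or walking the shadow page table, while read faults and not-present
faults still go down the normal mapping path:

/* Standalone model of the write-tracking fast path (illustrative only). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1u << 0)
#define PFERR_WRITE_MASK   (1u << 1)
#define PFERR_RSVD_MASK    (1u << 3)

#define NR_GFNS 16
static unsigned short write_track[NR_GFNS];	/* per-gfn write-track counts */

/* Mirrors the idea of page_fault_handle_page_track(): true = emulate. */
static bool fault_is_write_tracked(uint32_t error_code, uint64_t gfn)
{
	if (error_code & PFERR_RSVD_MASK)	/* reserved-bit (MMIO) fault */
		return false;
	if (!(error_code & PFERR_PRESENT_MASK) ||
	    !(error_code & PFERR_WRITE_MASK))	/* only present write faults */
		return false;
	return write_track[gfn] != 0;		/* gfn is write tracked */
}

int main(void)
{
	write_track[3]++;			/* start tracking writes to gfn 3 */

	/* Present write fault on a tracked gfn: emulate (prints 1). */
	printf("%d\n", fault_is_write_tracked(
		       PFERR_PRESENT_MASK | PFERR_WRITE_MASK, 3));

	/* Read fault on the same gfn: fix it normally (prints 0). */
	printf("%d\n", fault_is_write_tracked(PFERR_PRESENT_MASK, 3));
	return 0;
}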

Signed-off-by: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_page_track.h
arch/x86/kvm/mmu.c
arch/x86/kvm/page_track.c
arch/x86/kvm/paging_tmpl.h

arch/x86/include/asm/kvm_page_track.h
index e363e3040ba49c5120c0bfee601a674cbdd794ce..5f16e2864e731d32abc5899745fe2367230d7875 100644 (file)
@@ -17,4 +17,6 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
 void kvm_slot_page_track_remove_page(struct kvm *kvm,
                                     struct kvm_memory_slot *slot, gfn_t gfn,
                                     enum kvm_page_track_mode mode);
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+                             enum kvm_page_track_mode mode);
 #endif
arch/x86/kvm/mmu.c
index 7184218acf78962bd996983f9092aa229222f124..dd8e3ca2d79b2f7073d263b45619ee95be27895c 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 #include <asm/vmx.h>
+#include <asm/kvm_page_track.h>
 
 /*
  * When setting this variable to true it enables Two-Dimensional-Paging
@@ -2448,25 +2449,29 @@ static void kvm_unsync_pages(struct kvm_vcpu *vcpu,  gfn_t gfn)
        }
 }
 
-static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
-                                 bool can_unsync)
+static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                  bool can_unsync)
 {
        struct kvm_mmu_page *s;
        bool need_unsync = false;
 
+       if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+               return true;
+
        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
                if (!can_unsync)
-                       return 1;
+                       return true;
 
                if (s->role.level != PT_PAGE_TABLE_LEVEL)
-                       return 1;
+                       return true;
 
                if (!s->unsync)
                        need_unsync = true;
        }
        if (need_unsync)
                kvm_unsync_pages(vcpu, gfn);
-       return 0;
+
+       return false;
 }
 
 static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
@@ -3381,21 +3386,43 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct)
 }
 EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
 
+static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+                                        u32 error_code, gfn_t gfn)
+{
+       if (unlikely(error_code & PFERR_RSVD_MASK))
+               return false;
+
+       if (!(error_code & PFERR_PRESENT_MASK) ||
+             !(error_code & PFERR_WRITE_MASK))
+               return false;
+
+       /*
+        * guest is writing the page which is write tracked which can
+        * not be fixed by page fault handler.
+        */
+       if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+               return true;
+
+       return false;
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code, bool prefault)
 {
-       gfn_t gfn;
+       gfn_t gfn = gva >> PAGE_SHIFT;
        int r;
 
        pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 
+       if (page_fault_handle_page_track(vcpu, error_code, gfn))
+               return 1;
+
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
 
        MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
-       gfn = gva >> PAGE_SHIFT;
 
        return nonpaging_map(vcpu, gva & PAGE_MASK,
                             error_code, gfn, prefault);
@@ -3472,6 +3499,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 
        MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
 
+       if (page_fault_handle_page_track(vcpu, error_code, gfn))
+               return 1;
+
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
arch/x86/kvm/page_track.c
index cd76bc31896809e1c5bfd584b6e9c71edbbd8b46..f127f6d04fa1a28f199ecacef027d37dcab93c97 100644 (file)
@@ -135,3 +135,18 @@ void kvm_slot_page_track_remove_page(struct kvm *kvm,
         */
        kvm_mmu_gfn_allow_lpage(slot, gfn);
 }
+
+/*
+ * check if the corresponding access on the specified guest page is tracked.
+ */
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+                             enum kvm_page_track_mode mode)
+{
+       struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+       int index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+       if (WARN_ON(!page_track_mode_is_valid(mode)))
+               return false;
+
+       return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+}
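
As a usage sketch (a hypothetical caller, not part of this patch): a component
that shadows guest-managed structures adds the backing gfn to write tracking
via the existing kvm_slot_page_track_add_page() and can then query the new
predicate without taking mmu-lock, which is what makes it usable from the page
fault fast path. The helper names and the locking context are assumptions for
illustration only.

/*
 * Hypothetical caller, sketched against the API in this patch; the
 * function names and locking context are illustrative, not from the
 * kernel tree.
 */
#include <linux/kvm_host.h>
#include <asm/kvm_page_track.h>

/* Begin write tracking for a page that backs a shadowed guest structure. */
static void shadow_start_tracking(struct kvm *kvm,
				  struct kvm_memory_slot *slot, gfn_t gfn)
{
	/* assumed to run under the locks the page-track API expects */
	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
}

/* Fast-path query: no mmu-lock, relies on the ACCESS_ONCE() read above. */
static bool shadow_write_needs_emulation(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE);
}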
arch/x86/kvm/paging_tmpl.h
index 05827ff7bd2e65e790adcc5dc3c5a87793872518..52ae2d94cc9e4cafbf0d0b3c50786868e0c07264 100644 (file)
@@ -728,6 +728,9 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                return 0;
        }
 
+       if (page_fault_handle_page_track(vcpu, error_code, walker.gfn))
+               return 1;
+
        vcpu->arch.write_fault_to_shadow_pgtable = false;
 
        is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,