KVM: MMU: fix mmu notifier invalidate handler for huge spte
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c5501bc101068401cc7066d2933c9a14135b0010..812770cddc8d5456f59056996531357cf9592c4f 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -92,8 +92,6 @@ module_param(oos_shadow, bool, 0644);
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
-#define VALID_PAGE(x) ((x) != INVALID_PAGE)
-
 #define PT64_LEVEL_BITS 9
 
 #define PT64_LEVEL_SHIFT(level) \
@@ -290,6 +288,34 @@ static void __set_spte(u64 *sptep, u64 spte)
 #endif
 }
 
+static u64 __xchg_spte(u64 *sptep, u64 new_spte)
+{
+#ifdef CONFIG_X86_64
+       return xchg(sptep, new_spte);
+#else
+       u64 old_spte;
+
+       do {
+               old_spte = *sptep;
+       } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
+
+       return old_spte;
+#endif
+}
+
+static void update_spte(u64 *sptep, u64 new_spte)
+{
+       u64 old_spte;
+
+       if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask)) {
+               __set_spte(sptep, new_spte);
+       } else {
+               old_spte = __xchg_spte(sptep, new_spte);
+               if (old_spte & shadow_accessed_mask)
+                       mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
+       }
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
 {
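
On 32-bit hosts a 64-bit store is not atomic, so the __xchg_spte() added above spins on cmpxchg64() until it swaps the spte without losing a concurrent update (for example the CPU setting the accessed bit). The following is only a rough user-space sketch of that retry idiom, with C11 atomics standing in for the kernel's cmpxchg64() and all names and values invented for the example:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* User-space stand-in for the 32-bit __xchg_spte() path: keep retrying
     * a 64-bit compare-and-exchange until no other writer sneaks in between
     * the read and the swap.  The kernel uses cmpxchg64(); C11 atomics are
     * used here purely for illustration. */
    static uint64_t xchg_u64(_Atomic uint64_t *p, uint64_t new_val)
    {
            uint64_t old_val = atomic_load(p);

            /* On failure, old_val is refreshed with the current value of *p. */
            while (!atomic_compare_exchange_weak(p, &old_val, new_val))
                    ;

            return old_val;
    }

    int main(void)
    {
            _Atomic uint64_t spte = 0x123;          /* made-up spte value */

            printf("old %#llx\n", (unsigned long long)xchg_u64(&spte, 0x456));
            printf("new %#llx\n", (unsigned long long)atomic_load(&spte));
            return 0;
    }

On x86_64 no loop is needed; as the #ifdef above shows, __xchg_spte() can use a plain xchg().
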
@@ -423,8 +449,8 @@ static int *slot_largepage_idx(gfn_t gfn,
 {
        unsigned long idx;
 
-       idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
-             (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+             (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
        return &slot->lpage_info[level - 2][idx].write_count;
 }
 
@@ -528,8 +554,8 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
        if (likely(level == PT_PAGE_TABLE_LEVEL))
                return &slot->rmap[gfn - slot->base_gfn];
 
-       idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
-               (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
+       idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
+               (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
 
        return &slot->lpage_info[level - 2][idx].rmap_pde;
 }
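
Both hunks above replace a division by KVM_PAGES_PER_HPAGE(level) with a shift by KVM_HPAGE_GFN_SHIFT(level); because the huge-page span is a power of two, the computed lpage_info index is unchanged. A quick stand-alone check of that equivalence (values invented; 512 pages and shift 9 correspond to a 2 MB huge page over 4 KB base pages):

    #include <assert.h>
    #include <stdio.h>

    /* Toy check: for a power-of-two huge-page span, dividing by the span
     * and shifting by its log2 yield the same slot-relative index.
     * All numbers here are invented for the example. */
    int main(void)
    {
            const unsigned long pages_per_hpage = 512;      /* 2 MB / 4 KB */
            const unsigned int  gfn_shift = 9;              /* log2(512) */
            const unsigned long base_gfn = 0x1f0, gfn = 0x310;

            unsigned long idx_div   = (gfn / pages_per_hpage) -
                                      (base_gfn / pages_per_hpage);
            unsigned long idx_shift = (gfn >> gfn_shift) -
                                      (base_gfn >> gfn_shift);

            assert(idx_div == idx_shift);
            printf("idx = %lu\n", idx_shift);
            return 0;
    }
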
@@ -614,19 +640,11 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
-       pfn_t pfn;
        gfn_t gfn;
        unsigned long *rmapp;
        int i;
 
-       if (!is_rmap_spte(*spte))
-               return;
        sp = page_header(__pa(spte));
-       pfn = spte_to_pfn(*spte);
-       if (*spte & shadow_accessed_mask)
-               kvm_set_pfn_accessed(pfn);
-       if (is_writable_pte(*spte))
-               kvm_set_pfn_dirty(pfn);
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        if (!*rmapp) {
@@ -660,6 +678,22 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        }
 }
 
+static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
+{
+       pfn_t pfn;
+       u64 old_spte;
+
+       old_spte = __xchg_spte(sptep, new_spte);
+       if (!is_rmap_spte(old_spte))
+               return;
+       pfn = spte_to_pfn(old_spte);
+       if (old_spte & shadow_accessed_mask)
+               kvm_set_pfn_accessed(pfn);
+       if (is_writable_pte(old_spte))
+               kvm_set_pfn_dirty(pfn);
+       rmap_remove(kvm, sptep);
+}
+
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 {
        struct kvm_rmap_desc *desc;
@@ -700,7 +734,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writable_pte(*spte)) {
-                       __set_spte(spte, *spte & ~PT_WRITABLE_MASK);
+                       update_spte(spte, *spte & ~PT_WRITABLE_MASK);
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
@@ -724,9 +758,9 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
                        BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                        if (is_writable_pte(*spte)) {
-                               rmap_remove(kvm, spte);
+                               drop_spte(kvm, spte,
+                                         shadow_trap_nonpresent_pte);
                                --kvm->stat.lpages;
-                               __set_spte(spte, shadow_trap_nonpresent_pte);
                                spte = NULL;
                                write_protected = 1;
                        }
@@ -746,8 +780,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
        while ((spte = rmap_next(kvm, rmapp, NULL))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
-               rmap_remove(kvm, spte);
-               __set_spte(spte, shadow_trap_nonpresent_pte);
+               drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
@@ -757,7 +790,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                             unsigned long data)
 {
        int need_flush = 0;
-       u64 *spte, new_spte;
+       u64 *spte, new_spte, old_spte;
        pte_t *ptep = (pte_t *)data;
        pfn_t new_pfn;
 
@@ -769,8 +802,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
                need_flush = 1;
                if (pte_write(*ptep)) {
-                       rmap_remove(kvm, spte);
-                       __set_spte(spte, shadow_trap_nonpresent_pte);
+                       drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
                        spte = rmap_next(kvm, rmapp, NULL);
                } else {
                        new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
@@ -778,9 +810,13 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
                        new_spte &= ~PT_WRITABLE_MASK;
                        new_spte &= ~SPTE_HOST_WRITEABLE;
+                       new_spte &= ~shadow_accessed_mask;
                        if (is_writable_pte(*spte))
                                kvm_set_pfn_dirty(spte_to_pfn(*spte));
-                       __set_spte(spte, new_spte);
+                       old_spte = __xchg_spte(spte, new_spte);
+                       if (is_shadow_present_pte(old_spte)
+                           && (old_spte & shadow_accessed_mask))
+                               mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
                        spte = rmap_next(kvm, rmapp, spte);
                }
        }
@@ -814,8 +850,12 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                        ret = handler(kvm, &memslot->rmap[gfn_offset], data);
 
                        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
-                               int idx = gfn_offset;
-                               idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
+                               unsigned long idx;
+                               int sh;
+
+                               sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
+                               idx = ((memslot->base_gfn+gfn_offset) >> sh) -
+                                       (memslot->base_gfn >> sh);
                                ret |= handler(kvm,
                                        &memslot->lpage_info[j][idx].rmap_pde,
                                        data);
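
The kvm_handle_hva() hunk above carries the actual huge-spte fix: the old code indexed lpage_info with gfn_offset alone, which is only correct when memslot->base_gfn is aligned to the huge-page span, while the new code rebases both the faulting gfn and base_gfn before subtracting, matching slot_largepage_idx(). A small stand-alone sketch of the arithmetic with invented values (shift 9 corresponds to a 2 MB huge page over 4 KB base pages):

    #include <stdio.h>

    /* Illustration of the index change in kvm_handle_hva() above, using
     * made-up numbers: the memslot starts at a gfn that is not aligned to
     * the 512-gfn span of a 2 MB huge page. */
    int main(void)
    {
            const unsigned int  sh = 9;             /* huge-page gfn shift */
            const unsigned long base_gfn = 0x1f0;   /* unaligned slot start */
            const unsigned long gfn_offset = 0x20;  /* offset into the slot */

            unsigned long old_idx = gfn_offset >> sh;       /* old code: 0 */
            unsigned long new_idx = ((base_gfn + gfn_offset) >> sh) -
                                    (base_gfn >> sh);       /* new code: 1 */

            printf("old idx = %lu, new idx = %lu\n", old_idx, new_idx);
            return 0;
    }

With an unaligned base_gfn the old computation can point at the wrong rmap_pde entry, which is what the mmu notifier invalidate handler tripped over for huge sptes.
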
@@ -1378,7 +1418,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
                mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                if (sp->unsync_children) {
-                       set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+                       kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                        kvm_mmu_mark_parents_unsync(sp);
                } else if (sp->unsync)
                        kvm_mmu_mark_parents_unsync(sp);
@@ -1446,6 +1486,47 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
        --iterator->level;
 }
 
+static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
+{
+       u64 spte;
+
+       spte = __pa(sp->spt)
+               | PT_PRESENT_MASK | PT_ACCESSED_MASK
+               | PT_WRITABLE_MASK | PT_USER_MASK;
+       __set_spte(sptep, spte);
+}
+
+static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
+{
+       if (is_large_pte(*sptep)) {
+               drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+               kvm_flush_remote_tlbs(vcpu->kvm);
+       }
+}
+
+static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
+                                  unsigned direct_access)
+{
+       if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
+               struct kvm_mmu_page *child;
+
+               /*
+                * For a direct sp, if the guest pte's dirty bit
+                * changed from clean to dirty, it would corrupt the
+                * sp's access: writes would be allowed through a
+                * read-only sp.  Drop the spte here so that a new sp
+                * with the correct access is used instead.
+                */
+               child = page_header(*sptep & PT64_BASE_ADDR_MASK);
+               if (child->role.access == direct_access)
+                       return;
+
+               mmu_page_remove_parent_pte(child, sptep);
+               __set_spte(sptep, shadow_trap_nonpresent_pte);
+               kvm_flush_remote_tlbs(vcpu->kvm);
+       }
+}
+
 static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                         struct kvm_mmu_page *sp)
 {
@@ -1466,7 +1547,8 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                        } else {
                                if (is_large_pte(ent))
                                        --kvm->stat.lpages;
-                               rmap_remove(kvm, &pt[i]);
+                               drop_spte(kvm, &pt[i],
+                                         shadow_trap_nonpresent_pte);
                        }
                }
                pt[i] = shadow_trap_nonpresent_pte;
@@ -1810,11 +1892,14 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
        bool need_unsync = false;
 
        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+               if (!can_unsync)
+                       return 1;
+
                if (s->role.level != PT_PAGE_TABLE_LEVEL)
                        return 1;
 
                if (!need_unsync && !s->unsync) {
-                       if (!can_unsync || !oos_shadow)
+                       if (!oos_shadow)
                                return 1;
                        need_unsync = true;
                }
@@ -1867,9 +1952,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                if (level > PT_PAGE_TABLE_LEVEL &&
                    has_wrprotected_page(vcpu->kvm, gfn, level)) {
                        ret = 1;
-                       rmap_remove(vcpu->kvm, sptep);
-                       spte = shadow_trap_nonpresent_pte;
-                       goto set_pte;
+                       drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
+                       goto done;
                }
 
                spte |= PT_WRITABLE_MASK;
@@ -1900,7 +1984,8 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-       __set_spte(sptep, spte);
+       update_spte(sptep, spte);
+done:
        return ret;
 }
 
@@ -1937,8 +2022,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                } else if (pfn != spte_to_pfn(*sptep)) {
                        pgprintk("hfn old %lx new %lx\n",
                                 spte_to_pfn(*sptep), pfn);
-                       rmap_remove(vcpu->kvm, sptep);
-                       __set_spte(sptep, shadow_trap_nonpresent_pte);
+                       drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
                        kvm_flush_remote_tlbs(vcpu->kvm);
                } else
                        was_rmapped = 1;
@@ -2039,7 +2123,9 @@ static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
        if (is_hwpoison_pfn(pfn)) {
                kvm_send_hwpoison_signal(kvm, gfn);
                return 0;
-       }
+       } else if (is_fault_pfn(pfn))
+               return -EFAULT;
+
        return 1;
 }
 
@@ -2131,7 +2217,7 @@ static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
        int ret = 0;
 
        if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
-               set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+               kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
                ret = 1;
        }
 
@@ -2329,7 +2415,7 @@ static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.tlb_flush;
-       set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 }
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
@@ -2590,7 +2676,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
        pte = *spte;
        if (is_shadow_present_pte(pte)) {
                if (is_last_spte(pte, sp->role.level))
-                       rmap_remove(vcpu->kvm, spte);
+                       drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);