KVM: Simplify gfn_to_page()
[karo-tx-linux.git] / drivers/kvm/mmu.c
index e85b4c7c36f7139bc20a7bcb3138ac4edb654acd..8bdb9ca1811c6c19e647335473d0c4c6280d4236 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -390,13 +390,11 @@ static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
        struct kvm *kvm = vcpu->kvm;
        struct page *page;
-       struct kvm_memory_slot *slot;
        struct kvm_rmap_desc *desc;
        u64 *spte;
 
-       slot = gfn_to_memslot(kvm, gfn);
-       BUG_ON(!slot);
-       page = gfn_to_page(slot, gfn);
+       page = gfn_to_page(kvm, gfn);
+       BUG_ON(!page);
 
        while (page_private(page)) {
                if (!(page_private(page) & 1))
@@ -437,9 +435,8 @@ static void kvm_mmu_free_page(struct kvm_vcpu *vcpu, hpa_t page_hpa)
        struct kvm_mmu_page *page_head = page_header(page_hpa);
 
        ASSERT(is_empty_shadow_page(page_hpa));
-       list_del(&page_head->link);
        page_head->page_hpa = page_hpa;
-       list_add(&page_head->link, &vcpu->free_pages);
+       list_move(&page_head->link, &vcpu->free_pages);
        ++vcpu->kvm->n_free_mmu_pages;
 }
 
@@ -457,11 +454,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                return NULL;
 
        page = list_entry(vcpu->free_pages.next, struct kvm_mmu_page, link);
-       list_del(&page->link);
-       list_add(&page->link, &vcpu->kvm->active_mmu_pages);
+       list_move(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->page_hpa));
        page->slot_bitmap = 0;
-       page->global = 1;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
@@ -569,6 +564,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
+                                            unsigned hugepage_access,
                                             u64 *parent_pte)
 {
        union kvm_mmu_page_role role;
@@ -582,6 +578,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
+       role.hugepage_access = hugepage_access;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
@@ -669,10 +666,8 @@ static void kvm_mmu_zap_page(struct kvm_vcpu *vcpu,
        if (!page->root_count) {
                hlist_del(&page->hash_link);
                kvm_mmu_free_page(vcpu, page->page_hpa);
-       } else {
-               list_del(&page->link);
-               list_add(&page->link, &vcpu->kvm->active_mmu_pages);
-       }
+       } else
+               list_move(&page->link, &vcpu->kvm->active_mmu_pages);
 }
 
 static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
@@ -714,14 +709,12 @@ hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 
 hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
 {
-       struct kvm_memory_slot *slot;
        struct page *page;
 
        ASSERT((gpa & HPA_ERR_MASK) == 0);
-       slot = gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT);
-       if (!slot)
+       page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+       if (!page)
                return gpa | HPA_ERR_MASK;
-       page = gfn_to_page(slot, gpa >> PAGE_SHIFT);
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
 }
@@ -735,6 +728,15 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
        return gpa_to_hpa(vcpu, gpa);
 }
 
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
+{
+       gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+       if (gpa == UNMAPPED_GVA)
+               return NULL;
+       return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
@@ -772,7 +774,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
-                                                    1, &table[index]);
+                                                    1, 0, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
@@ -827,7 +829,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                       PT64_ROOT_LEVEL, 0, NULL);
+                                       PT64_ROOT_LEVEL, 0, 0, NULL);
                root = page->page_hpa;
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
@@ -844,7 +846,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                       NULL);
+                                       0, NULL);
                root = page->page_hpa;
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
@@ -918,11 +920,6 @@ static void paging_new_cr3(struct kvm_vcpu *vcpu)
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
 }
 
-static void mark_pagetable_nonglobal(void *shadow_pte)
-{
-       page_header(__pa(shadow_pte))->global = 0;
-}
-
 static inline void set_pte_common(struct kvm_vcpu *vcpu,
                             u64 *shadow_pte,
                             gpa_t gaddr,
@@ -940,9 +937,6 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 
        *shadow_pte |= access_bits;
 
-       if (!(*shadow_pte & PT_GLOBAL_MASK))
-               mark_pagetable_nonglobal(shadow_pte);
-
        if (is_error_hpa(paddr)) {
                *shadow_pte |= gaddr;
                *shadow_pte |= PT_SHADOW_IO_MARK;
@@ -1171,6 +1165,7 @@ void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes)
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
+                               page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
@@ -1315,6 +1310,23 @@ void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot)
        }
 }
 
+void kvm_mmu_zap_all(struct kvm_vcpu *vcpu)
+{
+       destroy_kvm_mmu(vcpu);
+
+       while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
+               struct kvm_mmu_page *page;
+
+               page = container_of(vcpu->kvm->active_mmu_pages.next,
+                                   struct kvm_mmu_page, link);
+               kvm_mmu_zap_page(vcpu, page);
+       }
+
+       mmu_free_memory_caches(vcpu);
+       kvm_arch_ops->tlb_flush(vcpu);
+       init_kvm_mmu(vcpu);
+}
+
 #ifdef AUDIT
 
 static const char *audit_msg;
@@ -1359,7 +1371,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 
 static void audit_mappings(struct kvm_vcpu *vcpu)
 {
-       int i;
+       unsigned i;
 
        if (vcpu->mmu.root_level == 4)
                audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);