diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 16f804938b8fea9fee56fa93991cf8c45cf141e5..16e7994bf3473d4c6ec7c32a8571299c02ab7808 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -90,104 +90,115 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
        return p;
 }
 
-static bool page_empty(void *ptr)
+static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
 {
-       struct page *ptr_page = virt_to_page(ptr);
-       return page_count(ptr_page) == 1;
+       pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
+       pgd_clear(pgd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       pud_free(NULL, pud_table);
+       put_page(virt_to_page(pgd));
 }
 
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-       if (pud_huge(*pud)) {
-               pud_clear(pud);
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-       } else {
-               pmd_t *pmd_table = pmd_offset(pud, 0);
-               pud_clear(pud);
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-               pmd_free(NULL, pmd_table);
-       }
+       pmd_t *pmd_table = pmd_offset(pud, 0);
+       VM_BUG_ON(pud_huge(*pud));
+       pud_clear(pud);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
 }
 
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
-       if (kvm_pmd_huge(*pmd)) {
-               pmd_clear(pmd);
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-       } else {
-               pte_t *pte_table = pte_offset_kernel(pmd, 0);
-               pmd_clear(pmd);
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-               pte_free_kernel(NULL, pte_table);
-       }
+       pte_t *pte_table = pte_offset_kernel(pmd, 0);
+       VM_BUG_ON(kvm_pmd_huge(*pmd));
+       pmd_clear(pmd);
+       kvm_tlb_flush_vmid_ipa(kvm, addr);
+       pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
 }
 
-static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
+static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
+                      phys_addr_t addr, phys_addr_t end)
 {
-       if (pte_present(*pte)) {
-               kvm_set_pte(pte, __pte(0));
-               put_page(virt_to_page(pte));
-               kvm_tlb_flush_vmid_ipa(kvm, addr);
-       }
+       phys_addr_t start_addr = addr;
+       pte_t *pte, *start_pte;
+
+       start_pte = pte = pte_offset_kernel(pmd, addr);
+       do {
+               if (!pte_none(*pte)) {
+                       kvm_set_pte(pte, __pte(0));
+                       put_page(virt_to_page(pte));
+                       kvm_tlb_flush_vmid_ipa(kvm, addr);
+               }
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       if (kvm_pte_table_empty(start_pte))
+               clear_pmd_entry(kvm, pmd, start_addr);
 }
 
-static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
-                       unsigned long long start, u64 size)
+static void unmap_pmds(struct kvm *kvm, pud_t *pud,
+                      phys_addr_t addr, phys_addr_t end)
 {
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       unsigned long long addr = start, end = start + size;
-       u64 next;
-
-       while (addr < end) {
-               pgd = pgdp + pgd_index(addr);
-               pud = pud_offset(pgd, addr);
-               pte = NULL;
-               if (pud_none(*pud)) {
-                       addr = kvm_pud_addr_end(addr, end);
-                       continue;
-               }
+       phys_addr_t next, start_addr = addr;
+       pmd_t *pmd, *start_pmd;
 
-               if (pud_huge(*pud)) {
-                       /*
-                        * If we are dealing with a huge pud, just clear it and
-                        * move on.
-                        */
-                       clear_pud_entry(kvm, pud, addr);
-                       addr = kvm_pud_addr_end(addr, end);
-                       continue;
+       start_pmd = pmd = pmd_offset(pud, addr);
+       do {
+               next = kvm_pmd_addr_end(addr, end);
+               if (!pmd_none(*pmd)) {
+                       if (kvm_pmd_huge(*pmd)) {
+                               pmd_clear(pmd);
+                               kvm_tlb_flush_vmid_ipa(kvm, addr);
+                               put_page(virt_to_page(pmd));
+                       } else {
+                               unmap_ptes(kvm, pmd, addr, next);
+                       }
                }
+       } while (pmd++, addr = next, addr != end);
 
-               pmd = pmd_offset(pud, addr);
-               if (pmd_none(*pmd)) {
-                       addr = kvm_pmd_addr_end(addr, end);
-                       continue;
-               }
+       if (kvm_pmd_table_empty(start_pmd))
+               clear_pud_entry(kvm, pud, start_addr);
+}
 
-               if (!kvm_pmd_huge(*pmd)) {
-                       pte = pte_offset_kernel(pmd, addr);
-                       clear_pte_entry(kvm, pte, addr);
-                       next = addr + PAGE_SIZE;
-               }
+static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
+                      phys_addr_t addr, phys_addr_t end)
+{
+       phys_addr_t next, start_addr = addr;
+       pud_t *pud, *start_pud;
 
-               /*
-                * If the pmd entry is to be cleared, walk back up the ladder
-                */
-               if (kvm_pmd_huge(*pmd) || (pte && page_empty(pte))) {
-                       clear_pmd_entry(kvm, pmd, addr);
-                       next = kvm_pmd_addr_end(addr, end);
-                       if (page_empty(pmd) && !page_empty(pud)) {
-                               clear_pud_entry(kvm, pud, addr);
-                               next = kvm_pud_addr_end(addr, end);
+       start_pud = pud = pud_offset(pgd, addr);
+       do {
+               next = kvm_pud_addr_end(addr, end);
+               if (!pud_none(*pud)) {
+                       if (pud_huge(*pud)) {
+                               pud_clear(pud);
+                               kvm_tlb_flush_vmid_ipa(kvm, addr);
+                               put_page(virt_to_page(pud));
+                       } else {
+                               unmap_pmds(kvm, pud, addr, next);
                        }
                }
+       } while (pud++, addr = next, addr != end);
 
-               addr = next;
-       }
+       if (kvm_pud_table_empty(start_pud))
+               clear_pgd_entry(kvm, pgd, start_addr);
+}
+
+
+static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
+                       phys_addr_t start, u64 size)
+{
+       pgd_t *pgd;
+       phys_addr_t addr = start, end = start + size;
+       phys_addr_t next;
+
+       pgd = pgdp + pgd_index(addr);
+       do {
+               next = kvm_pgd_addr_end(addr, end);
+               unmap_puds(kvm, pgd, addr, next);
+       } while (pgd++, addr = next, addr != end);
 }
 
 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
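
The hunk above replaces the old single-loop walker with the kernel's usual nested table-walk idiom: each level iterates its own entries in a do/while bounded by kvm_p*d_addr_end(), recurses into the next level, and frees a table page on the way back up once it is empty (emptiness is tracked through the struct page refcount, as the removed page_empty() did with page_count() == 1). A minimal, self-contained sketch of the clamping idiom, with hypothetical LEVEL_* names standing in for PMD_SIZE/PMD_MASK:

	#define LEVEL_SIZE	(1UL << 21)		/* e.g. one PMD entry covers 2 MiB */
	#define LEVEL_MASK	(~(LEVEL_SIZE - 1))

	/* Clamp the stride so one iteration never crosses a level boundary. */
	static unsigned long level_addr_end(unsigned long addr, unsigned long end)
	{
		unsigned long boundary = (addr + LEVEL_SIZE) & LEVEL_MASK;

		return (boundary - 1 < end - 1) ? boundary : end;
	}

	static void walk_level(unsigned long addr, unsigned long end)
	{
		unsigned long next;

		do {
			next = level_addr_end(addr, end);
			/* visit or recurse into the entry covering [addr, next) */
		} while (addr = next, addr != end);
	}

The "- 1" comparison keeps the helper correct when the boundary wraps to zero at the top of the address space, which is why the real kvm_p*d_addr_end() macros are written the same way.
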
@@ -748,6 +759,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        struct vm_area_struct *vma;
        pfn_t pfn;
+       pgprot_t mem_type = PAGE_S2;
 
        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
@@ -798,6 +810,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        if (is_error_pfn(pfn))
                return -EFAULT;
 
+       if (kvm_is_mmio_pfn(pfn))
+               mem_type = PAGE_S2_DEVICE;
+
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
@@ -805,7 +820,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
 
        if (hugetlb) {
-               pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+               pmd_t new_pmd = pfn_pmd(pfn, mem_type);
                new_pmd = pmd_mkhuge(new_pmd);
                if (writable) {
                        kvm_set_s2pmd_writable(&new_pmd);
@@ -814,13 +829,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
-               pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+               pte_t new_pte = pfn_pte(pfn, mem_type);
                if (writable) {
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
                coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
-               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
+               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
+                                    mem_type == PAGE_S2_DEVICE);
        }
 
 
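The user_mem_abort() hunks above pick the stage-2 protection once, up front: pfns backed by MMIO (kvm_is_mmio_pfn()) get PAGE_S2_DEVICE instead of the normal cacheable PAGE_S2, and the final stage2_set_pte() call is told it is installing a device mapping. The selection itself reduces to the following (the helper name is hypothetical; the symbols are the ones used in the diff):

	static pgprot_t stage2_mem_type(pfn_t pfn)
	{
		/* Device memory must not get Normal/cacheable attributes at stage 2. */
		return kvm_is_mmio_pfn(pfn) ? PAGE_S2_DEVICE : PAGE_S2;
	}

Both the huge-PMD path (pfn_pmd()) and the PTE path (pfn_pte()) then inherit the right memory type from this single decision point.
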
@@ -1100,3 +1116,49 @@ out:
        free_hyp_pgds();
        return err;
 }
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  const struct kvm_memory_slot *old,
+                                  enum kvm_mr_change change)
+{
+       gpa_t gpa = old->base_gfn << PAGE_SHIFT;
+       phys_addr_t size = old->npages << PAGE_SHIFT;
+       if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
+               spin_lock(&kvm->mmu_lock);
+               unmap_stage2_range(kvm, gpa, size);
+               spin_unlock(&kvm->mmu_lock);
+       }
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_userspace_memory_region *mem,
+                                  enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
+{
+       return 0;
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
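
kvm_arch_commit_memory_region() is the only hook with a body: on KVM_MR_DELETE or KVM_MR_MOVE it tears down the old slot's stage-2 mappings under mmu_lock, converting the slot's gfn range into an IPA range by shifting by PAGE_SHIFT. A worked example of that arithmetic (illustrative values only, standalone userspace C):

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical slot: base_gfn = 0x10000, npages = 256, 4 KiB pages. */
		unsigned long long base_gfn = 0x10000, npages = 256;
		unsigned long long gpa  = base_gfn << 12;	/* 0x10000000 */
		unsigned long long size = npages << 12;		/* 0x00100000 */

		printf("unmap_stage2_range covers [%#llx, %#llx)\n", gpa, gpa + size);
		return 0;
	}

The remaining hooks are deliberately empty: ARM builds its stage-2 tables lazily on guest faults, so there is nothing to prepare or pre-populate when a memslot is created or updated.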