/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>

#include "trace.h"

extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];

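/*
 * kvm_hyp_pgd_mutex serializes all modifications to the Hyp-mode page tables
 * rooted at hyp_pgd. The stage-2 (guest) tables are protected by the per-VM
 * mmu_lock instead, except at VM creation and teardown time when no other
 * users can exist.
 */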
static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
        kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(PGALLOC_GFP);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }

        return 0;
}

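/*
 * The kvm_mmu_memory_cache helpers above and below let the stage-2 fault
 * handlers pre-allocate the intermediate table pages they might need before
 * taking mmu_lock, so no page allocation has to happen with the spinlock
 * held.
 */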
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

static void free_ptes(pmd_t *pmd, unsigned long addr)
{
        pte_t *pte;
        unsigned int i;

        for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
                if (!pmd_none(*pmd) && pmd_table(*pmd)) {
                        pte = pte_offset_kernel(pmd, addr);
                        pte_free_kernel(NULL, pte);
                }
                pmd++;
        }
}

static void free_hyp_pgd_entry(unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long hyp_addr = KERN_TO_HYP(addr);

        pgd = hyp_pgd + pgd_index(hyp_addr);
        pud = pud_offset(pgd, hyp_addr);

        if (pud_none(*pud))
                return;
        BUG_ON(pud_bad(*pud));

        pmd = pmd_offset(pud, hyp_addr);
        free_ptes(pmd, addr);
        pmd_free(NULL, pmd);
        pud_clear(pud);
}

/**
 * free_hyp_pmds - free the Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * either mappings in the kernel memory area (above PAGE_OFFSET), or
 * device mappings in the vmalloc range (from VMALLOC_START to VMALLOC_END).
 */
void free_hyp_pmds(void)
{
        unsigned long addr;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
                free_hyp_pgd_entry(addr);
        for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                free_hyp_pgd_entry(addr);
        mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
                                    unsigned long end)
{
        pte_t *pte;
        unsigned long addr;
        struct page *page;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);

                pte = pte_offset_kernel(pmd, hyp_addr);
                BUG_ON(!virt_addr_valid(addr));
                page = virt_to_page(addr);
                kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
        }
}

static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
                                       unsigned long end,
                                       unsigned long *pfn_base)
{
        pte_t *pte;
        unsigned long addr;

        for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);

                pte = pte_offset_kernel(pmd, hyp_addr);
                BUG_ON(pfn_valid(*pfn_base));
                kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
                (*pfn_base)++;
        }
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
                                   unsigned long end, unsigned long *pfn_base)
{
        pte_t *pte;
        pmd_t *pmd;
        unsigned long addr, next;

        for (addr = start; addr < end; addr = next) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);
                pmd = pmd_offset(pud, hyp_addr);

                BUG_ON(pmd_sect(*pmd));

                if (pmd_none(*pmd)) {
                        pte = pte_alloc_one_kernel(NULL, hyp_addr);
                        if (!pte) {
                                kvm_err("Cannot allocate Hyp pte\n");
                                return -ENOMEM;
                        }
                        pmd_populate_kernel(NULL, pmd, pte);
                }

                next = pmd_addr_end(addr, end);

                /*
                 * If pfn_base is NULL, we map kernel pages into HYP with the
                 * virtual address. Otherwise, this is considered an I/O
                 * mapping and we map the physical region starting at
                 * *pfn_base to [start, end[.
                 */
                if (!pfn_base)
                        create_hyp_pte_mappings(pmd, addr, next);
                else
                        create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
        }

        return 0;
}

static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
        unsigned long start = (unsigned long)from;
        unsigned long end = (unsigned long)to;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        unsigned long addr, next;
        int err = 0;

        if (start >= end)
                return -EINVAL;
        /* Check for a valid kernel memory mapping */
        if (!pfn_base && (!virt_addr_valid(from) || !virt_addr_valid(to - 1)))
                return -EINVAL;
        /* Check for a valid kernel IO mapping */
        if (pfn_base && (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)))
                return -EINVAL;

        mutex_lock(&kvm_hyp_pgd_mutex);
        for (addr = start; addr < end; addr = next) {
                unsigned long hyp_addr = KERN_TO_HYP(addr);
                pgd = hyp_pgd + pgd_index(hyp_addr);
                pud = pud_offset(pgd, hyp_addr);

                if (pud_none_or_clear_bad(pud)) {
                        pmd = pmd_alloc_one(NULL, hyp_addr);
                        if (!pmd) {
                                kvm_err("Cannot allocate Hyp pmd\n");
                                err = -ENOMEM;
                                goto out;
                        }
                        pud_populate(NULL, pud, pmd);
                }

                next = pgd_addr_end(addr, end);
                err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
                if (err)
                        goto out;
        }
out:
        mutex_unlock(&kvm_hyp_pgd_mutex);
        return err;
}

/**
 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
 * @from:	The virtual kernel start address of the range
 * @to:		The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used
 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
 * physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
        return __create_hyp_mappings(from, to, NULL);
}

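/*
 * Usage sketch (illustrative only, not part of this file): the arch init
 * code is expected to mirror the KVM/Hyp text and per-CPU data into Hyp
 * roughly as follows; the section symbols are assumptions for illustration:
 *
 *	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 *	if (err)
 *		goto out_free_mappings;
 */
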
/**
 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
 * @from:	The kernel start VA of the range
 * @to:		The kernel end VA of the range (exclusive)
 * @addr:	The physical start address which gets mapped
 *
 * The resulting HYP VA is the same as the kernel VA, modulo
 * HYP_PAGE_OFFSET.
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
        unsigned long pfn = __phys_to_pfn(addr);
        return __create_hyp_mappings(from, to, &pfn);
}

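/*
 * Usage sketch (illustrative only): a device that Hyp mode itself must
 * access, such as the GIC virtual interface control registers, would first
 * be ioremap()ed by the kernel and then mirrored into Hyp. The variable
 * names below are assumptions for illustration:
 *
 *	vctrl = ioremap(vctrl_phys, vctrl_size);
 *	err = create_hyp_io_mappings(vctrl, vctrl + vctrl_size, vctrl_phys);
 */
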
/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
        pgd_t *pgd;

        if (kvm->arch.pgd != NULL) {
                kvm_err("kvm_arch already initialized?\n");
                return -EINVAL;
        }

        pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
        if (!pgd)
                return -ENOMEM;

        /* stage-2 pgd must be aligned to its size */
        VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

        memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
        kvm_clean_pgd(pgd);
        kvm->arch.pgd = pgd;

        return 0;
}

static void clear_pud_entry(pud_t *pud)
{
        pmd_t *pmd_table = pmd_offset(pud, 0);
        pud_clear(pud);
        pmd_free(NULL, pmd_table);
        put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
        pte_t *pte_table = pte_offset_kernel(pmd, 0);
        pmd_clear(pmd);
        pte_free_kernel(NULL, pte_table);
        put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
        struct page *pmd_page = virt_to_page(pmd);
        return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
        if (pte_present(*pte)) {
                kvm_set_pte(pte, __pte(0));
                put_page(virt_to_page(pte));
        }
}

static bool pte_empty(pte_t *pte)
{
        struct page *pte_page = virt_to_page(pte);
        return page_count(pte_page) == 1;
}

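/*
 * The clear_*_entry()/ *_empty() helpers above piggy-back on the page
 * refcount of each table page: every live entry in a table holds one extra
 * reference on that table's page (taken in stage2_set_pte), so a count of
 * exactly one means the table no longer maps anything and can be torn down.
 */
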
/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        phys_addr_t addr = start, end = start + size;
        u64 range;

        while (addr < end) {
                pgd = kvm->arch.pgd + pgd_index(addr);
                pud = pud_offset(pgd, addr);
                if (pud_none(*pud)) {
                        addr += PUD_SIZE;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr += PMD_SIZE;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                clear_pte_entry(pte);
                range = PAGE_SIZE;

                /* If we emptied the pte, walk back up the ladder */
                if (pte_empty(pte)) {
                        clear_pmd_entry(pmd);
                        range = PMD_SIZE;
                        if (pmd_empty(pmd)) {
                                clear_pud_entry(pud);
                                range = PUD_SIZE;
                        }
                }

                addr += range;
        }
}

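/*
 * unmap_stage2_range() has two users in this file: the MMU notifier unmap
 * path below tears down one page at a time, while kvm_free_stage2_pgd()
 * unmaps the whole IPA space before freeing the level-1 table.
 */
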
/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
        if (kvm->arch.pgd == NULL)
                return;

        unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
        free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
        kvm->arch.pgd = NULL;
}

static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
                          phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte, old_pte;

        /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }

        pmd = pmd_offset(pud, addr);

        /* Create 2nd stage page table mapping - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
                pte = mmu_memory_cache_alloc(cache);
                kvm_clean_pte(pte);
                pmd_populate_kernel(NULL, pmd, pte);
                get_page(virt_to_page(pmd));
        }

        pte = pte_offset_kernel(pmd, addr);

        if (iomap && pte_present(*pte))
                return -EFAULT;

        /* Create 2nd stage page table mapping - Level 3 */
        old_pte = *pte;
        kvm_set_pte(pte, *new_pte);
        if (pte_present(old_pte))
                kvm_tlb_flush_vmid_ipa(kvm, addr);
        else
                get_page(virt_to_page(pte));

        return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size)
{
        phys_addr_t addr, end;
        int ret = 0;
        unsigned long pfn;
        struct kvm_mmu_memory_cache cache = { 0, };

        end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
        pfn = __phys_to_pfn(pa);

        for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
                pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
                kvm_set_s2pte_writable(&pte);

                ret = mmu_topup_memory_cache(&cache, 2, 2);
                if (ret)
                        goto out;
                spin_lock(&kvm->mmu_lock);
                ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
                spin_unlock(&kvm->mmu_lock);
                if (ret)
                        goto out;

                pfn++;
        }

out:
        mmu_free_memory_cache(&cache);
        return ret;
}

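/*
 * Usage sketch (illustrative only): an in-kernel device model that wants a
 * guest-visible MMIO window backed directly by hardware, for example the GIC
 * virtual CPU interface, could install it with something like the call
 * below; the base/size names are assumptions for illustration:
 *
 *	ret = kvm_phys_addr_ioremap(kvm, guest_cpu_if_base,
 *				    host_vcpu_if_phys, SZ_8K);
 */
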
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          gfn_t gfn, struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
{
        pte_t new_pte;
        pfn_t pfn;
        int ret;
        bool write_fault, writable;
        unsigned long mmu_seq;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;

        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }

        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
                return ret;

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq happens before we call
         * gfn_to_pfn_prot (which calls get_user_pages), so that we don't
         * risk the page we just got a reference to getting unmapped before
         * we have a chance to grab the mmu_lock, which ensures that if the
         * page gets unmapped afterwards, the call to kvm_unmap_hva will take
         * it away from us again properly. This smp_rmb() interacts with the
         * smp_wmb() in kvm_mmu_notifier_invalidate_<page|range_end>.
         */
        smp_rmb();

        pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;

        new_pte = pfn_pte(pfn, PAGE_S2);
        coherent_icache_guest_page(vcpu->kvm, gfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        if (writable) {
                kvm_set_s2pte_writable(&new_pte);
                kvm_set_pfn_dirty(pfn);
        }
        stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which means that either the
 * guest simply needs more memory and we must allocate an appropriate page, or
 * the guest tried to access I/O memory, which is emulated by user space. The
 * distinction is based on the IPA causing the fault and whether this memory
 * region has been registered as standard RAM by user space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long fault_status;
        phys_addr_t fault_ipa;
        struct kvm_memory_slot *memslot;
        bool is_iabt;
        gfn_t gfn;
        int ret, idx;

        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

        trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);

        /* Check that the stage-2 fault is a translation or permission fault */
        fault_status = kvm_vcpu_trap_get_fault(vcpu);
        if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
                kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu), fault_status);
                return -EFAULT;
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        gfn = fault_ipa >> PAGE_SHIFT;
        if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                if (is_iabt) {
                        /* Prefetch Abort on I/O address */
                        kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        ret = 1;
                        goto out_unlock;
                }

                if (fault_status != FSC_FAULT) {
                        kvm_err("Unsupported fault status on io memory: %#lx\n",
                                fault_status);
                        ret = -EFAULT;
                        goto out_unlock;
                }

                /*
                 * The IPA is reported as [MAX:12], so we need to
                 * complement it with the bottom 12 bits from the
                 * faulting VA. This is always 12 bits, irrespective
                 * of the page size.
                 */
                fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

        memslot = gfn_to_memslot(vcpu->kvm, gfn);

        ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
        if (ret == 0)
                ret = 1;
out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
}

static void handle_hva_to_gpa(struct kvm *kvm,
                              unsigned long start,
                              unsigned long end,
                              void (*handler)(struct kvm *kvm,
                                              gpa_t gpa, void *data),
                              void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (; gfn < gfn_end; ++gfn) {
                        gpa_t gpa = gfn << PAGE_SHIFT;
                        handler(kvm, gpa, data);
                }
        }
}

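/*
 * The MMU notifier callbacks below reuse handle_hva_to_gpa() to translate
 * host userspace address ranges into guest physical ranges and then apply a
 * per-page handler, so the host MM can unmap or replace pages that are also
 * mapped into a guest.
 */
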
static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        unmap_stage2_range(kvm, gpa, PAGE_SIZE);
        kvm_tlb_flush_vmid_ipa(kvm, gpa);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        unsigned long end = hva + PAGE_SIZE;

        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva(hva);
        handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
                        unsigned long start, unsigned long end)
{
        if (!kvm->arch.pgd)
                return 0;

        trace_kvm_unmap_hva_range(start, end);
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
        return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
        pte_t *pte = (pte_t *)data;

        stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        pte_t stage2_pte;

        if (!kvm->arch.pgd)
                return;

        trace_kvm_set_spte_hva(hva);
        stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
        handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
        VM_BUG_ON(!virt_addr_valid(hyp_pgd));
        return virt_to_phys(hyp_pgd);
}

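/*
 * The physical address returned above is what the arch init code is expected
 * to program into HTTBR (the Hyp translation table base register) when it
 * brings up Hyp mode on each CPU.
 */
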
int kvm_mmu_init(void)
{
        if (!hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                return -ENOMEM;
        }

        return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
        unsigned long addr, end;
        unsigned long next;
        pgd_t *pgd = hyp_pgd;
        pud_t *pud;
        pmd_t *pmd;

        addr = virt_to_phys(__hyp_idmap_text_start);
        end = virt_to_phys(__hyp_idmap_text_end);

        pgd += pgd_index(addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
                pud = pud_offset(pgd, addr);
                pmd = pmd_offset(pud, addr);

                pud_clear(pud);
                kvm_clean_pmd_entry(pmd);
                pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
        } while (pgd++, addr = next, addr < end);
}
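
/*
 * Note (assumption): once Hyp mode runs out of its own page tables, the
 * init-time identity mapping of the Hyp init code is no longer needed, and
 * the arch init code is expected to call kvm_clear_hyp_idmap() to tear it
 * down.
 */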