/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"	/* trace_kvm_booke206_* tracepoints used below */
#include "timing.h"
#include "e500_mmu_host.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

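/*
 * Note: the first tlbcam_index host TLB1 entries hold the host's own
 * pinned CAM mappings, and the entry at tlbcam_index itself is reserved
 * for the magic page (see kvmppc_map_magic() below) -- hence the "- 1".
 * Shadow entries fill the remainder from the top down via to_htlb1_esel().
 */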
static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

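/*
 * In MAS3 each supervisor permission bit sits one position below its user
 * counterpart (e.g. MAS3_SR/MAS3_UR), so ORing in the supervisor bits
 * shifted left by one copies SR/SW/SX into UR/UW/UX.
 */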
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

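/*
 * On SMP hosts a vcpu may migrate between physical cpus, so (this is the
 * presumed rationale) all guest mappings are forced cache-coherent by
 * setting MAS2_M when CONFIG_SMP is enabled.
 */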
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow tlb entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

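/*
 * For TLB0 the hardware supplies the victim: the tlbsx in get_host_mas0()
 * returns a MAS0 with the next-victim hint for the matching set.  TLB1 is
 * fully associative and managed by hand; to_htlb1_esel() maps shadow entry
 * numbers onto hardware entries from the top down, away from the host's
 * own low (pinned) entries.
 */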
/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

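/*
 * The magic page is the shared kernel/guest page (vcpu->arch.shared) used
 * by the paravirt interface.  On e500v2 it is mapped into the host TLB1
 * entry directly above the host's CAM mappings, a slot that
 * tlb1_max_shadow_size() keeps out of shadow use.
 */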
#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

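/*
 * A single guest TLB1 entry may be shadowed by multiple host TLB1 entries
 * (one-to-many).  g2h_tlb1_map[esel] is a 64-bit bitmap of the host entries
 * backing guest entry esel; h2g_tlb1_rmap is the reverse map, offset by one
 * so that zero means "unused".
 */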
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		/* Walk the bitmap, invalidating each backing host entry. */
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;		/* clear lowest set bit */
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/* Already invalidated in between */
	if (!(ref->flags & E500_TLB_VALID))
		return;

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags &= ~E500_TLB_VALID;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags |= E500_TLB_VALID;

	/* Mark the page dirty while we still hold a reference to it. */
	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

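/*
 * Core of the shadow mapping path: translate gfn to a host pfn, pick a
 * page size (tsize) usable by both sides, record the mapping in *ref and
 * fill in the shadow entry.  For TLB1, the VM_PFNMAP and hugetlb cases
 * below try to map more than 4K at a time.
 */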
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);
	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				/* 1K << tsize bytes = 1 << (tsize - 2) 4K pages */
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both
			 * the host and the guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

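	/*
	 * Fallback for ordinary memory: a plain gfn_to_pfn_memslot() lookup.
	 * tsize may still exceed 4K here (hugetlb case above); masking pfn
	 * and gvaddr down to the tsize boundary presumes the backing pages
	 * are naturally aligned.
	 */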
	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
			       (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return 0;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

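/*
 * Pick a host TLB1 slot for a new shadow entry.  host_tlb1_nv is a simple
 * round-robin next-victim pointer over the usable part of TLB1; if the
 * chosen slot is already in use, the previous owner's bit is cleared from
 * its g2h bitmap before the slot is reassigned.
 */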
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB. */
/* For both one-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

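/*
 * Entry point from the guest TLB miss handlers: index packs the guest
 * tlbsel/esel pair (see tlbsel_of()/esel_of()), eaddr is the faulting
 * guest virtual address and gpaddr the guest physical address it maps to.
 */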
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;
	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}
	default:
		BUG();
	}
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the page to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

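/*
 * Host TLB geometry is read once from TLB0CFG/TLB1CFG at init time.
 * TLB0 is set-associative; TLB1 is fully associative, so ways == entries
 * and there is a single set.
 */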
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}