/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}
static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
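
/*
 * Illustrative example, assuming the standard e500 MAS3 layout in which each
 * user permission bit sits one position above its supervisor counterpart
 * (MAS3_SR 0x1 -> MAS3_UR 0x2, MAS3_SW 0x4 -> MAS3_UW 0x8): a guest entry
 * granting SR|SW is shadowed as also granting UR|UW, because guest
 * supervisor code runs with the host in user mode.
 */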
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	/* Force memory coherence (M) on SMP hosts. */
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}
/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}
/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}
/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}
#ifdef CONFIG_KVM_E500
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif
static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (tlbsel == 1 &&
	    vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			/* index of the lowest set bit in the shadow bitmap */
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
		local_irq_restore(flags);

		return;
	}

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
}
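
/*
 * Worked example, assuming the bitmap scheme above: if g2h_tlb1_map[esel]
 * is 0x5, host TLB1 slots 0 and 2 shadow this guest entry. Each loop pass
 * isolates the lowest set bit with tmp & -tmp, turns it into a slot index
 * via __ilog2_u64(), invalidates that host slot, and clears the bit with
 * tmp &= tmp - 1 until no shadow copies remain.
 */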
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}
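
/*
 * Example, assuming a hypothetical 512-entry, 4-way guest TLB0 (128 sets):
 * an address whose page index is 130 selects set 130 & 127 = 2, so
 * tlb0_set_base() returns 2 * 4 = 8, the index of that set's first way
 * within gtlb_arch[].
 */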
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}
/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;
		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		/* as == -1 acts as a wildcard matching either address space */
		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		ref->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		if (ref->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(ref->pfn);
		else
			kvm_release_pfn_clean(ref->pfn);

		ref->flags = 0;
	}
}
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
}
static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i, stlbsel;

	kvmppc_e500_tlbil_all(vcpu_e500);

	for (stlbsel = 0; stlbsel < 2; stlbsel++)
		for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->tlb_refs[stlbsel][i];
			kvmppc_e500_ref_release(ref);
		}

	clear_tlb_privs(vcpu_e500);
}
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(get_tlbmiss_tid(vcpu))
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}
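
/*
 * Illustrative note, assuming the standard MAS4 layout: the shifts above
 * extract the hardware-defined default fields from MAS4 -- bit 28 is
 * TLBSELD (the default TLB for misses) and bits 7..11 are TSIZED (the
 * default page size) -- so the guest sees the same miss defaults a real
 * e500 would provide.
 */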
/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
}
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}
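
	/*
	 * Note on the tsize arithmetic above and below, assuming the Book3E
	 * power-of-two TSIZE encoding (page size = 1KB << tsize, so tsize 2
	 * is 4KB) and a 4KB host PAGE_SHIFT: 1 << (tsize - 2) converts a
	 * tsize into a count of 4KB pages, and the search loop steps tsize
	 * by 2 because e500v2 cores implement only power-of-4 page sizes
	 * (even tsize values).
	 */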
	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);
}
/* XXX only map the one-one case, for now use TLB0 */
static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				 int esel,
				 struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
}
/* Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref;
	unsigned int victim;

	victim = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][victim];
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	if (vcpu_e500->h2g_tlb1_rmap[victim]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
	}
	vcpu_e500->h2g_tlb1_rmap[victim] = esel;

	return victim;
}
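
/*
 * Summary of the scheme above, for reference: host TLB1 shadow slots are
 * handed out round-robin, so a slot about to be reused may still be
 * recorded in another guest entry's g2h_tlb1_map bitmap. The h2g_tlb1_rmap
 * reverse map lets us find that stale owner and clear its bit before the
 * slot is rebound to the new guest entry.
 */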
static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_tlbil_all(vcpu_e500);

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	/* EA bit 2 requests invalidate-all. */
	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_tlbil_all(vcpu_e500);

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}
/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe))
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
				      gtlbe->mas2, gtlbe->mas7_3);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
			sesel = 0; /* unused */
			break;

		case 1:
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}
/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
		struct kvm_translation *tr)
{
	int index;
	gva_t eaddr;
	u8 pid;
	u8 as;

	eaddr = tr->linear_address;
	pid = (tr->linear_address >> 32) & 0xff;
	as = (tr->linear_address >> 40) & 0x1;

	index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
	if (index < 0) {
		tr->valid = 0;
		return 0;
	}

	tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
	/* XXX what does "writeable" and "usermode" even mean? */
	tr->valid = 1;

	return 0;
}
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
		gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
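
/*
 * Example: for a 4KB entry pgmask is 0xfff, so the translation keeps the
 * entry's real page number and carries the low 12 bits of the effective
 * address through as the page offset.
 */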
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = 0; /* unused */
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
					&priv->ref, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe, esel);
		break;
	}

	default:
		BUG();
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	clear_tlb1_bitmap(vcpu_e500);
	kfree(vcpu_e500->g2h_tlb1_map);
	clear_tlb_refs(vcpu_e500);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.mas0 = vcpu->arch.shared->mas0;
	sregs->u.e.mas1 = vcpu->arch.shared->mas1;
	sregs->u.e.mas2 = vcpu->arch.shared->mas2;
	sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
	sregs->u.e.mas4 = vcpu->arch.shared->mas4;
	sregs->u.e.mas6 = vcpu->arch.shared->mas6;

	sregs->u.e.mmucfg = vcpu->arch.mmucfg;
	sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
	sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
	sregs->u.e.tlbcfg[2] = 0;
	sregs->u.e.tlbcfg[3] = 0;
}
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
		vcpu->arch.shared->mas0 = sregs->u.e.mas0;
		vcpu->arch.shared->mas1 = sregs->u.e.mas1;
		vcpu->arch.shared->mas2 = sregs->u.e.mas2;
		vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
		vcpu->arch.shared->mas4 = sregs->u.e.mas4;
		vcpu->arch.shared->mas6 = sregs->u.e.mas6;
	}

	return 0;
}
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	u64 *g2h_bitmap = NULL;
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);

	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
			     GFP_KERNEL);
	if (!g2h_bitmap) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];
	vcpu_e500->g2h_tlb1_map = g2h_bitmap;

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));
	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

	vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params.tlb_sizes[0] <= 2048)
		vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
	vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
	vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;
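
	/*
	 * Reminder (paraphrasing the checks earlier in this function): the
	 * guest's TLB1 is modeled as fully associative, so its ways count
	 * equals its entry count and it has a single set, while TLB0's
	 * geometry comes from the user-supplied ways/sets split.
	 */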
	return 0;

err_put_page:
	kfree(privs[0]);
	kfree(privs[1]);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);

err_pages:
	kfree(pages);
	return ret;
}
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	clear_tlb_refs(vcpu_e500);
	return 0;
}
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}
	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->g2h_tlb1_map)
		goto err;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		goto err;
	/* Init TLB configuration register */
	vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
	vcpu->arch.tlbcfg[0] |=
		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
	vcpu->arch.tlbcfg[1] |=
		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

	return 0;

err:
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	return -1;
}
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->h2g_tlb1_rmap);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
}