/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

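/*
 * Illustrative note (added for exposition, not in the original source):
 * shadow TLB1 slots are indexed from the top of the host TLB1 down, so
 * with e.g. a 64-entry host TLB1, shadow esel 0 maps to hardware entry
 * 63, esel 1 to entry 62, and so on. This keeps KVM's shadow entries
 * away from the host's own tlbcam entries at the bottom of TLB1.
 */
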
/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 *
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps last used shadow ID on local core.
 * The valid range of shadow ID is [1..255] */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];
	return &vcpu_e500->gtlb_arch[offset + entry];
}

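/*
 * Illustrative sketch (added for exposition): the guest TLBs live in one
 * flat gtlb_arch[] array, TLB0 first and TLB1 after it, so with e.g. a
 * 512-entry TLB0, TLB1 entry 3 is gtlb_arch[512 + 3]:
 *
 *	gtlb_offset[0] = 0;
 *	gtlb_offset[1] = gtlb_params[0].entries;
 */
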
/*
 * Allocate a free shadow id and setup a valid sid mapping in given entry.
 * A mapping is only valid when vcpu_id_table and pcpu_id_table match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */

static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check if given entry contains a valid shadow id mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */

static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}

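/*
 * Illustrative usage sketch (added for exposition, not part of the
 * original flow): a caller pins preemption, looks up or allocates a
 * shadow id, and consumes it before re-enabling preemption, since sids
 * are only meaningful on the core that allocated them:
 *
 *	preempt_disable();
 *	sid = local_sid_lookup(&idt->id[as][gid][pr]);
 *	if (sid <= 0)
 *		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
 *	... write sid into MAS1 or arch.shadow_pid ...
 *	preempt_enable();
 */
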
/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
		struct kvmppc_vcpu_e500 *vcpu_e500,
		int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */

static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

/* Map guest pid to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of guest zero PID,
 * so that a guest tlbe with TID=0 can be accessed at any time. */
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}

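/*
 * Exposition note (an assumption based on the comment above, not
 * original code): e500 cores match a TLB entry when its TID equals any
 * of the PID registers, so keeping the shadow of guest TID=0 loaded in
 * PID1 lets TID=0 guest mappings hit regardless of the guest's current
 * PID in PID0.
 */
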
static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}

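/*
 * Worked example (added for exposition, assuming the standard MAS3 bit
 * layout, where each supervisor permission bit shifted left by one is
 * the matching user permission bit): a guest supervisor-only mapping
 * with mas3 = MAS3_SR|MAS3_SW, translated while the guest runs in
 * supervisor mode (usermode == 0), comes out as
 * MAS3_SR|MAS3_SW|MAS3_UR|MAS3_UW|MAS3_SX: the supervisor bits are
 * copied into the user bits (the guest kernel actually runs in host
 * user mode), and the host reserves full supervisor rights for itself.
 */
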
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

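/*
 * Exposition note (added): masking off CONFIG_PAGE_OFFSET turns the
 * kernel virtual address into a low, userspace-range address, so the
 * tlbsx above misses cheaply with PID=0/AS=0 and MAS0 comes back
 * carrying the hardware's victim hint for the correct set.
 */
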
/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Shadow PID may be expired on local core */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}

static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU, or
		 * if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
			     vcpu_e500->gtlb_params[0].ways);
}

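/*
 * Worked example (added for exposition): with 128 sets and 2 ways,
 * eaddr 0x10003000 selects set (0x10003 & 127) = 3, so that set's
 * entries start at index 3 * 2 = 6 within the guest TLB0 array.
 */
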
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel = get_tlb_esel_bit(vcpu);

	if (tlbsel == 0) {
		esel &= vcpu_e500->gtlb_params[0].ways - 1;
		esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
	} else {
		esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
	}

	return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_params[tlbsel].entries;
	unsigned int set_base, offset;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = vcpu_e500->gtlb_params[0].ways;
	} else {
		set_base = 0;
	}

	offset = vcpu_e500->gtlb_offset[tlbsel];

	for (i = 0; i < size; i++) {
		struct kvm_book3e_206_tlb_entry *tlbe =
			&vcpu_e500->gtlb_arch[offset + set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		ref->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		if (ref->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(ref->pfn);
		else
			kvm_release_pfn_clean(ref->pfn);

		ref->flags = 0;
	}
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel = 0;
	int i;

	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
		kvmppc_e500_ref_release(ref);
	}
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	kvmppc_e500_id_table_reset_all(vcpu_e500);

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}

static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
	tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

	vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
		| (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
	vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
}

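/*
 * Exposition note (an assumption about the MAS4 layout, not original
 * code): the open-coded shifts above extract the default fields from
 * the guest's MAS4: bit 28 is TLBSELD (default TLB for misses), bits
 * 16-19 are TIDSELD (which PID register supplies the TID), and bits
 * 7-11 are TSIZED (default page size), matching the hardware miss
 * behavior being emulated here.
 */
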
/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(
	struct kvmppc_vcpu_e500 *vcpu_e500,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
		| e500_shadow_mas3_attrib(gtlbe->mas7_3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
}

static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */
			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
}

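/*
 * Exposition sketch (added; a restatement of the math above, assuming
 * the Book3E TSIZE encoding): a TSIZE value t encodes a page of
 * 2^(t + 10) bytes, so BOOK3E_PAGESZ_4K (2) is 4KB, and each step of
 * "tsize -= 2" above divides the page size by 4 (odd tsize values are
 * unimplemented on e500). That is also why the two tsize_pages
 * computations agree when PAGE_SHIFT == 12:
 *
 *	1 << (tsize + 10 - PAGE_SHIFT) == 1 << (tsize - 2)
 */
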
/* XXX only map the one-one case, for now use TLB0 */
static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				 int esel,
				 struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many; for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct tlbe_ref *ref;
	unsigned int victim;

	victim = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][victim];
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

	return victim;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

static inline int kvmppc_e500_gtlbe_invalidate(
				struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
		     esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct kvm_book3e_206_tlb_entry *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);
	vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
	vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu->arch.shared->mas1 = gtlbe->mas1;
	vcpu->arch.shared->mas2 = gtlbe->mas2;
	vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu);
	unsigned int pid = get_cur_spid(vcpu);
	int esel, tlbsel;
	struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = get_entry(vcpu_e500, tlbsel, esel);
			break;
		}
	}

	if (gtlbe) {
		esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 = gtlbe->mas1;
		vcpu->arch.shared->mas2 = gtlbe->mas2;
		vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
	} else {
		int victim;

		/* since we only have two TLBs, only lower bit is used. */
		tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
			| MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu->arch.shared->mas1 =
			  (vcpu->arch.shared->mas6 & MAS6_SPID0)
			| ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
		vcpu->arch.shared->mas2 &= MAS2_EPN;
		vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
					   MAS2_ATTRIB_MASK;
		vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
					     MAS3_U2 | MAS3_U3;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu);
	esel = get_tlb_esel(vcpu, tlbsel);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	if (get_tlb_v(gtlbe))
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu->arch.shared->mas1;
	gtlbe->mas2 = vcpu->arch.shared->mas2;
	gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

	trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
				      gtlbe->mas2, gtlbe->mas7_3);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct kvm_book3e_206_tlb_entry stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
			sesel = 0; /* unused */

			break;

		case 1:
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

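/*
 * Exposition note (added): for a TLB1 entry the guest may request e.g.
 * a 256MB page, but only a 4KB host mapping covering the start of the
 * region is created here. Accesses elsewhere in the region fault into
 * kvmppc_mmu_map(), which shadows further pieces on demand (possibly
 * larger than 4KB, if kvmppc_e500_shadow_map finds a suitably aligned
 * contiguous pfn range).
 */
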
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry *gtlbe;
	u64 pgmask;

	gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
	pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = 0; /* unused */
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					&priv->ref, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
			gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int i;

	clear_tlb_refs(vcpu_e500);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);

	if (vcpu_e500->shared_tlb_pages) {
		vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
					  PAGE_SIZE)));

		for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
			set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
			put_page(vcpu_e500->shared_tlb_pages[i]);
		}

		vcpu_e500->num_shared_tlb_pages = 0;
		vcpu_e500->shared_tlb_pages = NULL;
	} else {
		kfree(vcpu_e500->gtlb_arch);
	}

	vcpu_e500->gtlb_arch = NULL;
}

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_params params;
	char *virt;
	struct page **pages;
	struct tlbe_priv *privs[2] = {};
	size_t array_len;
	u32 sets;
	int num_pages, ret, i;

	if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
			   sizeof(params)))
		return -EFAULT;

	if (params.tlb_sizes[1] > 64)
		return -EINVAL;
	if (params.tlb_ways[1] != params.tlb_sizes[1])
		return -EINVAL;
	if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
		return -EINVAL;
	if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
		return -EINVAL;

	if (!is_power_of_2(params.tlb_ways[0]))
		return -EINVAL;

	sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
	if (!is_power_of_2(sets))
		return -EINVAL;

	array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
	array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

	if (cfg->array_len < array_len)
		return -EINVAL;

	num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
		    cfg->array / PAGE_SIZE;
	pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
	if (ret < 0)
		goto err_pages;

	if (ret != num_pages) {
		num_pages = ret;
		ret = -EFAULT;
		goto err_put_page;
	}

	virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
	if (!virt) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
			   GFP_KERNEL);
	privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
			   GFP_KERNEL);

	if (!privs[0] || !privs[1]) {
		ret = -ENOMEM;
		goto err_put_page;
	}

	free_gtlb(vcpu_e500);

	vcpu_e500->gtlb_priv[0] = privs[0];
	vcpu_e500->gtlb_priv[1] = privs[1];

	vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
		(virt + (cfg->array & (PAGE_SIZE - 1)));

	vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
	vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

	vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	if (params.tlb_sizes[0] <= 2048)
		vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
	vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
	vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->shared_tlb_pages = pages;
	vcpu_e500->num_shared_tlb_pages = num_pages;

	vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
	vcpu_e500->gtlb_params[0].sets = sets;

	vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
	vcpu_e500->gtlb_params[1].sets = 1;

	return 0;

err_put_page:
	kfree(privs[0]);
	kfree(privs[1]);

	for (i = 0; i < num_pages; i++)
		put_page(pages[i]);

err_pages:
	kfree(pages);
	return ret;
}

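/*
 * Exposition sketch (added; illustrative numbers only): the guest TLB
 * array is shared with userspace. If cfg->array starts 0x100 bytes into
 * a page and holds 512 TLB0 plus 64 TLB1 entries, the code above pins
 * and vmap()s every page the array touches, then points gtlb_arch at
 * virt + 0x100; TLB1's entries follow TLB0's at gtlb_offset[1] = 512.
 * Userspace and KVM then read and write the same physical pages.
 */
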
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *dirty)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	clear_tlb_refs(vcpu_e500);
	return 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
	int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

	vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
	vcpu_e500->gtlb_params[0].sets =
		KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_params[1].sets = 1;

	vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
	if (!vcpu_e500->gtlb_arch)
		return -ENOMEM;

	vcpu_e500->gtlb_offset[0] = 0;
	vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[0].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
					  vcpu_e500->gtlb_params[1].entries,
					  GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
	vcpu_e500->tlb0cfg |=
		vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
			     ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_params[1].entries;
	vcpu_e500->tlb1cfg |=
		vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

	return 0;

err:
	free_gtlb(vcpu_e500);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	free_gtlb(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);

	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
}