/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
 * TLB handlers run from KSEG0
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>

#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
#include <asm/tlbdebug.h>

/* CONFIG_MIPS_MT is temporarily undefined while including r4kcache.h */
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#define KVM_GUEST_PC_TLB    0
#define KVM_GUEST_SP_TLB    1
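
/*
 * Return the host ASID currently assigned to the guest kernel address
 * space (guest_kernel_mm) on this CPU.
 */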
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, kern_mm);
}
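
/*
 * Return the host ASID currently assigned to the guest user address
 * space (guest_user_mm) on this CPU.
 */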
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        int cpu = smp_processor_id();

        return cpu_asid(cpu, user_mm);
}

/* Structure defining a TLB entry data set. */
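
/**
 * kvm_mips_dump_host_tlbs() - Dump the host TLB to the kernel log.
 *
 * Print the host TLB registers and all host TLB entries, with interrupts
 * disabled, for debugging purposes.
 */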
void kvm_mips_dump_host_tlbs(void)
{
        unsigned long flags;

        local_irq_save(flags);

        kvm_info("HOST TLBs:\n");
        dump_tlb_regs();
        pr_info("\n");
        dump_tlb_all();

        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
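
/**
 * kvm_mips_dump_guest_tlbs() - Dump the guest TLB to the kernel log.
 * @vcpu:	VCPU whose software guest TLB array is dumped.
 *
 * Print the guest CP0_EntryHi and every guest TLB entry (EntryHi, EntryLo0/1,
 * cache attributes and page mask) for debugging purposes. Entries with neither
 * EntryLo valid are marked with a '*'.
 */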
void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb tlb;
        int i;

        kvm_info("Guest TLBs:\n");
        kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                tlb = vcpu->arch.guest_tlb[i];
                kvm_info("TLB%c%3d Hi 0x%08lx ",
                         (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
                                                        ? ' ' : '*',
                         i, tlb.tlb_hi);
                kvm_info("Lo0=0x%09llx %c%c attr %lx ",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
                         (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
                kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
                         (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
                         (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
                         (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
                         tlb.tlb_mask);
        }
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
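
/**
 * kvm_mips_guest_tlb_lookup() - Search the guest TLB for a matching entry.
 * @vcpu:	VCPU owning the software guest TLB array.
 * @entryhi:	EntryHi value (VPN2 and ASID) to look up.
 *
 * Returns:	Index of the matching guest TLB entry, or -1 if none matches.
 */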
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
        int i;
        int index = -1;
        struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;

        for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
                if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
                    TLB_HI_ASID_HIT(tlb[i], entryhi)) {
                        index = i;
                        break;
                }
        }

        /* Only dereference a TLB entry if the loop actually found a match */
        if (index >= 0)
                kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
                          __func__, entryhi, index,
                          tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]);
        else
                kvm_debug("%s: entryhi: %#lx, index: %d\n",
                          __func__, entryhi, index);

        return index;
}
EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
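
/**
 * kvm_mips_host_tlb_lookup() - Probe the host TLB for a guest virtual address.
 * @vcpu:	VCPU, used to select the guest kernel or guest user ASID.
 * @vaddr:	Guest virtual address to probe for.
 *
 * Returns:	Host TLB index of the matching entry, or a negative value on a
 *		probe miss.
 */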
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
        unsigned long old_entryhi, flags;
        int idx;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_kernel_asid(vcpu));
        else
                write_c0_entryhi((vaddr & VPN2_MASK) |
                                 kvm_mips_get_user_asid(vcpu));
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        /* Restore old ASID */
        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);

        kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);

        return idx;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
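
/*
 * Probe the host TLB for @entryhi and, if a matching entry is found,
 * overwrite it with a unique EntryHi and zeroed EntryLo values, effectively
 * invalidating it. Must be called with interrupts disabled.
 * Returns the probed index (negative if nothing matched).
 */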
static int _kvm_mips_host_tlb_inv(unsigned long entryhi)
{
        int idx;

        write_c0_entryhi(entryhi);
        mtc0_tlbw_hazard();

        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();

        if (idx >= current_cpu_data.tlbsize)
                BUG();

        if (idx >= 0) {
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                mtc0_tlbw_hazard();

                tlb_write_indexed();
                tlbw_use_hazard();
        }

        return idx;
}
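
/**
 * kvm_mips_host_tlb_inv() - Invalidate host TLB entries mapping a guest VA.
 * @vcpu:	VCPU whose guest ASIDs are used for the lookup.
 * @va:		Guest virtual address to invalidate.
 * @user:	Invalidate the mapping under the guest user ASID.
 * @kernel:	Invalidate the mapping under the guest kernel ASID.
 *
 * Returns:	0.
 */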
int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va,
                          bool user, bool kernel)
{
        int idx_user, idx_kernel;
        unsigned long flags, old_entryhi;

        local_irq_save(flags);

        old_entryhi = read_c0_entryhi();

        /* Invalidate the host mapping for the guest user ASID, if requested */
        if (user)
                idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                  kvm_mips_get_user_asid(vcpu));

        /* Invalidate the host mapping for the guest kernel ASID, if requested */
        if (kernel)
                idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) |
                                                kvm_mips_get_kernel_asid(vcpu));

        write_c0_entryhi(old_entryhi);
        mtc0_tlbw_hazard();

        local_irq_restore(flags);

        if (user && idx_user >= 0)
                kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_user_asid(vcpu), idx_user);
        if (kernel && idx_kernel >= 0)
                kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n",
                          __func__, (va & VPN2_MASK) |
                                    kvm_mips_get_kernel_asid(vcpu), idx_kernel);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);

/**
 * kvm_mips_suspend_mm() - Suspend the active mm.
 * @cpu:	The CPU we're running on.
 *
 * Suspend the active_mm, ready for a switch to a KVM guest virtual address
 * space. This is left active for the duration of guest context, including time
 * with interrupts enabled, so we need to be careful not to confuse e.g. cache
 * management IPIs.
 *
 * kvm_mips_resume_mm() should be called before context switching to a
 * different process so we don't need to worry about reference counting.
 *
 * This needs to be in static kernel code to avoid exporting init_mm.
 */
void kvm_mips_suspend_mm(int cpu)
{
        cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm));
        current->active_mm = &init_mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm);

/**
 * kvm_mips_resume_mm() - Resume the current process mm.
 * @cpu:	The CPU we're running on.
 *
 * Resume the mm of the current process, after a switch back from a KVM guest
 * virtual address space (see kvm_mips_suspend_mm()).
 */
void kvm_mips_resume_mm(int cpu)
{
        cpumask_set_cpu(cpu, mm_cpumask(current->mm));
        current->active_mm = current->mm;
}
EXPORT_SYMBOL_GPL(kvm_mips_resume_mm);