/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);
	gva_t gkseg = KVM_GUEST_KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else if (gkseg == KVM_GUEST_KSEG0)
		gpa = KVM_GUEST_CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

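/*
 * Coprocessor Unusable exception handler. CU1 (FPU) faults either deliver a
 * COP1 Unusable exception to the guest or hand the hardware FPU over to it;
 * faults on other coprocessors are passed to the instruction emulator.
 */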
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;
	default:
		BUG();
	}
	return ret;
}

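/*
 * A load faulted in a region with no valid mapping: fetch the faulting
 * instruction and emulate it as an MMIO load, exiting to userland to
 * complete the access.
 */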
static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
			     struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* A code fetch fault doesn't count as an MMIO */
	if (kvm_is_ifetch_fault(&vcpu->arch)) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the load */
	er = kvm_mips_emulate_load(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate load from MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
			      struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	int err;

	/* Fetch the instruction. */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	/* Emulate the store */
	er = kvm_mips_emulate_store(inst, cause, run, vcpu);
	if (er == EMULATE_FAIL) {
		kvm_err("Emulate store to MMIO space failed\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	} else {
		run->exit_reason = KVM_EXIT_MMIO;
	}
	return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
			       struct kvm_vcpu *vcpu, bool store)
{
	if (store)
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	else
		return kvm_mips_bad_load(cause, opc, run, vcpu);
}

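/*
 * TLB Modified exception: a store hit a valid but non-writable mapping.
 * Either relay the exception to the guest (whose own TLB entry wasn't
 * dirty), make the shadow mapping writable, or emulate the store as MMIO.
 */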
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}

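/* Common TLB load/store miss handler, shared by the ld/st wrappers below. */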
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		ret = kvm_mips_bad_store(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		ret = kvm_mips_bad_load(cause, opc, run, vcpu);
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

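/*
 * The following exception handlers share a common pattern: emulate or
 * re-inject the exception into the guest and resume, or exit to userland
 * with an internal error if emulation fails.
 */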
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	default:
		BUG();
	}
	return ret;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}

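/*
 * Tear down a GVA page table allocated by kvm_trap_emul_vcpu_init(). The
 * walk stops at the KSeg0 boundary (0x80000000) so that the host kernel
 * entries copied from init_mm.pgd are not freed.
 */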
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
	kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

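/*
 * One-time initialisation of the guest-visible CP0 state (PRId, Config
 * registers, Status, IntCtl, EBase) and the reset vector PC.
 */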
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif

	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					       const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}

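/* Registers exposed to userland via the KVM_{GET,SET}_ONE_REG ioctls */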
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		kvm_write_c0_guest_entrylo0(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		kvm_write_c0_guest_entrylo1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		/* No VInt, so no VS, read-only for now */
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

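/*
 * Scheduled in on a new CPU. If the task was preempted while in guest
 * context (PF_VCPU), restore the guest ASID and page tables for the current
 * guest mode (kernel or user).
 */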
static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;

	/*
	 * Were we in guest context? If so, restore the appropriate ASID based
	 * on the mode of the Guest (Kernel/User).
	 */
	if (current->flags & PF_VCPU) {
		mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
		if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
		    asid_version_mask(cpu))
			get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
		TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
		kvm_mips_suspend_mm(cpu);
		ehb();
	}

	return 0;
}

static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	kvm_lose_fpu(vcpu);

	if (current->flags & PF_VCPU) {
		/* Restore normal Linux process memory map */
		if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
		     asid_version_mask(cpu)))
			get_new_mmu_context(current->mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, current->mm));
		TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
		kvm_mips_resume_mm(cpu);
		ehb();
	}

	return 0;
}

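/*
 * Process pending VCPU requests. On KVM_REQ_TLB_FLUSH both GVA page tables
 * are flushed and their ASIDs invalidated on all CPUs; the ASID for the
 * current guest mode is regenerated immediately only if @reload_asid.
 */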
static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
					 bool reload_asid)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	int i;

	if (likely(!vcpu->requests))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		/*
		 * Both kernel & user GVA mappings must be invalidated. The
		 * caller is just about to check whether the ASID is stale
		 * anyway so no need to reload it here.
		 */
		kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
		kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
		for_each_possible_cpu(i) {
			cpu_context(i, kern_mm) = 0;
			cpu_context(i, user_mm) = 0;
		}

		/* Generate new ASID for current mode */
		if (reload_asid) {
			mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
			get_new_mmu_context(mm, cpu);
			htw_stop();
			write_c0_entryhi(cpu_asid(cpu, mm));
			TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
			htw_start();
		}
	}
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
	/* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
	WARN_ON_ONCE(irqs_disabled());

	/*
	 * The caller is about to access the GVA space, so we set the mode to
	 * force TLB flush requests to send an IPI, and also disable IRQs to
	 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
	 */
	local_irq_disable();

	/*
	 * Make sure the read of VCPU requests is not reordered ahead of the
	 * write to vcpu->mode, or we could miss a TLB flush request while
	 * the requester sees the VCPU as outside of guest mode and not needing
	 * a flush IPI.
	 */
	smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

	/*
	 * If a TLB flush has been requested (potentially while
	 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
	 * before accessing the GVA space, and be sure to reload the ASID if
	 * necessary as it'll be immediately used.
	 *
	 * TLB flush requests after this check will trigger an IPI due to the
	 * mode change above, which will be delayed due to IRQs disabled.
	 */
	kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu:	VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
	/*
	 * Make sure the write to vcpu->mode is not reordered in front of GVA
	 * accesses, or a TLB flush requester may not think it necessary to send
	 * an IPI.
	 */
	smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

	/*
	 * Now that the access to GVA space is complete, it's safe for pending
	 * TLB flush request IPIs to be handled (which indicates completion).
	 */
	local_irq_enable();
}

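/*
 * Illustrative (not taken from a real caller) use of the lockless helpers
 * around a GVA space access:
 *
 *	kvm_trap_emul_gva_lockless_begin(vcpu);
 *	err = __get_user(inst, opc);
 *	kvm_trap_emul_gva_lockless_end(vcpu);
 */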
static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
				       struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
	struct mm_struct *mm;
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int i, cpu = smp_processor_id();
	unsigned int gasid;

	/*
	 * No need to reload ASID, IRQs are disabled already so there's no rush,
	 * and we'll check if we need to regenerate below anyway before
	 * re-entering the guest.
	 */
	kvm_trap_emul_check_requests(vcpu, cpu, false);

	if (KVM_GUEST_KERNEL_MODE(vcpu)) {
		mm = kern_mm;
	} else {
		mm = user_mm;

		/*
		 * Lazy host ASID regeneration / PT flush for guest user mode.
		 * If the guest ASID has changed since the last guest usermode
		 * execution, invalidate the stale TLB entries and flush GVA PT
		 * entries too.
		 */
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
			for_each_possible_cpu(i)
				cpu_context(i, user_mm) = 0;
			vcpu->arch.last_user_gasid = gasid;
		}
	}

	/*
	 * Check if ASID is stale. This may happen due to a TLB flush request or
	 * a lazy user MM invalidation.
	 */
	if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
	    asid_version_mask(cpu))
		get_new_mmu_context(mm, cpu);
}

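/*
 * Main guest entry path: deliver any pending interrupts, switch to the
 * guest address space with Linux page faulting and the hardware page table
 * walker disabled, run the guest, then restore the host memory map.
 */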
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	htw_start();

	pagefault_enable();

	return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_uninit = kvm_trap_emul_vcpu_uninit,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.flush_shadow_all = kvm_trap_emul_flush_shadow_all,
	.flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_load = kvm_trap_emul_vcpu_load,
	.vcpu_put = kvm_trap_emul_vcpu_put,
	.vcpu_run = kvm_trap_emul_vcpu_run,
	.vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}