/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;
static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}
static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}
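/*
 * Illustrative example (editorial sketch, not from the original file): on a
 * 64-bit host, writing v = 0xffffffff80100000 performs the first write with
 * MIPS_EBASE_WG set so the implementation latches the upper address bits;
 * the second, plain write of v then leaves EBase.WG clear, since the caller
 * did not include it in v.
 */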
/*
 * These Config bits may be writable by the guest:
 *	Config:	[K23, KU] (!TLB), K0
 *	Config2:	[TU, SU] (impl)
 *	Config4:	FTLBPageSize
 *	Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */
static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}
static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}
static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}
/*
 * VZ optionally allows these additional Config bits to be written by root:
 *	Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 *	Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *			VInt, SP, CDMM, MT, SM, TL]
 *	Config4:	M, [VTLBSizeExt, MMUSizeExt]
 */
static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu);
}
static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}
static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}
static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case 2:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}
static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	switch (intr) {
	case -2:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}
static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO]    = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};
static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}
static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}
/*
 * VZ guest timer handling.
 */

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}
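/*
 * Worked example (illustrative values, not from the original file): the
 * guest's CP0_Count reads as Root.Count + GTOffset. With Root.Count = 0x1000
 * and compare = 0x0800, the write above sets GTOffset = 0xfffff800, so
 * Guest.Count equals 0x0800 (exactly Guest.Compare) at this instant and then
 * counts away from it, so restoring Cause below cannot race with a fresh
 * spurious Compare match.
 */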
/**
 * kvm_vz_restore_timer() - Restore guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}
/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = read_gc0_compare();
	cause = read_gc0_cause();

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}
/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}
/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}
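/*
 * Worked example (editorial, with values implied by the table above): for
 * AM = 1 (MK), 0x70080000 << 1 = 0xe0100000, which is negative as an s32, so
 * the segment is always TLB mapped. For AM = 4 (MUSUK), 0x70080000 << 4 =
 * 0x00800000 is positive, and the further << 8 yields 0x80000000, negative,
 * so the mapping decision falls through to the kernel-mode and EVA checks.
 */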
/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out that
				 * segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}

		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
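/*
 * Usage sketch (hypothetical values): for a guest without segmentation
 * support, a KSeg0 address such as gva = 0x80001000 takes the legacy
 * unmapped path above and yields *gpa = 0x80001000 & 0x1fffffff = 0x00001000
 * with no guest TLB lookup; TLB mapped addresses instead fall through to
 * kvm_vz_guest_tlb_lookup().
 */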
/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}
static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}
static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
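/*
 * Editorial note (illustrative): guest Count/Compare accesses reach this
 * emulation because kvm_vz_hardware_enable() leaves GuestCtl0.GT clear, so
 * e.g. a guest "mfc0 $t0, $9" (Count) traps here and is answered from
 * kvm_mips_read_count()'s soft-timer view rather than the raw root counter.
 */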
static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}
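/*
 * Illustrative decode (per the MIPS CACHE op encoding): for
 * "cache 0x00, 0($a0)", op_inst = 0x00 gives cache = Cache_I and
 * op = Index_Invalidate_I, handled above with flush_icache_line_indexed();
 * an op_inst of 0x03 targets the secondary cache and is silently ignored by
 * the Cache_I/Cache_D filter.
 */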
static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:
	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}
static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC)
					kvm_mips_count_disable_cause(vcpu);
				else
					kvm_mips_count_enable_cause(vcpu);
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
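			/*
			 * Worked example (editorial): with old_cause.WP = 1,
			 * (~CAUSEF_WP | old_cause) keeps the WP bit in change,
			 * so a guest write of WP = 0 takes effect; with
			 * old_cause.WP = 0 the WP bit is masked out of change,
			 * so the guest cannot set it.
			 */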
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}
static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}
static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}
static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU))
			return EMULATE_FAIL;

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}
static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};
static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}
static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}
static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}
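/*
 * Worked example (illustrative, 32-bit host): MIPS_ENTRYLO_RI/XI sit at bits
 * 31/30 of the hardware register but at bits 63/62 of the 64-bit register
 * exposed by the KVM API, so entrylo_kvm_to_user(0x80000007) (RI set)
 * returns 0x8000000000000007, and entrylo_user_to_kvm() performs the exact
 * inverse.
 */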
static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;	/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
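/*
 * Walk-through (editorial sketch, assuming an 8-bit GuestID field, i.e.
 * GUESTID_MASK = 0xff): IDs are handed out as 1, 2, ..., 0xff; the next
 * increment wraps the masked bits to zero, so the version bits above the
 * mask advance, both TLBs are flushed, and allocation resumes just past the
 * new version (GuestID 0 stays reserved for root).
 */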
/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!vcpu->requests)
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}
static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}
static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
		    || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
						asid_version_mask(cpu))
			get_new_mmu_context(gpa_mm, cpu);
	}
}
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over time
	 * if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	return 0;
}
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}
/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * returns 0).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}
	return ret + 1;
}
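/*
 * Worked example (illustrative): Config1.MMUSize is a 6-bit field
 * (MIPS_CONF1_TLBS_SIZE = 6), so size = 128 gives MMUSize - 1 = 127: the low
 * six bits (63) land in Config1.MMUSize and the remaining bit in
 * Config4.VTLBSizeExt (or MMUSizeExt), which the read-back above reassembles
 * before returning ret + 1.
 */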
2420 static int kvm_vz_hardware_enable(void)
2422 unsigned int mmu_size, guest_mmu_size, ftlb_size;
2425 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap of
2426 * root wired and guest entries, the guest TLB may need resizing.
2428 mmu_size = current_cpu_data.tlbsizevtlb;
2429 ftlb_size = current_cpu_data.tlbsize - mmu_size;
2431 /* Try switching to maximum guest VTLB size for flush */
2432 guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
2433 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2434 kvm_vz_local_flush_guesttlb_all();
2437 * Reduce to make space for root wired entries and at least 2 root
2438 * non-wired entries. This does assume that long-term wired entries
2439 * won't be added later.
2441 guest_mmu_size = mmu_size - num_wired_entries() - 2;
2442 guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
2443 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2446 * Write the VTLB size, but if another CPU has already written, check it
2447 * matches or we won't provide a consistent view to the guest. If this
2448 * ever happens it suggests an asymmetric number of wired entries.
2450 if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
2451 WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
2452 "Available guest VTLB size mismatch"))
2456 * Enable virtualization features granting guest direct control of
2458 * CP0=1: Guest coprocessor 0 context.
2459 * AT=Guest: Guest MMU.
2460 * CG=1: Hit (virtual address) CACHE operations (optional).
2461 * CF=1: Guest Config registers.
2462 * CGI=1: Indexed flush CACHE operations (optional).
2464 write_c0_guestctl0(MIPS_GCTL0_CP0 |
2465 (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
2466 MIPS_GCTL0_CG | MIPS_GCTL0_CF);
2467 if (cpu_has_guestctl0ext)
2468 set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}
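	/*
	 * Illustrative example (editor's note): with an 8-bit GuestID field,
	 * guestid_mask == 0xff, so GUESTID_FIRST_VERSION == 0x100 and
	 * GUESTID_VERSION_MASK == ~0xff. Keeping a version number in the
	 * upper bits lets the allocator invalidate every previously assigned
	 * GuestID at once by bumping the version, as with ASID versioning.
	 */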
	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

	return 0;
}
static void kvm_vz_hardware_disable(void)
{
	kvm_vz_local_flush_guesttlb_all();

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}
static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		return 1;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		return 2;
#endif
	default:
		return 0;
	}
}
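/*
 * (Editor's note) Userspace queries these capabilities with the
 * KVM_CHECK_EXTENSION ioctl; for KVM_CAP_MIPS_64BIT, a return of 2 is
 * understood as supporting both 64-bit guest registers and 64-bit guest
 * addressing.
 */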
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}
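/*
 * (Editor's note) A vzguestid of 0 never matches a valid GuestID version, so
 * each physical CPU allocates a fresh GuestID the first time this VCPU is
 * loaded there.
 */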
static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but
	 * the soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);
	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
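	/*
	 * (Editor's note) The low bits of EBase are the read-only CPUNum
	 * field, so seeding it with vcpu_id above gives each virtual CPU a
	 * distinct CPU number while keeping the exception base at the usual
	 * KSEG0 address.
	 */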
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}
	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
				((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}
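	/*
	 * Worked example (editor's note, not from the original source): with
	 * cpu_vmbits == 40, the XContextConfig value above is
	 * ((1ull << 27) - 1) << 4 == 0x7ffffff0, i.e. bits 30:4 set, matching
	 * the "bits SEGBITS-13+3:4" comment for a 40-bit virtual address
	 * space.
	 */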
	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}
	/* reset HTW registers */
	if (cpu_guest_has_htw && cpu_has_mips_r6) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}
	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
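/*
 * (Editor's note) 0x1fc00000 is the architectural MIPS reset vector; it is
 * mapped through CKSEG1 so the first guest fetches are uncached, as the
 * guest's caches cannot be assumed to be initialized at reset.
 */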
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs
		 * in the VM, so it doesn't make sense for the VCPUs to
		 * handle invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm)
		 * to kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}
static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}
static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}
static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}
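/*
 * (Editor's note) The wired guest TLB entries are saved again on the way out
 * so they survive whatever root, or another VCPU scheduled here, subsequently
 * does to the shared root/guest TLB on this CPU.
 */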
static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}