1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License. See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * KVM/MIPS: Support for hardware virtualization extensions
7  *
8  * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9  * Authors: Yann Le Du <ledu@kymasys.com>
10  */
12 #include <linux/errno.h>
13 #include <linux/err.h>
14 #include <linux/module.h>
15 #include <linux/preempt.h>
16 #include <linux/vmalloc.h>
17 #include <asm/cacheflush.h>
18 #include <asm/cacheops.h>
19 #include <asm/cmpxchg.h>
20 #include <asm/fpu.h>
21 #include <asm/hazards.h>
22 #include <asm/inst.h>
23 #include <asm/mmu_context.h>
24 #include <asm/r4kcache.h>
25 #include <asm/time.h>
26 #include <asm/tlb.h>
27 #include <asm/tlbex.h>
28
29 #include <linux/kvm_host.h>
30
31 #include "interrupt.h"
32
33 #include "trace.h"
35 /* Pointers to last VCPU loaded on each physical CPU */
36 static struct kvm_vcpu *last_vcpu[NR_CPUS];
37 /* Pointers to last VCPU executed on each physical CPU */
38 static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
40 /*
41  * Number of guest VTLB entries to use, so we can catch inconsistency between
42  * CPUs.
43  */
44 static unsigned int kvm_vz_guest_vtlb_size;
46 static inline long kvm_vz_read_gc0_ebase(void)
47 {
48 	if (sizeof(long) == 8 && cpu_has_ebase_wg)
49 		return read_gc0_ebase_64();
50 	else
51 		return read_gc0_ebase();
52 }
54 static inline void kvm_vz_write_gc0_ebase(long v)
55 {
56 	/*
57 	 * First write with WG=1 to write upper bits, then write again in case
58 	 * WG should be left at 0.
59 	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
60 	 */
61 	if (sizeof(long) == 8 &&
62 	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
63 		write_gc0_ebase_64(v | MIPS_EBASE_WG);
64 		write_gc0_ebase_64(v);
65 	} else {
66 		write_gc0_ebase(v | MIPS_EBASE_WG);
67 		write_gc0_ebase(v);
68 	}
69 }
71 /*
72  * These Config bits may be writable by the guest:
73  * Config:	[K23, KU] (!TLB), K0
74  * Config1:	(none)
75  * Config2:	[TU, SU] (impl)
76  * Config3:	ISAOnExc
77  * Config4:	FTLBPageSize
78  * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
79  */
81 static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
82 {
83 	return CONF_CM_CMASK;
84 }
85
86 static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
87 {
88 	return 0;
89 }
90
91 static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
92 {
93 	return 0;
94 }
96 static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
97 {
98 	return MIPS_CONF3_ISA_OE;
99 }
100
101 static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
102 {
103 	/* no need to be exact */
104 	return MIPS_CONF4_VFTLBPAGESIZE;
105 }
107 static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
108 {
109 	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
110
111 	/* Permit MSAEn changes if MSA supported and enabled */
112 	if (kvm_mips_guest_has_msa(&vcpu->arch))
113 		mask |= MIPS_CONF5_MSAEN;
114
115 	/*
116 	 * Permit guest FPU mode changes if FPU is enabled and the relevant
117 	 * feature exists according to FIR register.
118 	 */
119 	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
120 		if (cpu_has_ufr)
121 			mask |= MIPS_CONF5_UFR;
122 		if (cpu_has_fre)
123 			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
124 	}
125
126 	return mask;
127 }
129 /*
130  * VZ optionally allows these additional Config bits to be written by root:
131  * Config:	M
132  * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
133  * Config2:	M
134  * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
135  *		VInt, SP, CDMM, MT, SM, TL]
136  * Config4:	M, [VTLBSizeExt, MMUSizeExt]
137  * Config5:	MRP
138  */
140 static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
141 {
142 	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
143 }
144
145 static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
146 {
147 	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
148
149 	/* Permit FPU to be present if FPU is supported */
150 	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
151 		mask |= MIPS_CONF1_FP;
152
153 	return mask;
154 }
155
156 static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
157 {
158 	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
159 }
160
161 static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
162 {
163 	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
164 		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
165
166 	/* Permit MSA to be present if MSA is supported */
167 	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
168 		mask |= MIPS_CONF3_MSA;
169
170 	return mask;
171 }
172
173 static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
174 {
175 	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
176 }
177
178 static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
179 {
180 	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
181 }
183 static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
184 {
185 	/* VZ guest has already converted gva to gpa */
186 	return gva;
187 }
189 static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
190 {
191 	set_bit(priority, &vcpu->arch.pending_exceptions);
192 	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
193 }
194
195 static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
196 {
197 	clear_bit(priority, &vcpu->arch.pending_exceptions);
198 	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
199 }
201 static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
202 {
203 	/*
204 	 * timer expiry is asynchronous to vcpu execution therefore defer guest
205 	 * cp0 accesses
206 	 */
207 	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
208 }
209
210 static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
211 {
212 	/*
213 	 * timer expiry is asynchronous to vcpu execution therefore defer guest
214 	 * cp0 accesses
215 	 */
216 	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
217 }
219 static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
220 				   struct kvm_mips_interrupt *irq)
221 {
222 	int intr = (int)irq->irq;
223
224 	/*
225 	 * interrupts are asynchronous to vcpu execution therefore defer guest
226 	 * cp0 accesses
227 	 */
228 	switch (intr) {
229 	case 2:
230 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
231 		break;
232
233 	case 3:
234 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
235 		break;
236
237 	case 4:
238 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
239 		break;
240
241 	default:
242 		break;
243 	}
244 }
247 static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
248 				     struct kvm_mips_interrupt *irq)
249 {
250 	int intr = (int)irq->irq;
251
252 	/*
253 	 * interrupts are asynchronous to vcpu execution therefore defer guest
254 	 * cp0 accesses
255 	 */
256 	switch (intr) {
257 	case -2:
258 		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
259 		break;
260
261 	case -3:
262 		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
263 		break;
264
265 	case -4:
266 		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
267 		break;
268
269 	default:
270 		break;
271 	}
272 }
275 static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
276 [MIPS_EXC_INT_TIMER] = C_IRQ5,
277 [MIPS_EXC_INT_IO] = C_IRQ0,
278 [MIPS_EXC_INT_IPI_1] = C_IRQ1,
279 	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
280 };
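/*
 * Illustrative note: these Cause.IP lines follow the usual MIPS convention,
 * with C_IRQ5 (Cause.IP7) carrying the timer interrupt and C_IRQ0..C_IRQ2
 * (Cause.IP2..IP4) carrying the emulated I/O source and the two IPI sources.
 */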
282 static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
283 				 u32 cause)
284 {
285 	u32 irq = (priority < MIPS_EXC_MAX) ?
286 		kvm_vz_priority_to_irq[priority] : 0;
287
288 	switch (priority) {
289 	case MIPS_EXC_INT_TIMER:
290 		set_gc0_cause(C_TI);
291 		break;
292
293 	case MIPS_EXC_INT_IO:
294 	case MIPS_EXC_INT_IPI_1:
295 	case MIPS_EXC_INT_IPI_2:
296 		if (cpu_has_guestctl2)
297 			set_c0_guestctl2(irq);
298 		else
299 			set_gc0_cause(irq);
300 		break;
301
302 	default:
303 		break;
304 	}
305
306 	clear_bit(priority, &vcpu->arch.pending_exceptions);
307 	return 1;
308 }
310 static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
311 			       u32 cause)
312 {
313 	u32 irq = (priority < MIPS_EXC_MAX) ?
314 		kvm_vz_priority_to_irq[priority] : 0;
315
316 	switch (priority) {
317 	case MIPS_EXC_INT_TIMER:
318 		/*
319 		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
320 		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
321 		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
322 		 * supported or if not using GuestCtl2 Hardware Clear.
323 		 */
324 		if (cpu_has_guestctl2) {
325 			if (!(read_c0_guestctl2() & (irq << 14)))
326 				clear_c0_guestctl2(irq);
327 		} else {
328 			clear_gc0_cause(irq);
329 		}
330 		break;
331
332 	case MIPS_EXC_INT_IO:
333 	case MIPS_EXC_INT_IPI_1:
334 	case MIPS_EXC_INT_IPI_2:
335 		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
336 		if (cpu_has_guestctl2) {
337 			if (!(read_c0_guestctl2() & (irq << 14)))
338 				clear_c0_guestctl2(irq);
339 		} else {
340 			clear_gc0_cause(irq);
341 		}
342 		break;
343
344 	default:
345 		break;
346 	}
347
348 	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
349 	return 1;
350 }
352 /*
353  * VZ guest timer handling.
354  */
355
356 /**
357  * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
358  * @vcpu:	Virtual CPU.
359  *
360  * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
361  *		instead of software emulation of guest timer.
362  *		false otherwise.
363  */
364 static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
365 {
366 	if (kvm_mips_count_disabled(vcpu))
367 		return false;
368
369 	/* Chosen frequency must match real frequency */
370 	if (mips_hpt_frequency != vcpu->arch.count_hz)
371 		return false;
372
373 	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
374 	if (current_cpu_data.gtoffset_mask != 0xffffffff)
375 		return false;
376
377 	return true;
378 }
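/*
 * Illustrative note: the three early-outs above guard one invariant - the
 * hard timer can only stand in for the emulated one when CP0_GTOffset can
 * express a full 32-bit count delta and the host timer frequency matches
 * the count frequency the guest has been configured with.
 */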
380 /**
381  * _kvm_vz_restore_stimer() - Restore soft timer state.
382  * @vcpu:	Virtual CPU.
383  * @compare:	CP0_Compare register value, restored by caller.
384  * @cause:	CP0_Cause register to restore.
385  *
386  * Restore VZ state relating to the soft timer. The hard timer can be enabled
387  * later.
388  */
389 static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
390 				   u32 cause)
391 {
392 	/*
393 	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
394 	 * after Guest CP0_Compare.
395 	 */
396 	write_c0_gtoffset(compare - read_c0_count());
397
398 	back_to_back_c0_hazard();
399 	write_gc0_cause(cause);
400 }
402 /**
403  * _kvm_vz_restore_htimer() - Restore hard timer state.
404  * @vcpu:	Virtual CPU.
405  * @compare:	CP0_Compare register value, restored by caller.
406  * @cause:	CP0_Cause register to restore.
407  *
408  * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
409  * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
410  */
411 static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
412 				   u32 compare, u32 cause)
413 {
414 	u32 start_count, after_count;
415 	ktime_t freeze_time;
416 	unsigned long flags;
417
418 	/*
419 	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
420 	 * this with interrupts disabled to avoid latency.
421 	 */
422 	local_irq_save(flags);
423 	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
424 	write_c0_gtoffset(start_count - read_c0_count());
425 	local_irq_restore(flags);
426
427 	/* restore guest CP0_Cause, as TI may already be set */
428 	back_to_back_c0_hazard();
429 	write_gc0_cause(cause);
430
431 	/*
432 	 * The above sequence isn't atomic and would result in lost timer
433 	 * interrupts if we're not careful. Detect if a timer interrupt is due
434 	 * and queue it.
435 	 */
436 	back_to_back_c0_hazard();
437 	after_count = read_gc0_count();
438 	if (after_count - start_count > compare - start_count - 1)
439 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
440 }
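/*
 * Worked example of the wraparound-safe check above (illustrative values):
 * with start_count = 0xfffffff0 and compare = 0x00000010, an after_count of
 * 0x00000012 gives after_count - start_count = 0x22, which exceeds
 * compare - start_count - 1 = 0x1f, so a timer interrupt that fired during
 * the restore sequence is requeued rather than silently lost.
 */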
442 /**
443  * kvm_vz_restore_timer() - Restore timer state.
444  * @vcpu:	Virtual CPU.
445  *
446  * Restore soft timer state from saved context.
447  */
448 static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
449 {
450 	struct mips_coproc *cop0 = vcpu->arch.cop0;
451 	u32 cause, compare;
452
453 	compare = kvm_read_sw_gc0_compare(cop0);
454 	cause = kvm_read_sw_gc0_cause(cop0);
455
456 	write_gc0_compare(compare);
457 	_kvm_vz_restore_stimer(vcpu, compare, cause);
458 }
460 /**
461  * kvm_vz_acquire_htimer() - Switch to hard timer state.
462  * @vcpu:	Virtual CPU.
463  *
464  * Restore hard timer state on top of existing soft timer state if possible.
465  *
466  * Since hard timer won't remain active over preemption, preemption should be
467  * disabled by the caller.
468  */
469 void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
470 {
471 	u32 gctl0;
472
473 	gctl0 = read_c0_guestctl0();
474 	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
475 		/* enable guest access to hard timer */
476 		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);
477
478 		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
479 				       read_gc0_cause());
480 	}
481 }
483 /**
484  * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
485  * @vcpu:	Virtual CPU.
486  * @out_compare: Pointer to write compare value to.
487  * @out_cause:	Pointer to write cause value to.
488  *
489  * Save VZ guest timer state and switch to software emulation of guest CP0
490  * timer. The hard timer must already be in use, so preemption should be
491  * disabled.
492  */
493 static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
494 				u32 *out_compare, u32 *out_cause)
495 {
496 	u32 cause, compare, before_count, end_count;
497 	ktime_t before_time;
499 compare = read_gc0_compare();
500 *out_compare = compare;
502 before_time = ktime_get();
504 	/*
505 	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
506 	 * at which no pending timer interrupt is missing.
507 	 */
508 	before_count = read_gc0_count();
509 	back_to_back_c0_hazard();
510 	cause = read_gc0_cause();
511 	*out_cause = cause;
512
513 	/*
514 	 * Record a final CP0_Count which we will transfer to the soft-timer.
515 	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
516 	 * interrupts from just after the final CP0_Count point.
517 	 */
518 	back_to_back_c0_hazard();
519 	end_count = read_gc0_count();
520
521 	/*
522 	 * The above sequence isn't atomic, so we could miss a timer interrupt
523 	 * between reading CP0_Cause and end_count. Detect and record any timer
524 	 * interrupt due between before_count and end_count.
525 	 */
526 	if (end_count - before_count > compare - before_count - 1)
527 		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
528
529 	/*
530 	 * Restore soft-timer, ignoring a small amount of negative drift due to
531 	 * delay between freeze_hrtimer and setting CP0_GTOffset.
532 	 */
533 	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
534 }
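/*
 * Illustrative note: the -0x10000 argument above tolerates up to 64Ki
 * CP0_Count cycles of apparent backwards drift, covering the delay between
 * freezing the hrtimer and the final count read described in the comment.
 */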
536 /**
537  * kvm_vz_save_timer() - Save guest timer state.
538  * @vcpu:	Virtual CPU.
539  *
540  * Save VZ guest timer state and switch to soft guest timer if hard timer was
541  * in use.
542  */
543 static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
544 {
545 	struct mips_coproc *cop0 = vcpu->arch.cop0;
546 	u32 gctl0, compare, cause;
547
548 	gctl0 = read_c0_guestctl0();
549 	if (gctl0 & MIPS_GCTL0_GT) {
550 		/* disable guest use of hard timer */
551 		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
552
553 		/* save hard timer state */
554 		_kvm_vz_save_htimer(vcpu, &compare, &cause);
555 	} else {
556 		compare = read_gc0_compare();
557 		cause = read_gc0_cause();
558 	}
559
560 	/* save timer-related state to VCPU context */
561 	kvm_write_sw_gc0_cause(cop0, cause);
562 	kvm_write_sw_gc0_compare(cop0, compare);
563 }
565 /**
566  * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
567  * @vcpu:	Virtual CPU.
568  *
569  * Transfers the state of the hard guest timer to the soft guest timer, leaving
570  * guest state intact so it can continue to be used with the soft timer.
571  */
572 void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
573 {
574 	u32 gctl0, compare, cause;
575
576 	preempt_disable();
577 	gctl0 = read_c0_guestctl0();
578 	if (gctl0 & MIPS_GCTL0_GT) {
579 		/* disable guest use of timer */
580 		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);
581
582 		/* switch to soft timer */
583 		_kvm_vz_save_htimer(vcpu, &compare, &cause);
584
585 		/* leave soft timer in usable state */
586 		_kvm_vz_restore_stimer(vcpu, compare, cause);
587 	}
588 	preempt_enable();
589 }
592 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
593 * @inst: 32-bit instruction encoding.
595 * Finds whether @inst encodes an EVA memory access instruction, which would
596 * indicate that emulation of it should access the user mode address space
597 * instead of the kernel mode address space. This matters for MUSUK segments
598 * which are TLB mapped for user mode but unmapped for kernel mode.
600 * Returns: Whether @inst encodes an EVA accessor instruction.
602 static bool is_eva_access(union mips_instruction inst)
604 	if (inst.spec3_format.opcode != spec3_op)
605 		return false;
607 switch (inst.spec3_format.func) {
631 * is_eva_am_mapped() - Find whether an access mode is mapped.
632 * @vcpu: KVM VCPU state.
633 * @am: 3-bit encoded access mode.
634 * @eu: Segment becomes unmapped and uncached when Status.ERL=1.
636 * Decode @am to find whether it encodes a mapped segment for the current VCPU
637 * state. Where necessary @eu and the actual instruction causing the fault are
638 * taken into account to make the decision.
640 * Returns: Whether the VCPU faulted on a TLB mapped address.
642 static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
647 	/*
648 	 * Interpret access control mode. We assume address errors will already
649 	 * have been caught by the guest, leaving us with:
650 	 *      AM      UM  SM  KM  31..24 23..16
651 	 * UK    0 000          Unm   0      0
652 	 * MK    1 001          TLB   1
653 	 * MSK   2 010      TLB TLB   1
654 	 * MUSK  3 011  TLB TLB TLB   1
655 	 * MUSUK 4 100  TLB TLB Unm   0      1
656 	 * USK   5 101      Unm Unm   0      0
657 	 * -     6 110                0      0
658 	 * UUSK  7 111  Unm Unm Unm   0      0
659 	 *
660 	 * We shift a magic value by AM across the sign bit to find if always
661 	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
662 	 */
663 	am_lookup = 0x70080000 << am;
664 	if ((s32)am_lookup < 0) {
665 		/*
666 		 * Always TLB mapped, unless SegCtl.EU && ERL
667 		 */
668 		if (!eu || !(read_gc0_status() & ST0_ERL))
669 			return true;
670 	} else {
671 		am_lookup <<= 8;
672 		if ((s32)am_lookup < 0) {
674 			union mips_instruction inst;
675 			unsigned int status;
676 			u32 *opc;
677 			int err;
678
679 			/*
680 			 * TLB mapped if not in kernel mode
681 			 */
682 status = read_gc0_status();
683 			status = read_gc0_status();
684 			if (!(status & (ST0_EXL | ST0_ERL)) &&
685 			    (status & ST0_KSU))
686 				return true;
687 			/*
688 			 * EVA access instructions in kernel
689 			 * mode access user address space.
690 			 */
690 opc = (u32 *)vcpu->arch.pc;
691 			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
692 				opc += 1;
693 			err = kvm_get_badinstr(opc, vcpu, &inst.word);
694 			if (!err && is_eva_access(inst))
695 				return true;
696 		}
697 	}
698
699 	return false;
700 }
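/*
 * Illustrative breakdown of the magic constant (annotation): 0x70080000
 * packs both lookups into one word. Bits 30..28 (0x7 << 28) reach the sign
 * bit for am = 1..3, matching the always-mapped MK/MSK/MUSK rows of the
 * table above, while bit 19 (0x00080000) reaches the sign bit after the
 * extra << 8 when am = 4, matching MUSUK, which is TLB mapped only outside
 * kernel mode.
 */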
702 /**
703  * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
704  * @vcpu:	KVM VCPU state.
705  * @gva:	Guest virtual address to convert.
706  * @gpa:	Output guest physical address.
707  *
708  * Convert a guest virtual address (GVA) which is valid according to the guest
709  * context, to a guest physical address (GPA).
710  *
711  * Returns:	0 on success.
712  *		-errno on failure.
713  */
714 static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
715 			     unsigned long *gpa)
716 {
717 	u32 gva32 = gva;
718 	unsigned long segctl;
719
720 	if ((long)gva == (s32)gva32) {
721 /* Handle canonical 32-bit virtual address */
722 if (cpu_guest_has_segments) {
723 unsigned long mask, pa;
725 switch (gva32 >> 29) {
727 case 1: /* CFG5 (1GB) */
728 segctl = read_gc0_segctl2() >> 16;
729 mask = (unsigned long)0xfc0000000ull;
732 case 3: /* CFG4 (1GB) */
733 segctl = read_gc0_segctl2();
734 mask = (unsigned long)0xfc0000000ull;
736 case 4: /* CFG3 (512MB) */
737 segctl = read_gc0_segctl1() >> 16;
738 mask = (unsigned long)0xfe0000000ull;
740 case 5: /* CFG2 (512MB) */
741 segctl = read_gc0_segctl1();
742 mask = (unsigned long)0xfe0000000ull;
744 case 6: /* CFG1 (512MB) */
745 segctl = read_gc0_segctl0() >> 16;
746 mask = (unsigned long)0xfe0000000ull;
748 case 7: /* CFG0 (512MB) */
749 segctl = read_gc0_segctl0();
750 mask = (unsigned long)0xfe0000000ull;
754 * GCC 4.9 isn't smart enough to figure out that
755 * segctl and mask are always initialised.
760 if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
764 /* Unmapped, find guest physical address */
765 pa = (segctl << 20) & mask;
769 } else if ((s32)gva32 < (s32)0xc0000000) {
770 /* legacy unmapped KSeg0 or KSeg1 */
771 *gpa = gva32 & 0x1fffffff;
775 } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
777 if (cpu_guest_has_segments) {
779 * Each of the 8 regions can be overridden by SegCtl2.XR
780 * to use SegCtl1.XAM.
782 segctl = read_gc0_segctl2();
783 if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
784 segctl = read_gc0_segctl1();
785 if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
792 * Traditionally fully unmapped.
793 * Bits 61:59 specify the CCA, which we can just mask off here.
794 * Bits 58:PABITS should be zero, but we shouldn't have got here
797 *gpa = gva & 0x07ffffffffffffff;
803 	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
804 }
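/*
 * Example (illustrative): a guest KSeg0 address such as 0x80010000 takes the
 * legacy unmapped path above and yields GPA 0x00010000 (gva & 0x1fffffff),
 * while mapped useg/xuseg addresses fall through to the guest TLB lookup at
 * the end of the function.
 */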
807 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
808 * @vcpu: KVM VCPU state.
809 * @badvaddr: Root BadVAddr.
810 * @gpa: Output guest physical address.
812 * VZ implementations are permitted to report guest virtual addresses (GVA) in
813 * BadVAddr on a root exception during guest execution, instead of the more
814 * convenient guest physical addresses (GPA). When we get a GVA, this function
815 * converts it to a GPA, taking into account guest segmentation and guest TLB
818 * Returns: 0 on success.
821 static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
824 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
825 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
827 /* If BadVAddr is GPA, then all is well in the world */
828 if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
833 /* Otherwise we'd expect it to be GVA ... */
834 if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
835 "Unexpected gexccode %#x\n", gexccode))
838 /* ... and we need to perform the GVA->GPA translation in software */
839 return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
842 static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
844 u32 *opc = (u32 *) vcpu->arch.pc;
845 u32 cause = vcpu->arch.host_cp0_cause;
846 u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
847 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
851 * Fetch the instruction.
853 	if (cause & CAUSEF_BD)
854 		opc += 1;
855 	kvm_get_badinstr(opc, vcpu, &inst);
857 kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
858 exccode, opc, inst, badvaddr,
860 kvm_arch_vcpu_dump_regs(vcpu);
861 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
865 static unsigned long mips_process_maar(unsigned int op, unsigned long val)
867 /* Mask off unused bits */
868 unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
870 if (read_gc0_pagegrain() & PG_ELPA)
871 mask |= 0x00ffffff00000000ull;
872 if (cpu_guest_has_mvh)
873 mask |= MIPS_MAAR_VH;
875 	/* Set or clear VH */
876 	if (op == mtc_op) {
877 		/* clear VH */
878 		val &= ~MIPS_MAAR_VH;
879 	} else if (op == dmtc_op) {
880 		/* set VH to match VL */
881 		val &= ~MIPS_MAAR_VH;
882 		if (val & MIPS_MAAR_VL)
883 			val |= MIPS_MAAR_VH;
884 	}
885
886 	return val & mask;
887 }
889 static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
891 struct mips_coproc *cop0 = vcpu->arch.cop0;
893 val &= MIPS_MAARI_INDEX;
894 if (val == MIPS_MAARI_INDEX)
895 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
896 	else if (val < ARRAY_SIZE(vcpu->arch.maar))
897 		kvm_write_sw_gc0_maari(cop0, val);
898 }
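/*
 * Illustrative note on the MAARI semantics implemented above: writing
 * all-ones selects the highest implemented MAAR index, an in-range index is
 * taken as-is, and an out-of-range write leaves the previous index in place
 * (one permissible behaviour, as the architecture leaves such writes
 * UNPREDICTABLE).
 */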
900 static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
903 struct kvm_vcpu *vcpu)
905 struct mips_coproc *cop0 = vcpu->arch.cop0;
906 enum emulation_result er = EMULATE_DONE;
908 unsigned long curr_pc;
912 * Update PC and hold onto current PC in case there is
913 * an error and we want to rollback the PC
915 curr_pc = vcpu->arch.pc;
916 er = update_pc(vcpu, cause);
917 if (er == EMULATE_FAIL)
920 if (inst.co_format.co) {
921 switch (inst.co_format.func) {
923 er = kvm_mips_emul_wait(vcpu);
929 rt = inst.c0r_format.rt;
930 rd = inst.c0r_format.rd;
931 sel = inst.c0r_format.sel;
933 switch (inst.c0r_format.rs) {
936 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
937 cop0->stat[rd][sel]++;
939 if (rd == MIPS_CP0_COUNT &&
940 sel == 0) { /* Count */
941 val = kvm_mips_read_count(vcpu);
942 } else if (rd == MIPS_CP0_COMPARE &&
943 sel == 0) { /* Compare */
944 val = read_gc0_compare();
945 } else if (rd == MIPS_CP0_LLADDR &&
946 sel == 0) { /* LLAddr */
947 if (cpu_guest_has_rw_llb)
948 val = read_gc0_lladdr() &
952 } else if (rd == MIPS_CP0_LLADDR &&
953 sel == 1 && /* MAAR */
954 cpu_guest_has_maar &&
955 !cpu_guest_has_dyn_maar) {
956 /* MAARI must be in range */
957 BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
958 ARRAY_SIZE(vcpu->arch.maar));
959 val = vcpu->arch.maar[
960 kvm_read_sw_gc0_maari(cop0)];
961 } else if ((rd == MIPS_CP0_PRID &&
962 (sel == 0 || /* PRid */
963 sel == 2 || /* CDMMBase */
964 sel == 3)) || /* CMGCRBase */
965 (rd == MIPS_CP0_STATUS &&
966 (sel == 2 || /* SRSCtl */
967 sel == 3)) || /* SRSMap */
968 (rd == MIPS_CP0_CONFIG &&
969 (sel == 7)) || /* Config7 */
970 (rd == MIPS_CP0_LLADDR &&
971 (sel == 2) && /* MAARI */
972 cpu_guest_has_maar &&
973 !cpu_guest_has_dyn_maar) ||
974 (rd == MIPS_CP0_ERRCTL &&
975 (sel == 0))) { /* ErrCtl */
976 val = cop0->reg[rd][sel];
982 if (er != EMULATE_FAIL) {
984 if (inst.c0r_format.rs == mfc_op)
986 vcpu->arch.gprs[rt] = val;
989 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
990 KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
991 KVM_TRACE_COP0(rd, sel), val);
996 #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
997 cop0->stat[rd][sel]++;
999 val = vcpu->arch.gprs[rt];
1000 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
1001 KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
1002 KVM_TRACE_COP0(rd, sel), val);
1004 if (rd == MIPS_CP0_COUNT &&
1005 sel == 0) { /* Count */
1006 kvm_vz_lose_htimer(vcpu);
1007 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
1008 } else if (rd == MIPS_CP0_COMPARE &&
1009 sel == 0) { /* Compare */
1010 kvm_mips_write_compare(vcpu,
1011 vcpu->arch.gprs[rt],
1013 } else if (rd == MIPS_CP0_LLADDR &&
1014 sel == 0) { /* LLAddr */
1016 * P5600 generates GPSI on guest MTC0 LLAddr.
1017 * Only allow the guest to clear LLB.
1019 if (cpu_guest_has_rw_llb &&
1020 !(val & MIPS_LLADDR_LLB))
1021 write_gc0_lladdr(0);
1022 } else if (rd == MIPS_CP0_LLADDR &&
1023 sel == 1 && /* MAAR */
1024 cpu_guest_has_maar &&
1025 !cpu_guest_has_dyn_maar) {
1026 val = mips_process_maar(inst.c0r_format.rs,
1029 /* MAARI must be in range */
1030 BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
1031 ARRAY_SIZE(vcpu->arch.maar));
1032 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
1034 } else if (rd == MIPS_CP0_LLADDR &&
1035 (sel == 2) && /* MAARI */
1036 cpu_guest_has_maar &&
1037 !cpu_guest_has_dyn_maar) {
1038 kvm_write_maari(vcpu, val);
1039 } else if (rd == MIPS_CP0_ERRCTL &&
1040 (sel == 0)) { /* ErrCtl */
1041 /* ignore the written value */
1052 /* Rollback PC only if emulation was unsuccessful */
1053 if (er == EMULATE_FAIL) {
1054 kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
1055 curr_pc, __func__, inst.word);
1057 vcpu->arch.pc = curr_pc;
1063 static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
1064 u32 *opc, u32 cause,
1065 struct kvm_run *run,
1066 struct kvm_vcpu *vcpu)
1068 enum emulation_result er = EMULATE_DONE;
1069 u32 cache, op_inst, op, base;
1071 struct kvm_vcpu_arch *arch = &vcpu->arch;
1072 unsigned long va, curr_pc;
1075 * Update PC and hold onto current PC in case there is
1076 * an error and we want to rollback the PC
1078 curr_pc = vcpu->arch.pc;
1079 er = update_pc(vcpu, cause);
1080 if (er == EMULATE_FAIL)
1083 base = inst.i_format.rs;
1084 op_inst = inst.i_format.rt;
1085 	if (cpu_has_mips_r6)
1086 		offset = inst.spec3_format.simmediate;
1087 	else
1088 		offset = inst.i_format.simmediate;
1089 cache = op_inst & CacheOp_Cache;
1090 op = op_inst & CacheOp_Op;
1092 va = arch->gprs[base] + offset;
1094 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1095 cache, op, base, arch->gprs[base], offset);
1097 	/* Secondary or tertiary cache ops ignored */
1098 if (cache != Cache_I && cache != Cache_D)
1099 return EMULATE_DONE;
1102 case Index_Invalidate_I:
1103 flush_icache_line_indexed(va);
1104 return EMULATE_DONE;
1105 case Index_Writeback_Inv_D:
1106 flush_dcache_line_indexed(va);
1107 return EMULATE_DONE;
1108 case Hit_Invalidate_I:
1109 case Hit_Invalidate_D:
1110 case Hit_Writeback_Inv_D:
1111 if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
1112 /* We can just flush entire icache */
1113 local_flush_icache_range(0, 0);
1114 return EMULATE_DONE;
1117 /* So far, other platforms support guest hit cache ops */
1123 kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1124 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
1127 vcpu->arch.pc = curr_pc;
1129 return EMULATE_FAIL;
1132 static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
1133 struct kvm_vcpu *vcpu)
1135 enum emulation_result er = EMULATE_DONE;
1136 struct kvm_vcpu_arch *arch = &vcpu->arch;
1137 struct kvm_run *run = vcpu->run;
1138 union mips_instruction inst;
1143 * Fetch the instruction.
1145 	if (cause & CAUSEF_BD)
1146 		opc += 1;
1147 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
1149 return EMULATE_FAIL;
1151 switch (inst.r_format.opcode) {
1153 er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
1155 #ifndef CONFIG_CPU_MIPSR6
1157 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1158 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
1162 switch (inst.spec3_format.func) {
1163 #ifdef CONFIG_CPU_MIPSR6
1165 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
1166 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
1170 if (inst.r_format.rs || (inst.r_format.re >> 3))
1173 rd = inst.r_format.rd;
1174 rt = inst.r_format.rt;
1175 sel = inst.r_format.re & 0x7;
1177 		switch (rd) {
1178 		case MIPS_HWR_CC:	/* Read count register */
1179 			arch->gprs[rt] =
1180 				(long)(int)kvm_mips_read_count(vcpu);
1181 			break;
1182 		default:
1183 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1184 KVM_TRACE_HWR(rd, sel), 0);
1188 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1189 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
1191 er = update_pc(vcpu, cause);
1200 kvm_err("GPSI exception not supported (%p/%#x)\n",
1202 kvm_arch_vcpu_dump_regs(vcpu);
1210 static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
1211 struct kvm_vcpu *vcpu)
1213 enum emulation_result er = EMULATE_DONE;
1214 struct kvm_vcpu_arch *arch = &vcpu->arch;
1215 union mips_instruction inst;
1219 * Fetch the instruction.
1221 	if (cause & CAUSEF_BD)
1222 		opc += 1;
1223 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
1225 return EMULATE_FAIL;
1227 /* complete MTC0 on behalf of guest and advance EPC */
1228 if (inst.c0r_format.opcode == cop0_op &&
1229 inst.c0r_format.rs == mtc_op &&
1230 inst.c0r_format.z == 0) {
1231 int rt = inst.c0r_format.rt;
1232 int rd = inst.c0r_format.rd;
1233 int sel = inst.c0r_format.sel;
1234 unsigned int val = arch->gprs[rt];
1235 unsigned int old_val, change;
1237 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
1240 if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1241 /* FR bit should read as zero if no FPU */
1242 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1243 val &= ~(ST0_CU1 | ST0_FR);
1245 			/*
1246 			 * Also don't allow FR to be set if host doesn't support
1247 			 * it.
1248 			 */
1249 			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
1250 				val &= ~ST0_FR;
1252 old_val = read_gc0_status();
1253 change = val ^ old_val;
1255 if (change & ST0_FR) {
1257 * FPU and Vector register state is made
1258 * UNPREDICTABLE by a change of FR, so don't
1259 				 * even bother saving it.
1260 				 */
1261 				kvm_drop_fpu(vcpu);
1262 			}
1265 * If MSA state is already live, it is undefined how it
1266 * interacts with FR=0 FPU state, and we don't want to
1267 * hit reserved instruction exceptions trying to save
1268 * the MSA state later when CU=1 && FR=1, so play it
1269 * safe and save it first.
1271 			if (change & ST0_CU1 && !(val & ST0_FR) &&
1272 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1273 				kvm_lose_fpu(vcpu);
1274
1275 			write_gc0_status(val);
1276 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1277 u32 old_cause = read_gc0_cause();
1278 u32 change = old_cause ^ val;
1280 /* DC bit enabling/disabling timer? */
1281 if (change & CAUSEF_DC) {
1282 if (val & CAUSEF_DC) {
1283 kvm_vz_lose_htimer(vcpu);
1284 kvm_mips_count_disable_cause(vcpu);
1286 kvm_mips_count_enable_cause(vcpu);
1290 /* Only certain bits are RW to the guest */
1291 change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
1292 CAUSEF_IP0 | CAUSEF_IP1);
1294 /* WP can only be cleared */
1295 change &= ~CAUSEF_WP | old_cause;
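			/*
			 * Worked example (annotation): if old_cause has WP
			 * clear, ~CAUSEF_WP | old_cause has a zero WP bit, so
			 * a guest attempt to set WP is masked out of 'change';
			 * if old_cause has WP set, the mask bit is one and a
			 * write of zero toggles WP off. Net effect: the guest
			 * can clear WP but never set it.
			 */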
1297 write_gc0_cause(old_cause ^ change);
1298 } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
1299 write_gc0_intctl(val);
1300 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1301 old_val = read_gc0_config5();
1302 change = val ^ old_val;
1303 			/* Handle changes in FPU/MSA modes */
1304 			preempt_disable();
1305
1306 			/*
1307 			 * Propagate FRE changes immediately if the FPU
1308 			 * context is already loaded.
1309 			 */
1310 			if (change & MIPS_CONF5_FRE &&
1311 			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1312 				change_c0_config5(MIPS_CONF5_FRE, val);
1313
1314 			preempt_enable();
1315
1316 			val = old_val ^
1317 				(change & kvm_vz_config5_guest_wrmask(vcpu));
1318 			write_gc0_config5(val);
1320 kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
1325 if (er != EMULATE_FAIL)
1326 er = update_pc(vcpu, cause);
1328 kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
1336 static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
1337 struct kvm_vcpu *vcpu)
1339 	/*
1340 	 * Presumably this is due to MC (guest mode change), so let's trace some
1341 	 * relevant info.
1342 	 */
1343 	trace_kvm_guest_mode_change(vcpu);
1345 return EMULATE_DONE;
1348 static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
1349 struct kvm_vcpu *vcpu)
1351 enum emulation_result er;
1352 union mips_instruction inst;
1353 unsigned long curr_pc;
1356 	if (cause & CAUSEF_BD)
1357 		opc += 1;
1358 	err = kvm_get_badinstr(opc, vcpu, &inst.word);
1360 return EMULATE_FAIL;
1363 * Update PC and hold onto current PC in case there is
1364 * an error and we want to rollback the PC
1366 curr_pc = vcpu->arch.pc;
1367 er = update_pc(vcpu, cause);
1368 if (er == EMULATE_FAIL)
1371 er = kvm_mips_emul_hypcall(vcpu, inst);
1372 if (er == EMULATE_FAIL)
1373 vcpu->arch.pc = curr_pc;
1378 static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
1381 struct kvm_vcpu *vcpu)
1386 * Fetch the instruction.
1388 	if (cause & CAUSEF_BD)
1389 		opc += 1;
1390 	kvm_get_badinstr(opc, vcpu, &inst);
1392 kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
1393 gexccode, opc, inst, read_gc0_status());
1395 return EMULATE_FAIL;
1398 static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
1400 u32 *opc = (u32 *) vcpu->arch.pc;
1401 u32 cause = vcpu->arch.host_cp0_cause;
1402 enum emulation_result er = EMULATE_DONE;
1403 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1404 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
1405 int ret = RESUME_GUEST;
1407 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
1409 case MIPS_GCTL0_GEXC_GPSI:
1410 ++vcpu->stat.vz_gpsi_exits;
1411 er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
1413 case MIPS_GCTL0_GEXC_GSFC:
1414 ++vcpu->stat.vz_gsfc_exits;
1415 er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
1417 case MIPS_GCTL0_GEXC_HC:
1418 ++vcpu->stat.vz_hc_exits;
1419 er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
1421 case MIPS_GCTL0_GEXC_GRR:
1422 ++vcpu->stat.vz_grr_exits;
1423 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1426 case MIPS_GCTL0_GEXC_GVA:
1427 ++vcpu->stat.vz_gva_exits;
1428 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1431 case MIPS_GCTL0_GEXC_GHFC:
1432 ++vcpu->stat.vz_ghfc_exits;
1433 er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
1435 case MIPS_GCTL0_GEXC_GPA:
1436 ++vcpu->stat.vz_gpa_exits;
1437 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1441 ++vcpu->stat.vz_resvd_exits;
1442 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1448 if (er == EMULATE_DONE) {
1450 } else if (er == EMULATE_HYPERCALL) {
1451 ret = kvm_mips_handle_hypcall(vcpu);
1453 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1460  * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1461 * @vcpu: Virtual CPU context.
1463 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1464 * by the root context.
1466 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
1468 struct kvm_run *run = vcpu->run;
1469 u32 cause = vcpu->arch.host_cp0_cause;
1470 enum emulation_result er = EMULATE_FAIL;
1471 int ret = RESUME_GUEST;
1473 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
1475 * If guest FPU not present, the FPU operation should have been
1476 * treated as a reserved instruction!
1477 * If FPU already in use, we shouldn't get this at all.
1479 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1480 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1482 return EMULATE_FAIL;
1488 /* other coprocessors not handled */
1496 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1507 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1508 * @vcpu: Virtual CPU context.
1510 * Handle when the guest attempts to use MSA when it is disabled in the root
1513 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
1515 struct kvm_run *run = vcpu->run;
1518 * If MSA not present or not exposed to guest or FR=0, the MSA operation
1519 * should have been treated as a reserved instruction!
1520 * Same if CU1=1, FR=0.
1521 * If MSA already in use, we shouldn't get this at all.
1523 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1524 (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
1525 !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
1526 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1527 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1533 return RESUME_GUEST;
1536 static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
1538 struct kvm_run *run = vcpu->run;
1539 u32 *opc = (u32 *) vcpu->arch.pc;
1540 u32 cause = vcpu->arch.host_cp0_cause;
1541 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1542 union mips_instruction inst;
1543 enum emulation_result er = EMULATE_DONE;
1544 int err, ret = RESUME_GUEST;
1546 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
1547 /* A code fetch fault doesn't count as an MMIO */
1548 if (kvm_is_ifetch_fault(&vcpu->arch)) {
1549 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1553 /* Fetch the instruction */
1554 		if (cause & CAUSEF_BD)
1555 			opc += 1;
1556 		err = kvm_get_badinstr(opc, vcpu, &inst.word);
1558 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1563 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1564 if (er == EMULATE_FAIL) {
1565 kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1567 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1571 if (er == EMULATE_DONE) {
1573 } else if (er == EMULATE_DO_MMIO) {
1574 run->exit_reason = KVM_EXIT_MMIO;
1577 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1583 static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
1585 struct kvm_run *run = vcpu->run;
1586 u32 *opc = (u32 *) vcpu->arch.pc;
1587 u32 cause = vcpu->arch.host_cp0_cause;
1588 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1589 union mips_instruction inst;
1590 enum emulation_result er = EMULATE_DONE;
1592 int ret = RESUME_GUEST;
1594 /* Just try the access again if we couldn't do the translation */
1595 if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
1596 return RESUME_GUEST;
1597 vcpu->arch.host_cp0_badvaddr = badvaddr;
1599 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
1600 /* Fetch the instruction */
1601 		if (cause & CAUSEF_BD)
1602 			opc += 1;
1603 		err = kvm_get_badinstr(opc, vcpu, &inst.word);
1605 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1610 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1611 if (er == EMULATE_FAIL) {
1612 kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1614 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1618 if (er == EMULATE_DONE) {
1620 } else if (er == EMULATE_DO_MMIO) {
1621 run->exit_reason = KVM_EXIT_MMIO;
1624 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1630 static u64 kvm_vz_get_one_regs[] = {
1631 KVM_REG_MIPS_CP0_INDEX,
1632 KVM_REG_MIPS_CP0_ENTRYLO0,
1633 KVM_REG_MIPS_CP0_ENTRYLO1,
1634 KVM_REG_MIPS_CP0_CONTEXT,
1635 KVM_REG_MIPS_CP0_PAGEMASK,
1636 KVM_REG_MIPS_CP0_PAGEGRAIN,
1637 KVM_REG_MIPS_CP0_WIRED,
1638 KVM_REG_MIPS_CP0_HWRENA,
1639 KVM_REG_MIPS_CP0_BADVADDR,
1640 KVM_REG_MIPS_CP0_COUNT,
1641 KVM_REG_MIPS_CP0_ENTRYHI,
1642 KVM_REG_MIPS_CP0_COMPARE,
1643 KVM_REG_MIPS_CP0_STATUS,
1644 KVM_REG_MIPS_CP0_INTCTL,
1645 KVM_REG_MIPS_CP0_CAUSE,
1646 KVM_REG_MIPS_CP0_EPC,
1647 KVM_REG_MIPS_CP0_PRID,
1648 KVM_REG_MIPS_CP0_EBASE,
1649 KVM_REG_MIPS_CP0_CONFIG,
1650 KVM_REG_MIPS_CP0_CONFIG1,
1651 KVM_REG_MIPS_CP0_CONFIG2,
1652 KVM_REG_MIPS_CP0_CONFIG3,
1653 KVM_REG_MIPS_CP0_CONFIG4,
1654 KVM_REG_MIPS_CP0_CONFIG5,
1656 KVM_REG_MIPS_CP0_XCONTEXT,
1658 KVM_REG_MIPS_CP0_ERROREPC,
1660 KVM_REG_MIPS_COUNT_CTL,
1661 KVM_REG_MIPS_COUNT_RESUME,
1662 KVM_REG_MIPS_COUNT_HZ,
1665 static u64 kvm_vz_get_one_regs_contextconfig[] = {
1666 KVM_REG_MIPS_CP0_CONTEXTCONFIG,
1668 KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
1672 static u64 kvm_vz_get_one_regs_segments[] = {
1673 KVM_REG_MIPS_CP0_SEGCTL0,
1674 KVM_REG_MIPS_CP0_SEGCTL1,
1675 KVM_REG_MIPS_CP0_SEGCTL2,
1678 static u64 kvm_vz_get_one_regs_htw[] = {
1679 KVM_REG_MIPS_CP0_PWBASE,
1680 KVM_REG_MIPS_CP0_PWFIELD,
1681 KVM_REG_MIPS_CP0_PWSIZE,
1682 KVM_REG_MIPS_CP0_PWCTL,
1685 static u64 kvm_vz_get_one_regs_kscratch[] = {
1686 KVM_REG_MIPS_CP0_KSCRATCH1,
1687 KVM_REG_MIPS_CP0_KSCRATCH2,
1688 KVM_REG_MIPS_CP0_KSCRATCH3,
1689 KVM_REG_MIPS_CP0_KSCRATCH4,
1690 KVM_REG_MIPS_CP0_KSCRATCH5,
1691 KVM_REG_MIPS_CP0_KSCRATCH6,
1694 static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
1698 ret = ARRAY_SIZE(kvm_vz_get_one_regs);
1699 if (cpu_guest_has_userlocal)
1701 if (cpu_guest_has_badinstr)
1703 if (cpu_guest_has_badinstrp)
1705 if (cpu_guest_has_contextconfig)
1706 ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1707 if (cpu_guest_has_segments)
1708 ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1709 if (cpu_guest_has_htw)
1710 ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1711 if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
1712 ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1713 ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
1718 static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
1723 if (copy_to_user(indices, kvm_vz_get_one_regs,
1724 sizeof(kvm_vz_get_one_regs)))
1726 indices += ARRAY_SIZE(kvm_vz_get_one_regs);
1728 if (cpu_guest_has_userlocal) {
1729 index = KVM_REG_MIPS_CP0_USERLOCAL;
1730 if (copy_to_user(indices, &index, sizeof(index)))
1734 if (cpu_guest_has_badinstr) {
1735 index = KVM_REG_MIPS_CP0_BADINSTR;
1736 if (copy_to_user(indices, &index, sizeof(index)))
1740 if (cpu_guest_has_badinstrp) {
1741 index = KVM_REG_MIPS_CP0_BADINSTRP;
1742 if (copy_to_user(indices, &index, sizeof(index)))
1746 if (cpu_guest_has_contextconfig) {
1747 if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
1748 sizeof(kvm_vz_get_one_regs_contextconfig)))
1750 indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1752 if (cpu_guest_has_segments) {
1753 if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
1754 sizeof(kvm_vz_get_one_regs_segments)))
1756 indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1758 if (cpu_guest_has_htw) {
1759 if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
1760 sizeof(kvm_vz_get_one_regs_htw)))
1762 indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1764 if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
1765 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1766 index = KVM_REG_MIPS_CP0_MAAR(i);
1767 if (copy_to_user(indices, &index, sizeof(index)))
1772 index = KVM_REG_MIPS_CP0_MAARI;
1773 if (copy_to_user(indices, &index, sizeof(index)))
1777 for (i = 0; i < 6; ++i) {
1778 if (!cpu_guest_has_kscr(i + 2))
1781 if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
1782 sizeof(kvm_vz_get_one_regs_kscratch[i])))
1790 static inline s64 entrylo_kvm_to_user(unsigned long v)
1791 {
1792 	s64 mask, ret = v;
1793
1794 	if (BITS_PER_LONG == 32) {
1795 		/*
1796 		 * KVM API exposes 64-bit version of the register, so move the
1797 		 * RI/XI bits up into place.
1798 		 */
1799 		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1800 		ret &= ~mask;
1801 		ret |= ((s64)v & mask) << 32;
1802 	}
1803 	return ret;
1804 }
1806 static inline unsigned long entrylo_user_to_kvm(s64 v)
1807 {
1808 	unsigned long mask, ret = v;
1809
1810 	if (BITS_PER_LONG == 32) {
1811 		/*
1812 		 * KVM API exposes 64-bit version of the register, so move the
1813 		 * RI/XI bits down into place.
1814 		 */
1815 		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1816 		ret &= ~mask;
1817 		ret |= (v >> 32) & mask;
1818 	}
1819 	return ret;
1820 }
1822 static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
1823 const struct kvm_one_reg *reg,
1826 struct mips_coproc *cop0 = vcpu->arch.cop0;
1830 case KVM_REG_MIPS_CP0_INDEX:
1831 *v = (long)read_gc0_index();
1833 case KVM_REG_MIPS_CP0_ENTRYLO0:
1834 *v = entrylo_kvm_to_user(read_gc0_entrylo0());
1836 case KVM_REG_MIPS_CP0_ENTRYLO1:
1837 *v = entrylo_kvm_to_user(read_gc0_entrylo1());
1839 case KVM_REG_MIPS_CP0_CONTEXT:
1840 *v = (long)read_gc0_context();
1842 case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1843 if (!cpu_guest_has_contextconfig)
1845 *v = read_gc0_contextconfig();
1847 case KVM_REG_MIPS_CP0_USERLOCAL:
1848 if (!cpu_guest_has_userlocal)
1850 *v = read_gc0_userlocal();
1853 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1854 if (!cpu_guest_has_contextconfig)
1856 *v = read_gc0_xcontextconfig();
1859 case KVM_REG_MIPS_CP0_PAGEMASK:
1860 *v = (long)read_gc0_pagemask();
1862 case KVM_REG_MIPS_CP0_PAGEGRAIN:
1863 *v = (long)read_gc0_pagegrain();
1865 case KVM_REG_MIPS_CP0_SEGCTL0:
1866 if (!cpu_guest_has_segments)
1868 *v = read_gc0_segctl0();
1870 case KVM_REG_MIPS_CP0_SEGCTL1:
1871 if (!cpu_guest_has_segments)
1873 *v = read_gc0_segctl1();
1875 case KVM_REG_MIPS_CP0_SEGCTL2:
1876 if (!cpu_guest_has_segments)
1878 *v = read_gc0_segctl2();
1880 case KVM_REG_MIPS_CP0_PWBASE:
1881 if (!cpu_guest_has_htw)
1883 *v = read_gc0_pwbase();
1885 case KVM_REG_MIPS_CP0_PWFIELD:
1886 if (!cpu_guest_has_htw)
1888 *v = read_gc0_pwfield();
1890 case KVM_REG_MIPS_CP0_PWSIZE:
1891 if (!cpu_guest_has_htw)
1893 *v = read_gc0_pwsize();
1895 case KVM_REG_MIPS_CP0_WIRED:
1896 *v = (long)read_gc0_wired();
1898 case KVM_REG_MIPS_CP0_PWCTL:
1899 if (!cpu_guest_has_htw)
1901 *v = read_gc0_pwctl();
1903 case KVM_REG_MIPS_CP0_HWRENA:
1904 *v = (long)read_gc0_hwrena();
1906 case KVM_REG_MIPS_CP0_BADVADDR:
1907 *v = (long)read_gc0_badvaddr();
1909 case KVM_REG_MIPS_CP0_BADINSTR:
1910 if (!cpu_guest_has_badinstr)
1912 *v = read_gc0_badinstr();
1914 case KVM_REG_MIPS_CP0_BADINSTRP:
1915 if (!cpu_guest_has_badinstrp)
1917 *v = read_gc0_badinstrp();
1919 case KVM_REG_MIPS_CP0_COUNT:
1920 *v = kvm_mips_read_count(vcpu);
1922 case KVM_REG_MIPS_CP0_ENTRYHI:
1923 *v = (long)read_gc0_entryhi();
1925 case KVM_REG_MIPS_CP0_COMPARE:
1926 *v = (long)read_gc0_compare();
1928 case KVM_REG_MIPS_CP0_STATUS:
1929 *v = (long)read_gc0_status();
1931 case KVM_REG_MIPS_CP0_INTCTL:
1932 *v = read_gc0_intctl();
1934 case KVM_REG_MIPS_CP0_CAUSE:
1935 *v = (long)read_gc0_cause();
1937 case KVM_REG_MIPS_CP0_EPC:
1938 *v = (long)read_gc0_epc();
1940 case KVM_REG_MIPS_CP0_PRID:
1941 *v = (long)kvm_read_c0_guest_prid(cop0);
1943 case KVM_REG_MIPS_CP0_EBASE:
1944 *v = kvm_vz_read_gc0_ebase();
1946 case KVM_REG_MIPS_CP0_CONFIG:
1947 *v = read_gc0_config();
1949 case KVM_REG_MIPS_CP0_CONFIG1:
1950 if (!cpu_guest_has_conf1)
1952 *v = read_gc0_config1();
1954 case KVM_REG_MIPS_CP0_CONFIG2:
1955 if (!cpu_guest_has_conf2)
1957 *v = read_gc0_config2();
1959 case KVM_REG_MIPS_CP0_CONFIG3:
1960 if (!cpu_guest_has_conf3)
1962 *v = read_gc0_config3();
1964 case KVM_REG_MIPS_CP0_CONFIG4:
1965 if (!cpu_guest_has_conf4)
1967 *v = read_gc0_config4();
1969 case KVM_REG_MIPS_CP0_CONFIG5:
1970 if (!cpu_guest_has_conf5)
1972 *v = read_gc0_config5();
1974 case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
1975 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
1977 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
1978 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
1980 *v = vcpu->arch.maar[idx];
1982 case KVM_REG_MIPS_CP0_MAARI:
1983 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
1985 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
1988 case KVM_REG_MIPS_CP0_XCONTEXT:
1989 *v = read_gc0_xcontext();
1992 case KVM_REG_MIPS_CP0_ERROREPC:
1993 *v = (long)read_gc0_errorepc();
1995 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
1996 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
1997 if (!cpu_guest_has_kscr(idx))
2001 *v = (long)read_gc0_kscratch1();
2004 *v = (long)read_gc0_kscratch2();
2007 *v = (long)read_gc0_kscratch3();
2010 *v = (long)read_gc0_kscratch4();
2013 *v = (long)read_gc0_kscratch5();
2016 *v = (long)read_gc0_kscratch6();
2020 case KVM_REG_MIPS_COUNT_CTL:
2021 *v = vcpu->arch.count_ctl;
2023 case KVM_REG_MIPS_COUNT_RESUME:
2024 *v = ktime_to_ns(vcpu->arch.count_resume);
2026 case KVM_REG_MIPS_COUNT_HZ:
2027 *v = vcpu->arch.count_hz;
2035 static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
2036 const struct kvm_one_reg *reg,
2039 struct mips_coproc *cop0 = vcpu->arch.cop0;
2042 unsigned int cur, change;
2045 case KVM_REG_MIPS_CP0_INDEX:
2048 case KVM_REG_MIPS_CP0_ENTRYLO0:
2049 write_gc0_entrylo0(entrylo_user_to_kvm(v));
2051 case KVM_REG_MIPS_CP0_ENTRYLO1:
2052 write_gc0_entrylo1(entrylo_user_to_kvm(v));
2054 case KVM_REG_MIPS_CP0_CONTEXT:
2055 write_gc0_context(v);
2057 case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
2058 if (!cpu_guest_has_contextconfig)
2060 write_gc0_contextconfig(v);
2062 case KVM_REG_MIPS_CP0_USERLOCAL:
2063 if (!cpu_guest_has_userlocal)
2065 write_gc0_userlocal(v);
2068 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
2069 if (!cpu_guest_has_contextconfig)
2071 write_gc0_xcontextconfig(v);
2074 case KVM_REG_MIPS_CP0_PAGEMASK:
2075 write_gc0_pagemask(v);
2077 case KVM_REG_MIPS_CP0_PAGEGRAIN:
2078 write_gc0_pagegrain(v);
2080 case KVM_REG_MIPS_CP0_SEGCTL0:
2081 if (!cpu_guest_has_segments)
2083 write_gc0_segctl0(v);
2085 case KVM_REG_MIPS_CP0_SEGCTL1:
2086 if (!cpu_guest_has_segments)
2088 write_gc0_segctl1(v);
2090 case KVM_REG_MIPS_CP0_SEGCTL2:
2091 if (!cpu_guest_has_segments)
2093 write_gc0_segctl2(v);
2095 case KVM_REG_MIPS_CP0_PWBASE:
2096 if (!cpu_guest_has_htw)
2098 write_gc0_pwbase(v);
2100 case KVM_REG_MIPS_CP0_PWFIELD:
2101 if (!cpu_guest_has_htw)
2103 write_gc0_pwfield(v);
2105 case KVM_REG_MIPS_CP0_PWSIZE:
2106 if (!cpu_guest_has_htw)
2108 write_gc0_pwsize(v);
2110 case KVM_REG_MIPS_CP0_WIRED:
2111 change_gc0_wired(MIPSR6_WIRED_WIRED, v);
2113 case KVM_REG_MIPS_CP0_PWCTL:
2114 if (!cpu_guest_has_htw)
2118 case KVM_REG_MIPS_CP0_HWRENA:
2119 write_gc0_hwrena(v);
2121 case KVM_REG_MIPS_CP0_BADVADDR:
2122 write_gc0_badvaddr(v);
2124 case KVM_REG_MIPS_CP0_BADINSTR:
2125 if (!cpu_guest_has_badinstr)
2127 write_gc0_badinstr(v);
2129 case KVM_REG_MIPS_CP0_BADINSTRP:
2130 if (!cpu_guest_has_badinstrp)
2132 write_gc0_badinstrp(v);
2134 case KVM_REG_MIPS_CP0_COUNT:
2135 kvm_mips_write_count(vcpu, v);
2137 case KVM_REG_MIPS_CP0_ENTRYHI:
2138 write_gc0_entryhi(v);
2140 case KVM_REG_MIPS_CP0_COMPARE:
2141 kvm_mips_write_compare(vcpu, v, false);
2143 case KVM_REG_MIPS_CP0_STATUS:
2144 write_gc0_status(v);
2146 case KVM_REG_MIPS_CP0_INTCTL:
2147 write_gc0_intctl(v);
2149 case KVM_REG_MIPS_CP0_CAUSE:
2151 * If the timer is stopped or started (DC bit) it must look
2152 * atomic with changes to the timer interrupt pending bit (TI).
2153 * A timer interrupt should not happen in between.
2155 if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
2156 if (v & CAUSEF_DC) {
2157 /* disable timer first */
2158 kvm_mips_count_disable_cause(vcpu);
2159 change_gc0_cause((u32)~CAUSEF_DC, v);
2161 /* enable timer last */
2162 change_gc0_cause((u32)~CAUSEF_DC, v);
2163 kvm_mips_count_enable_cause(vcpu);
2169 case KVM_REG_MIPS_CP0_EPC:
2172 case KVM_REG_MIPS_CP0_PRID:
2173 kvm_write_c0_guest_prid(cop0, v);
2175 case KVM_REG_MIPS_CP0_EBASE:
2176 kvm_vz_write_gc0_ebase(v);
2178 case KVM_REG_MIPS_CP0_CONFIG:
2179 cur = read_gc0_config();
2180 change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
2183 write_gc0_config(v);
2186 case KVM_REG_MIPS_CP0_CONFIG1:
2187 if (!cpu_guest_has_conf1)
2189 cur = read_gc0_config1();
2190 change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
2193 write_gc0_config1(v);
2196 case KVM_REG_MIPS_CP0_CONFIG2:
2197 if (!cpu_guest_has_conf2)
2199 cur = read_gc0_config2();
2200 change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
2203 write_gc0_config2(v);
2206 case KVM_REG_MIPS_CP0_CONFIG3:
2207 if (!cpu_guest_has_conf3)
2209 cur = read_gc0_config3();
2210 change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
2213 write_gc0_config3(v);
2216 case KVM_REG_MIPS_CP0_CONFIG4:
2217 if (!cpu_guest_has_conf4)
2219 cur = read_gc0_config4();
2220 change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
2223 write_gc0_config4(v);
2226 case KVM_REG_MIPS_CP0_CONFIG5:
2227 if (!cpu_guest_has_conf5)
2229 cur = read_gc0_config5();
2230 change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
2233 write_gc0_config5(v);
2236 case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2237 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2239 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2240 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2242 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
2244 case KVM_REG_MIPS_CP0_MAARI:
2245 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2247 kvm_write_maari(vcpu, v);
2250 case KVM_REG_MIPS_CP0_XCONTEXT:
2251 write_gc0_xcontext(v);
2254 case KVM_REG_MIPS_CP0_ERROREPC:
2255 write_gc0_errorepc(v);
2257 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2258 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2259 if (!cpu_guest_has_kscr(idx))
2263 write_gc0_kscratch1(v);
2266 write_gc0_kscratch2(v);
2269 write_gc0_kscratch3(v);
2272 write_gc0_kscratch4(v);
2275 write_gc0_kscratch5(v);
2278 write_gc0_kscratch6(v);
2282 case KVM_REG_MIPS_COUNT_CTL:
2283 ret = kvm_mips_set_count_ctl(vcpu, v);
2285 case KVM_REG_MIPS_COUNT_RESUME:
2286 ret = kvm_mips_set_count_resume(vcpu, v);
2288 case KVM_REG_MIPS_COUNT_HZ:
2289 ret = kvm_mips_set_count_hz(vcpu, v);
2297 #define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
2298 static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
2300 unsigned long guestid = guestid_cache(cpu);
2302 	if (!(++guestid & GUESTID_MASK)) {
2303 		if (cpu_has_vtag_icache)
2304 			flush_icache_all();
2305
2306 		if (!guestid)		/* fix version if needed */
2307 			guestid = GUESTID_FIRST_VERSION;
2308
2309 		++guestid;	/* guestid 0 reserved for root */
2310
2311 		/* start new guestid cycle */
2312 		kvm_vz_local_flush_roottlb_all_guests();
2313 		kvm_vz_local_flush_guesttlb_all();
2314 	}
2315
2316 	guestid_cache(cpu) = guestid;
2317 }
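/*
 * Illustrative note: the GuestID allocator mirrors the MIPS ASID allocator.
 * The low GUESTID_MASK bits name a guest and the high bits act as a
 * generation number, so when the low bits wrap, every GuestID handed out in
 * the previous cycle fails the version check in kvm_vz_vcpu_load_tlb() and
 * gets reallocated.
 */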
2319 /* Returns 1 if the guest TLB may be clobbered */
2320 static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
2325 if (!vcpu->requests)
2328 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2329 if (cpu_has_guestid) {
2330 /* Drop all GuestIDs for this VCPU */
2331 for_each_possible_cpu(i)
2332 vcpu->arch.vzguestid[i] = 0;
2333 			/* This will clobber guest TLB contents too */
2334 			ret = 1;
2337 * For Root ASID Dealias (RAD) we don't do anything here, but we
2338 * still need the request to ensure we recheck asid_flush_mask.
2339 * We can still return 0 as only the root TLB will be affected
2340 * by a root ASID flush.
2347 static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
2349 unsigned int wired = read_gc0_wired();
2350 struct kvm_mips_tlb *tlbs;
2353 /* Expand the wired TLB array if necessary */
2354 wired &= MIPSR6_WIRED_WIRED;
2355 if (wired > vcpu->arch.wired_tlb_limit) {
2356 tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2357 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2358 		if (WARN_ON(!tlbs)) {
2359 			/* Save whatever we can */
2360 			wired = vcpu->arch.wired_tlb_limit;
2361 		} else {
2362 			vcpu->arch.wired_tlb = tlbs;
2363 			vcpu->arch.wired_tlb_limit = wired;
2364 		}
2365 	}
2368 /* Save wired entries from the guest TLB */
2369 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2370 /* Invalidate any dropped entries since last time */
2371 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2372 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2373 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2374 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2375 vcpu->arch.wired_tlb[i].tlb_mask = 0;
2377 vcpu->arch.wired_tlb_used = wired;
2380 static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
2382 /* Load wired entries into the guest TLB */
2383 if (vcpu->arch.wired_tlb)
2384 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2385 vcpu->arch.wired_tlb_used);
2388 static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
2390 struct kvm *kvm = vcpu->kvm;
2391 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2395 * Are we entering guest context on a different CPU to last time?
2396 * If so, the VCPU's guest TLB state on this CPU may be stale.
2398 migrated = (vcpu->arch.last_exec_cpu != cpu);
2399 vcpu->arch.last_exec_cpu = cpu;
2402 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
2403 * remains set until another vcpu is loaded in. As a rule GuestRID
2404 * remains zeroed when in root context unless the kernel is busy
2405 * manipulating guest tlb entries.
2407 if (cpu_has_guestid) {
2409 * Check if our GuestID is of an older version and thus invalid.
2411 * We also discard the stored GuestID if we've executed on
2412 * another CPU, as the guest mappings may have changed without
2413 		 * hypervisor knowledge.
2414 		 */
2415 		if (migrated ||
2416 		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2417 		    GUESTID_VERSION_MASK) {
2418 kvm_vz_get_new_guestid(cpu, vcpu);
2419 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2420 trace_kvm_guestid_change(vcpu,
2421 vcpu->arch.vzguestid[cpu]);
2424 /* Restore GuestID */
2425 		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2426 	} else {
2427 		/*
2428 		 * The Guest TLB only stores a single guest's TLB state, so
2429 		 * flush it if another VCPU has executed on this CPU.
2430 		 *
2431 		 * We also flush if we've executed on another CPU, as the guest
2432 		 * mappings may have changed without hypervisor knowledge.
2433 		 */
2434 		if (migrated || last_exec_vcpu[cpu] != vcpu)
2435 			kvm_vz_local_flush_guesttlb_all();
2436 		last_exec_vcpu[cpu] = vcpu;
2437 	}
2438 	/*
2439 	 * Root ASID dealiases guest GPA mappings in the root TLB.
2440 	 * Allocate new root ASID if needed.
2441 	 */
2442 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
2443 || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
2444 asid_version_mask(cpu))
2445 get_new_mmu_context(gpa_mm, cpu);

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over time
	 * if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next VCPU from succeeding by matching a LL on
	 * the previous VCPU.
	 */
	if (cpu_guest_has_rw_llb)
		write_gc0_lladdr(0);

	return 0;
}
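
/*
 * Illustration of the hazard the LLAddr clearing above avoids (descriptive
 * only): VCPU A executes LL to a guest physical address and is preempted
 * before its SC; VCPU B is loaded on the same CPU and executes SC to the
 * same address. Without clearing the LL bit, B's SC could succeed by
 * matching A's link, breaking the atomicity both guests expect.
 */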

static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}
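
/*
 * Note on the Wired handling in kvm_vz_vcpu_put() above: the wired count is
 * cleared once the entries have been saved, so the hardware guest TLB may
 * freely reuse those slots while this VCPU is descheduled. This is why
 * kvm_vz_vcpu_load() restores CP0_Wired unconditionally.
 */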

/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}

	return ret;
}
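
/*
 * Worked example of the MMUSize - 1 encoding above, assuming a 6-bit
 * Config1.MMUSize field (MIPS_CONF1_TLBS_SIZE = 6): for size = 80,
 * size - 1 = 79 = 0b1001111, so the low 6 bits (0b001111 = 15) land in
 * Config1.MMUSize and the remainder (79 >> 6 = 1) in Config4.VTLBSizeExt
 * (or MMUSizeExt). The read-back then reassembles both fields to report
 * the VTLB size the hardware actually accepted.
 */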

static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation. */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term wired
		 * entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written,
		 * check it matches or we won't provide a consistent view to the
		 * guest. If this ever happens it suggests an asymmetric number
		 * of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * privileged registers:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext)
		set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

	return 0;
}
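
/*
 * Note on the GuestCtl2 write above (assuming the usual GuestCtl2 layout):
 * 0x3f << 10 spans the six virtual interrupt pending (VIP) bits at
 * GuestCtl2[15:10], so any interrupts previously injected into a guest on
 * this CPU are discarded before new guests run.
 */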

static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though as
		 * they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}

static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	default:
		r = 0;
		break;
	}

	return r;
}
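
/*
 * The values returned above follow the usual KVM_CHECK_EXTENSION convention
 * (0 = unsupported, positive = supported); per the comment in the 64-bit
 * case, KVM_CAP_MIPS_64BIT reports 2 to indicate that 64-bit registers,
 * operations and addresses are all available to guests.
 */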

static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}

static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}

static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
					       MIPS_CONF1_MD |
					       MIPS_CONF1_PC |
					       MIPS_CONF1_WR |
					       MIPS_CONF1_CA |
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
					       MIPS_CONF3_BPG |
					       MIPS_CONF3_ULRI |
					       MIPS_CONF3_DSP |
					       MIPS_CONF3_CTXTC |
					       MIPS_CONF3_ITL |
					       MIPS_CONF3_LPA |
					       MIPS_CONF3_VEIC |
					       MIPS_CONF3_VINT |
					       MIPS_CONF3_SP |
					       MIPS_CONF3_CDMM |
					       MIPS_CONF3_MT |
					       MIPS_CONF3_SM |
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
					       MIPS_CONF5_CV |
					       MIPS_CONF5_MSAEN |
					       MIPS_CONF5_UFE |
					       MIPS_CONF5_FRE |
					       MIPS_CONF5_SBRI |
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && cpu_has_mips_r6) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
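
/*
 * Note on the reset vector above: CKSEG1ADDR(0x1fc00000) is the uncached
 * kseg1 alias (0xbfc00000, sign-extended on 64-bit) of physical address
 * 0x1fc00000, the architectural MIPS reset address, so the first guest
 * instruction fetch behaves like a real core coming out of reset.
 */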

static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}

static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}

static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}
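
/*
 * Flow sketch for the reenter path above: when kvm_vz_check_requests()
 * reports that the guest TLB contents are about to be invalidated (e.g. a
 * requested TLB flush on a core without GuestID), the wired entries are
 * saved first, the TLB context is reloaded, and the wired entries are then
 * written back so the guest resumes with its wired mappings intact.
 */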

static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}

static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}