/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS

#define VECTORSPACING 0x100	/* for EI/VI mode */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
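
/*
 * Reset the VCPU's per-CPU guest ASIDs so that stale TLB mappings from a
 * previous incarnation of this VCPU cannot be reused.
 */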
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)

	return !!(vcpu->arch.pending_exceptions);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void)

int kvm_arch_hardware_setup(void)

void kvm_arch_check_processor_compat(void *rtn)
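
/*
 * Reserve a wired TLB entry on the current CPU for mapping the commpage;
 * run on each CPU when the first KVM instance is created.
 */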
static void kvm_mips_init_tlbs(struct kvm *kvm)

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);

	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
static void kvm_mips_init_vm_percpu(void *arg)

	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)

	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
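
/*
 * Release the pages pinned for the guest physical map and free every VCPU
 * belonging to this VM.
 */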
void kvm_mips_free_vcpus(struct kvm *kvm)

	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);

	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
static void kvm_mips_uninit_tlbs(void *arg)

	/* Restore wired count */

	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
void kvm_arch_destroy_vm(struct kvm *kvm)

	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
	unsigned long npages = 0;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		npages = mem->memory_size >> PAGE_SHIFT;

		kvm->arch.guest_pmap_npages = npages;
		kvm->arch.guest_pmap =
			kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

		if (!kvm->arch.guest_pmap) {
			kvm_err("Failed to allocate guest PMAP\n");

		kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
			  npages, kvm->arch.guest_pmap);

		/* Now setup the page table */
		for (i = 0; i < npages; i++)
			kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
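
/*
 * Create and initialize a VCPU: allocate a page-aligned region for the
 * guest exception base (gebase), copy the exception vector code into it,
 * and set up the commpage and the Count/Compare timer.
 */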
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)

	int err, size, offset;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	err = kvm_vcpu_init(vcpu, kvm, id);

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	vcpu->arch.guest_ebase = gebase;

	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB Refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);

	/* General handler, relocate to unmapped space for sanity's sake */

	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	offset += mips32_GuestExceptionEnd - mips32_GuestException;
	memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
	       __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
	vcpu->arch.vcpu_run = gebase + offset;

	vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		goto out_free_gebase;

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	kvm_vcpu_uninit(vcpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)

	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)

	kvm_arch_vcpu_free(vcpu);

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
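
/*
 * Main VCPU run loop: finish any pending MMIO load, deliver pending
 * interrupts, then enter guest context until the next exit.
 */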
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	guest_enter_irqoff();

	/* Disable hardware page table walking while in guest */

	trace_kvm_enter(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
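
/*
 * Raise or lower a guest interrupt line: a positive irq number queues the
 * corresponding I/O interrupt on the target VCPU, a negative number
 * dequeues it, and the target VCPU is woken if it was waiting.
 */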
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)

	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  intr);

		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);

		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
static u64 kvm_mips_get_one_regs[] = {

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_CSR,

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_CSR,

static u64 kvm_mips_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
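
/*
 * Count the register indices that KVM_GET_REG_LIST will report for this
 * VCPU, including optional FPU, MSA and KScratch state.
 */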
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)

	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
	ret += kvm_mips_callbacks->num_regs(vcpu);
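
/*
 * Copy the register index list out to userspace, mirroring the counting
 * in kvm_mips_num_regs() above.
 */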
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))

	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))

		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))

		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))

	for (i = 0; i < 6; ++i) {
		if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))

		if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
				 sizeof(kvm_mips_get_one_regs_kscratch[i])))

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
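
/*
 * Read one guest register for the KVM_GET_ONE_REG ioctl. A minimal
 * userspace sketch of fetching the guest PC (assuming vcpu_fd came from
 * KVM_CREATE_VCPU):
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		err(1, "KVM_GET_ONE_REG");
 */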
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)

	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;

	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];

	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;

	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;

	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);

			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);

	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))

		v = get_fpr64(&fpu->fpr[idx], 0);

	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

		v = boot_cpu_data.fpu_id;

	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))

		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif

	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

		v = boot_cpu_data.msa_id;

	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);

	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);

	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);

	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);

	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);

	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);

	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);

	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);

	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);

	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);

	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);

	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);

	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);

	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);

	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);

	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);

	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);

	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);

	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);

	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);

	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);

	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))

			v = (long)kvm_read_c0_guest_kscratch1(cop0);

			v = (long)kvm_read_c0_guest_kscratch2(cop0);

			v = (long)kvm_read_c0_guest_kscratch3(cop0);

			v = (long)kvm_read_c0_guest_kscratch4(cop0);

			v = (long)kvm_read_c0_guest_kscratch5(cop0);

			v = (long)kvm_read_c0_guest_kscratch6(cop0);

	/* registers to be handled specially */

		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
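
/*
 * Write one guest register for the KVM_SET_ONE_REG ioctl; the value is
 * first fetched from userspace according to the size encoded in reg->id,
 * then dispatched on the register id itself.
 */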
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)

	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)

	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)

	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;

	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */

	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;

	case KVM_REG_MIPS_HI:

	case KVM_REG_MIPS_LO:

	case KVM_REG_MIPS_PC:

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);

			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);

	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))

		set_fpr64(&fpu->fpr[idx], 0, v);

	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif

	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);

	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);

	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);

	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);

	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);

	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);

	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);

	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);

	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);

	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);

	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);

	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);

	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))

			kvm_write_c0_guest_kscratch1(cop0, v);

			kvm_write_c0_guest_kscratch2(cop0, v);

			kvm_write_c0_guest_kscratch3(cop0, v);

			kvm_write_c0_guest_kscratch4(cop0, v);

			kvm_write_c0_guest_kscratch5(cop0, v);

			kvm_write_c0_guest_kscratch6(cop0, v);

	/* registers to be handled specially */

		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
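
/*
 * Handle KVM_ENABLE_CAP for per-VCPU capabilities (FPU and MSA). A
 * minimal userspace sketch of enabling the guest FPU (assuming vcpu_fd
 * and that KVM_CAP_MIPS_FPU was reported by KVM_CHECK_EXTENSION):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */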
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))

	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;

	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)

	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))

		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);

			return kvm_mips_get_reg(vcpu, &reg);

	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))

		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))

		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);

	/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);

		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))

		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);

	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))

		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)

	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);

	/* If nothing is dirty, don't bother messing with page tables. */

		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);

	mutex_unlock(&kvm->slots_lock);
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)

int kvm_arch_init(void *opaque)

	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");

	return kvm_mips_emulation_init(&kvm_mips_callbacks);

void kvm_arch_exit(void)

	kvm_mips_callbacks = NULL;

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)

	return -ENOIOCTLCMD;

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)

	return -ENOIOCTLCMD;

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

	return -ENOIOCTLCMD;

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

	return -ENOIOCTLCMD;

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)

	return VM_FAULT_SIGBUS;
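
/*
 * Report which optional capabilities this host supports: FPU needs a
 * usable host FPU, and MSA is only offered when the hardware does not
 * implement vector partitioning.
 */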
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)

	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:

	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;

	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;

	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)

	return kvm_mips_pending_timer(vcpu);

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)

	struct mips_coproc *cop0;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);

	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;
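
/*
 * hrtimer callback context: queue a timer interrupt for the VCPU and wake
 * it if it is sleeping in a guest WAIT.
 */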
static void kvm_mips_comparecount_func(unsigned long data)

	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)

	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)

	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

	return kvm_mips_callbacks->vcpu_setup(vcpu);
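
/*
 * Set any Status bits required by host CPU features before the exit path
 * re-enables interrupts and can hit the scheduler.
 */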
static void kvm_mips_set_c0_status(void)

	u32 status = read_c0_status();

	write_c0_status(status);
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)

	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);
	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {

	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)

		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);

		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);

		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);

		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);

		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);

		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);

		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);

		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);

		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);

		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);

		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);

	/* Disable HTW before returning to guest or host */
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)

	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);

		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);

	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);

		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)

	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);

		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);

		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)

	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;

	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)

	/*
	 * FPU & MSA get disabled in root context (hardware) when it is disabled
	 * in guest context (software), but the register state in the hardware
	 * may still be in use. This is why we explicitly re-enable the hardware
	 * before saving.
	 */

	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();

		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)

	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);

	/* match 2nd instruction in __kvm_restore_fcsr */
	if (pc != (unsigned long)&__kvm_restore_fcsr + 4)

	/* match 2nd/3rd instruction in __kvm_restore_msacsr */
	if (!cpu_has_msa ||
	    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
	    pc > (unsigned long)&__kvm_restore_msacsr + 8)

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
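
/*
 * Module init/exit: register with the KVM core and install the die
 * notifier used to step over faulting FCSR/MSACSR restores.
 */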
static int __init kvm_mips_init(void)

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	register_die_notifier(&kvm_mips_csr_die_notifier);

static void __exit kvm_mips_exit(void)

	unregister_die_notifier(&kvm_mips_csr_die_notifier);

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);