/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/xive.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
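/*
 * The HV and PR backends fill in these ops tables when their modules
 * initialize; kvm_arch_init_vm() below selects one of the two per VM,
 * either from an explicit KVM_VM_PPC_HV/KVM_VM_PPC_PR type argument or
 * by defaulting to HV when both backends are available.
 */
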
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

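/*
 * The barrier pairing above, in brief: the requester side (see the
 * comment in kvm_flush_remote_tlbs) first records its request and then
 * looks at vcpu->mode, while the entry path here first sets
 * vcpu->mode = IN_GUEST_MODE and then looks at vcpu->requests.
 * Whichever side runs second is guaranteed to see the other's write,
 * so a request can never be posted without either an IPI being sent or
 * the vcpu->requests check above catching it.
 */
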
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
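/*
 * Byte-swap every field of the vcpu's shared (magic) page.  Used by the
 * KVM_HC_PPC_MAP_MAGIC_PAGE handling below when the guest's endianness
 * no longer matches the layout currently stored in the page.
 */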
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

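/*
 * Hypercall ABI handled above, in brief: the guest loads the hypercall
 * number into r11 and up to four arguments into r3-r6 before trapping
 * into KVM (see kvm_vm_ioctl_get_pvinfo() below for the guest-side
 * instruction sequence).  When MSR_SF is clear the arguments are
 * truncated to 32 bits.  The status code is returned to the guest in r3
 * (stored by the exit handler that calls kvmppc_kvm_pv()) and a second
 * return value in r4, set here via kvmppc_set_gpr(vcpu, 4, r2).
 */
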
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;
out:
	vcpu->arch.sanity_check_failed = !r;

	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

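/*
 * A load/store that cannot be emulated in the kernel becomes a
 * KVM_EXIT_MMIO exit here; userspace performs the access, and the
 * result is folded back in by kvmppc_complete_mmio_load() on the next
 * KVM_RUN (see kvm_arch_vcpu_ioctl_run() below).
 */
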
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

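/*
 * kvmppc_ld()/kvmppc_st() mirror each other: both translate the guest
 * effective address with kvmppc_xlate(), service non-privileged accesses
 * to the magic page directly from the host-side copy in
 * vcpu->arch.shared, and return EMULATE_DO_MMIO when the backing memory
 * is not ordinary guest RAM.
 */
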
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

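/*
 * Teardown order matters above: for HV, in-flight IPIs are drained
 * before the vCPU structures they might touch are freed, and the
 * backend module reference taken in kvm_arch_init_vm() is dropped only
 * after kvmppc_core_destroy_vm() has finished using the ops table.
 */
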
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && !radix_enabled() &&
		       cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		/* Disable this on POWER9 until code handles new HPTE format */
		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

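/*
 * The guest DEC register is emulated with a host hrtimer: dec_timer is
 * armed to fire when the guest's decrementer would expire, and the
 * wakeup handler above resolves the owning vCPU and hands the event to
 * kvmppc_decrementer_func() to post the pending decrementer exception.
 */
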
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

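/*
 * Both MMIO paths above first try in-kernel emulation via the
 * KVM_MMIO_BUS (kvm_io_bus_read()/kvm_io_bus_write() under SRCU); only
 * when no in-kernel device claims the access does EMULATE_DO_MMIO
 * propagate the request out to userspace.
 */
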
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

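/*
 * For both ONE_REG directions the backend's kvmppc_get/set_one_reg()
 * gets first refusal; a return of -EINVAL falls through to the generic
 * Altivec register handling above.
 */
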
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

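/*
 * On re-entry from userspace, kvm_arch_vcpu_ioctl_run() first completes
 * whatever forced the previous exit: a pending MMIO load is folded into
 * the target register, OSI and PAPR hypercall results are copied back
 * into the GPRs, and (on BookE) a userspace-supplied EPR is installed,
 * before the vCPU is actually run again.
 */
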
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

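/*
 * sPAPR hcall opcodes are multiples of 4, which is why the (hcall & 3)
 * check above rejects unaligned numbers and why the enabled_hcalls
 * bitmap is indexed by hcall / 4.
 */
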
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			break;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			break;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			break;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}

out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

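/*
 * The LPID allocator above is lock-free: find_first_zero_bit() proposes
 * a candidate and the atomic test_and_set_bit() either claims it or, on
 * a race with another caller, sends the loop around for a new candidate.
 */
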
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);