/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>

#include "timing.h"
#include "booke.h"
unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
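
/*
 * Per-vcpu exit statistics, exported by the generic KVM debugfs code
 * (typically under /sys/kernel/debug/kvm).
 */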
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell",   VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ NULL }
};
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif
/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;
	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}
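
/*
 * The helpers below access the state that interrupt delivery must save or
 * update for the guest.  With CONFIG_KVM_BOOKE_HV the guest's SRR0/SRR1,
 * DEAR and ESR live in dedicated guest SPRs (GSRR0/1, GDEAR, GESR) and are
 * accessed with mfspr/mtspr; otherwise they are kept in the vcpu and in
 * the shared (kvm_vcpu_arch_shared) area.
 */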
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_GS | MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_GS | MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_GS | MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
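
/*
 * A decrementer interrupt is made pending for the guest only while both
 * the guest's decrementer interrupt enable (TCR[DIE]) and decrementer
 * status (TSR[DIS]) bits are set; otherwise any queued decrementer is
 * dequeued again.
 */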
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
			smp_mb();
			update_timer_ints(vcpu);
		}
	}

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}
/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		kvmppc_core_check_exceptions(vcpu);
	}
}
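
/*
 * Main vcpu run entry point.  Besides dispatching to the low-level entry
 * code, this swaps the userspace FPU context for the guest's FPU context
 * around __kvmppc_vcpu_run() when the kernel is built with CONFIG_PPC_FPU.
 */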
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	if (!current->thread.kvm_vcpu) {
		WARN(1, "no vcpu\n");
		return -EPERM;
	}

	local_irq_disable();
	kvmppc_core_prepare_to_enter(vcpu);

	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();
#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU. Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;
	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif
	kvm_guest_exit();

out:
	local_irq_enable();
	return ret;
}
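
/*
 * Common handling for exits that require instruction emulation: the
 * emulator either completes the instruction (resume the guest), requests
 * a DCR exit to userspace, or fails, in which case the failing instruction
 * is reported to userspace.
 */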
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		return RESUME_HOST;

	default:
		BUG();
	}
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* Run the host's handler for exits caused by host interrupts. */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		do_IRQ(current->thread.regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		timer_interrupt(current->thread.regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		doorbell_exception(current->thread.regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		break;
	}

	local_irq_enable();
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);
		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);
		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}
	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST_NV;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif
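
	/*
	 * For the TLB miss exits below, the guest TLB is consulted first.
	 * If the guest has no mapping, the miss is reflected back to it; if
	 * it does and the backing page is RAM, a shadow (host) TLB entry is
	 * created; otherwise the access is treated as MMIO and emulated in
	 * userspace.
	 */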
	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();
	kvmppc_core_prepare_to_enter(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
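
/*
 * The BookE KVM_GET/SET_SREGS state is split into feature blocks
 * (KVM_SREGS_E_BASE, KVM_SREGS_E_ARCH206, KVM_SREGS_E_IVOR, ...).  Getters
 * advertise the blocks they filled in via sregs->u.e.features; setters only
 * consume blocks whose feature bit userspace has set.
 */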
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}
static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}
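
/* The ONE_REG interface is not implemented for BookE at this point. */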
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
}
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
}
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * holds the upper 16 bits of the handler base address, so we need a
	 * 64KB-aligned 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
}