/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

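/*
 * Per-vcpu statistics exported via debugfs; each entry maps a name to an
 * offset into struct kvm_vcpu's stat area.
 */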
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

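/*
 * Interrupts are vectored relative to HIOR when the guest runs on the PR
 * backend; with HV KVM the architected offset (0) is used.
 */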
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}

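/*
 * The paravirt (magic page) interface lets the guest mark a critical
 * section by storing its r1 into the shared "critical" field; while the
 * two match in supervisor mode, maskable interrupts are held back.
 */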
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}

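/*
 * Mimic hardware interrupt entry: save PC/MSR into SRR0/SRR1, branch to
 * the vector and let the MMU backend derive the new guest MSR.
 */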
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

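/*
 * Pending interrupts are tracked as a priority-ordered bitmap in
 * vcpu->arch.pending_exceptions; queueing and dequeueing set or clear the
 * bit for the vector's priority.
 */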
void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}

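/*
 * Deliver one pending interrupt priority. Decrementer and external
 * interrupts honour MSR_EE and the paravirt critical section; the return
 * value tells the caller whether an interrupt was actually injected.
 */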
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}

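/*
 * Deliver pending interrupts in priority order, clearing each delivered
 * one from the bitmap unless its source (mtdec, userspace) is responsible
 * for clearing it instead.
 */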
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);

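/*
 * Translate a guest physical address to a host pfn, short-circuiting
 * accesses to the paravirt magic page to the vcpu's shared page.
 */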
pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);

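/*
 * Effective address translation: with the relevant MSR[IR/DR] bit set the
 * backend MMU xlate hook is used; otherwise the address maps 1:1 into a
 * real-mode pte, with the split-real hack applied to instruction fetches
 * when only MSR_DR is set.
 */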
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}

int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

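/*
 * ONE_REG accessors: the active backend gets first crack via kvm_ops;
 * registers it does not handle (-EINVAL) are serviced generically here.
 */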
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			*val = get_reg_val(id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

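/*
 * The remaining vcpu and VM operations are thin wrappers that dispatch
 * through kvm->arch.kvm_ops to the active backend (PR or HV).
 */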
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * module.
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

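/*
 * Module init/exit: register with the KVM core; when the PR backend is
 * built into this module (CONFIG_KVM_BOOK3S_32_HANDLER) it is brought up
 * and torn down here as well.
 */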
static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif