KVM: PPC: Convert SRR0 and SRR1 to shared page
author     Alexander Graf <agraf@suse.de>
           Thu, 29 Jul 2010 12:47:46 +0000 (14:47 +0200)
committer  Avi Kivity <avi@redhat.com>
           Sun, 24 Oct 2010 08:50:45 +0000 (10:50 +0200)
The SRR0 and SRR1 registers contain cached values of the PC and MSR,
respectively. They are written by the hypervisor when an interrupt
occurs, or directly by the kernel. They also tell the rfi(d)
instruction where to return to.
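
As a rough illustration of that round trip, here is a minimal sketch. It is
not code from this patch; 'struct cpu_state' is a made-up stand-in for the
real vcpu structures:

	/* Sketch of the SRR0/SRR1 semantics described above. */
	struct cpu_state {
		unsigned long pc;	/* program counter */
		unsigned long msr;	/* machine state register */
		unsigned long srr0;	/* PC saved on interrupt entry */
		unsigned long srr1;	/* MSR (plus flag bits) saved on entry */
	};

	/* Interrupt delivery: stash PC and MSR, then branch to the vector. */
	void deliver_interrupt(struct cpu_state *cpu, unsigned long vector,
			       unsigned long flags)
	{
		cpu->srr0 = cpu->pc;
		cpu->srr1 = cpu->msr | flags;
		cpu->pc = vector;
	}

	/* rfi/rfid: jump back to the saved PC and restore the saved MSR. */
	void emulate_rfi(struct cpu_state *cpu)
	{
		cpu->pc = cpu->srr0;
		cpu->msr = cpu->srr1;
	}

This is exactly the pattern that kvmppc_inject_interrupt() and the rfi/rfid
emulation in the hunks below implement against the vcpu state.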

Because they are only touched on well-defined events, they are very simple to
share with the guest. Hypervisor and guest both have full r/w access.
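
To make that concrete, here is a hedged sketch of the two sides of the shared
page. The struct layout matches the kvm_para.h hunk below; the guest-side
helper guest_read_srr0() is hypothetical, since the guest plumbing is not part
of this patch:

	#include <linux/types.h>

	/* Layout shared between hypervisor and guest (see kvm_para.h below). */
	struct kvm_vcpu_arch_shared {
		__u64 srr0;
		__u64 srr1;
		__u64 dar;
		__u64 msr;
		__u32 dsisr;
	};

	/*
	 * Hypothetical guest-side read: once the page is mapped into the
	 * guest, SRR0 is a plain load instead of a trapping mfspr, because
	 * the hypervisor writes the field on every interrupt injection.
	 */
	static inline __u64 guest_read_srr0(struct kvm_vcpu_arch_shared *shared)
	{
		return shared->srr0;
	}

	/*
	 * Host side (what this patch actually changes), as in book3s.c below:
	 *	vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
	 *	vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
	 */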

This patch converts all users of the current field to the shared page.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/kvm_para.h
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/booke_emulate.c
arch/powerpc/kvm/emulate.c

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c852408eac3819c129ceda12651c305f10fc2339..5255d754f9a9b4df8d16b5c7c147341f5d2bc9eb 100644
@@ -225,8 +225,6 @@ struct kvm_vcpu_arch {
        ulong sprg5;
        ulong sprg6;
        ulong sprg7;
-       ulong srr0;
-       ulong srr1;
        ulong csrr0;
        ulong csrr1;
        ulong dsrr0;
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index ec72a1c8c045373b040e86af5c7f80fab99621b8..d7fc6c2c9730312019b873e6779eddaa8d82526d 100644
@@ -23,6 +23,8 @@
 #include <linux/types.h>
 
 struct kvm_vcpu_arch_shared {
+       __u64 srr0;
+       __u64 srr1;
        __u64 dar;
        __u64 msr;
        __u32 dsisr;
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 4d46f8b13cc62dadf114ca170ee50d14e4728113..afa0dd4a27f438adfa984cfc801304d5b348f083 100644
@@ -162,8 +162,8 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 
 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
 {
-       vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
-       vcpu->arch.srr1 = vcpu->arch.shared->msr | flags;
+       vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
+       vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
        kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
        vcpu->arch.mmu.reset_msr(vcpu);
 }
@@ -1059,8 +1059,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->lr = kvmppc_get_lr(vcpu);
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
-       regs->srr0 = vcpu->arch.srr0;
-       regs->srr1 = vcpu->arch.srr1;
+       regs->srr0 = vcpu->arch.shared->srr0;
+       regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.sprg0;
        regs->sprg1 = vcpu->arch.sprg1;
@@ -1086,8 +1086,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        kvmppc_set_lr(vcpu, regs->lr);
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
-       vcpu->arch.srr0 = regs->srr0;
-       vcpu->arch.srr1 = regs->srr1;
+       vcpu->arch.shared->srr0 = regs->srr0;
+       vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.sprg0 = regs->sprg0;
        vcpu->arch.sprg1 = regs->sprg1;
        vcpu->arch.sprg2 = regs->sprg2;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index c1478642f856f4b2e0e2d6c32b0200863cc3cc4a..f333cb44534903f302376de3b35936d1a0474868 100644
@@ -73,8 +73,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                switch (get_xop(inst)) {
                case OP_19_XOP_RFID:
                case OP_19_XOP_RFI:
-                       kvmppc_set_pc(vcpu, vcpu->arch.srr0);
-                       kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+                       kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
+                       kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
                        *advance = 0;
                        break;
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4aab6d2ce133fef325c90774674da0c6e644548b..793df28b628d0ff4a69b6a989acb6d4d84e836f8 100644
@@ -64,7 +64,8 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
 
        printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
-       printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
+       printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
+                                           vcpu->arch.shared->srr1);
 
        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
 
@@ -189,8 +190,8 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
        }
 
        if (allowed) {
-               vcpu->arch.srr0 = vcpu->arch.pc;
-               vcpu->arch.srr1 = vcpu->arch.shared->msr;
+               vcpu->arch.shared->srr0 = vcpu->arch.pc;
+               vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
@@ -491,8 +492,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
-       regs->srr0 = vcpu->arch.srr0;
-       regs->srr1 = vcpu->arch.srr1;
+       regs->srr0 = vcpu->arch.shared->srr0;
+       regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.sprg0;
        regs->sprg1 = vcpu->arch.sprg1;
@@ -518,8 +519,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
-       vcpu->arch.srr0 = regs->srr0;
-       vcpu->arch.srr1 = regs->srr1;
+       vcpu->arch.shared->srr0 = regs->srr0;
+       vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.sprg0 = regs->sprg0;
        vcpu->arch.sprg1 = regs->sprg1;
        vcpu->arch.sprg2 = regs->sprg2;
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 51ef4539ed51fc4082b6c8886cb71c381aa394a4..1260f5f24c0c3df9827a4d08131ad589ee83d170 100644
@@ -31,8 +31,8 @@
 
 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.pc = vcpu->arch.srr0;
-       kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+       vcpu->arch.pc = vcpu->arch.shared->srr0;
+       kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
 }
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4568ec386c2aa858afe0a6024adfb476cac18c33..ad0fa4ff4ea06a5b5f9fade3c1ccc328178368c3 100644
@@ -242,9 +242,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
                        switch (sprn) {
                        case SPRN_SRR0:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
+                               break;
                        case SPRN_SRR1:
-                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
+                               kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
+                               break;
                        case SPRN_PVR:
                                kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
                        case SPRN_PIR:
@@ -320,9 +322,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        rs = get_rs(inst);
                        switch (sprn) {
                        case SPRN_SRR0:
-                               vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
+                               break;
                        case SPRN_SRR1:
-                               vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
+                               vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
+                               break;
 
                        /* XXX We need to context-switch the timebase for
                         * watchdog and FIT. */