From: Alexander Graf
Date: Wed, 24 Mar 2010 20:48:17 +0000 (+0100)
Subject: KVM: PPC: Ensure split mode works
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=3eeafd7da2b0293b512abe95c86843fc4ab42add;p=linux-beck.git

KVM: PPC: Ensure split mode works

On PowerPC we can go into MMU split mode. That means that either data
relocation is on but instruction relocation is off, or vice versa.

That mode didn't work properly, as we weren't always flushing entries
when going into a new split mode, potentially mapping different code or
data than we're supposed to.

Signed-off-by: Alexander Graf
Signed-off-by: Avi Kivity
---
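(Not part of the patch.) A small standalone sketch of why flushing with
VSID_SPLIT_MASK catches exactly the split mode mappings. It assumes
kvmppc_mmu_pte_vflush() drops every shadow PTE whose (vpage & vp_mask)
equals the value passed in; only the VSID constants introduced below are
used, and the guest page number is arbitrary.

/*
 * Illustrative userspace check of the VSID_SPLIT_MASK arithmetic.
 * Assumption: kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK, VSID_SPLIT_MASK)
 * removes shadow PTEs with (vpage & VSID_SPLIT_MASK) == VSID_SPLIT_MASK.
 */
#include <stdint.h>
#include <stdio.h>

#define VSID_REAL_DR	0x7ffffffffff00000ULL
#define VSID_REAL_IR	0x7fffffffffe00000ULL
#define VSID_SPLIT_MASK	0x7fffffffffe00000ULL
#define VSID_REAL	0x7fffffffffc00000ULL
#define VSID_BAT	0x7fffffffffb00000ULL

static int flushed_on_split_change(uint64_t vpage)
{
	/* The per-PTE test the flush is assumed to apply. */
	return (vpage & VSID_SPLIT_MASK) == VSID_SPLIT_MASK;
}

int main(void)
{
	uint64_t page = 0xc0000000ULL >> 12;	/* arbitrary guest page number */

	/* Split mode tags are selected by the flush... */
	printf("VSID_REAL_DR: %d\n", flushed_on_split_change(VSID_REAL_DR | page)); /* 1 */
	printf("VSID_REAL_IR: %d\n", flushed_on_split_change(VSID_REAL_IR | page)); /* 1 */
	/* ...plain real mode and BAT mappings are kept. */
	printf("VSID_REAL:    %d\n", flushed_on_split_change(VSID_REAL | page));    /* 0 */
	printf("VSID_BAT:     %d\n", flushed_on_split_change(VSID_BAT | page));     /* 0 */
	return 0;
}

Only VSID_REAL_DR and VSID_REAL_IR share all the bits selected by
VSID_SPLIT_MASK, so ordinary real mode and BAT mappings survive the flush.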
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index e6ea974df44e..14d0262ae00b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -99,10 +99,11 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0xfffffffffff00000
-#define VSID_REAL_DR	0xffffffffffe00000
-#define VSID_REAL_IR	0xffffffffffd00000
-#define VSID_BAT	0xffffffffffc00000
+#define VSID_REAL_DR	0x7ffffffffff00000
+#define VSID_REAL_IR	0x7fffffffffe00000
+#define VSID_SPLIT_MASK	0x7fffffffffe00000
+#define VSID_REAL	0x7fffffffffc00000
+#define VSID_BAT	0x7fffffffffb00000
 #define VSID_PR 0x8000000000000000
 
 extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 ea, u64 ea_mask);
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 8cab902771a2..ff5a42058257 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -134,6 +134,14 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
 
 	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
 	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
+		bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+		bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
+
+		/* Flush split mode PTEs */
+		if (dr != ir)
+			kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
+					      VSID_SPLIT_MASK);
+
 		kvmppc_mmu_flush_segments(vcpu);
 		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
 	}
@@ -396,15 +404,7 @@ static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
 	} else {
 		pte->eaddr = eaddr;
 		pte->raddr = eaddr & 0xffffffff;
-		pte->vpage = eaddr >> 12;
-		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-		case 0:
-			pte->vpage |= VSID_REAL;
-		case MSR_DR:
-			pte->vpage |= VSID_REAL_DR;
-		case MSR_IR:
-			pte->vpage |= VSID_REAL_IR;
-		}
+		pte->vpage = VSID_REAL | eaddr >> 12;
 		pte->may_read = true;
 		pte->may_write = true;
 		pte->may_execute = true;
@@ -513,12 +513,10 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	int page_found = 0;
 	struct kvmppc_pte pte;
 	bool is_mmio = false;
+	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
+	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
 
-	if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
-		relocated = (vcpu->arch.msr & MSR_DR);
-	} else {
-		relocated = (vcpu->arch.msr & MSR_IR);
-	}
+	relocated = data ? dr : ir;
 
 	/* Resolve real address if translation turned on */
 	if (relocated) {
@@ -530,14 +528,18 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		pte.raddr = eaddr & 0xffffffff;
 		pte.eaddr = eaddr;
 		pte.vpage = eaddr >> 12;
-		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
-		case 0:
-			pte.vpage |= VSID_REAL;
-		case MSR_DR:
-			pte.vpage |= VSID_REAL_DR;
-		case MSR_IR:
-			pte.vpage |= VSID_REAL_IR;
-		}
+	}
+
+	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
+	case 0:
+		pte.vpage |= VSID_REAL;
+		break;
+	case MSR_DR:
+		pte.vpage |= VSID_REAL_DR;
+		break;
+	case MSR_IR:
+		pte.vpage |= VSID_REAL_IR;
+		break;
 	}
 
 	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&