KVM: ppc: Move 440-specific TLB code into 44x_tlb.c
arch/powerpc/kvm/44x_tlb.c
index ad72c6f9811f62ed05695ca23221bd2d9b87f31a..dd75ab84e04ea9cda389510639b4f34990a48266 100644
 
 static unsigned int kvmppc_tlb_44x_pos;
 
+#ifdef DEBUG
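+/* Debug helper: print every valid entry in both the guest-visible TLB and
+ * the host shadow TLB for this vcpu. */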
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct tlbe *tlbe;
+       int i;
+
+       printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+       printk("| %2s | %3s | %8s | %8s | %8s |\n",
+                       "nr", "tid", "word0", "word1", "word2");
+
+       for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+               tlbe = &vcpu->arch.guest_tlb[i];
+               if (tlbe->word0 & PPC44x_TLB_VALID)
+                       printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
+                              i, tlbe->tid, tlbe->word0, tlbe->word1,
+                              tlbe->word2);
+       }
+
+       for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+               tlbe = &vcpu->arch.shadow_tlb[i];
+               if (tlbe->word0 & PPC44x_TLB_VALID)
+                       printk(" S%2d |  %02X | %08X | %08X | %08X |\n",
+                              i, tlbe->tid, tlbe->word0, tlbe->word1,
+                              tlbe->word2);
+       }
+}
+#endif
+
 static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 {
        /* Mask off reserved bits. */
@@ -191,8 +219,8 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
                        handler);
 }
 
-void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
-                           gva_t eend, u32 asid)
+static void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, gva_t eaddr,
+                                  gva_t eend, u32 asid)
 {
        unsigned int pid = asid & 0xff;
        int i;
@@ -249,3 +277,109 @@ void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
 
        vcpu->arch.shadow_pid = !usermode;
 }
+
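+/* A guest TLB entry can be mapped into the host shadow TLB only if it is
+ * valid, matches the guest's current address space (MSR[IS]), and its real
+ * address falls within guest RAM backed by a memslot. */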
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+                             const struct tlbe *tlbe)
+{
+       gpa_t gpa;
+
+       if (!get_tlb_v(tlbe))
+               return 0;
+
+       /* Does it match current guest AS? */
+       /* XXX what about IS != DS? */
+       if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+               return 0;
+
+       gpa = get_tlb_raddr(tlbe);
+       if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+               /* Mapping is not for RAM. */
+               return 0;
+
+       return 1;
+}
+
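+/* Emulate the guest tlbwe instruction: rA selects the guest TLB index, rS
+ * supplies the new value, and WS selects which word (PAGEID, XLAT or ATTRIB)
+ * is written. Shadow mappings for the old entry are invalidated first. */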
+int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
+{
+       u64 eaddr;
+       u64 raddr;
+       u64 asid;
+       u32 flags;
+       struct tlbe *tlbe;
+       unsigned int index;
+
+       index = vcpu->arch.gpr[ra];
+       if (index >= PPC44x_TLB_SIZE) {
+               printk("%s: index %d\n", __func__, index);
+               kvmppc_dump_vcpu(vcpu);
+               return EMULATE_FAIL;
+       }
+
+       tlbe = &vcpu->arch.guest_tlb[index];
+
+       /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
+       if (tlbe->word0 & PPC44x_TLB_VALID) {
+               eaddr = get_tlb_eaddr(tlbe);
+               asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+               kvmppc_mmu_invalidate(vcpu, eaddr, get_tlb_end(tlbe), asid);
+       }
+
+       switch (ws) {
+       case PPC44x_TLB_PAGEID:
+               tlbe->tid = vcpu->arch.mmucr & 0xff;
+               tlbe->word0 = vcpu->arch.gpr[rs];
+               break;
+
+       case PPC44x_TLB_XLAT:
+               tlbe->word1 = vcpu->arch.gpr[rs];
+               break;
+
+       case PPC44x_TLB_ATTRIB:
+               tlbe->word2 = vcpu->arch.gpr[rs];
+               break;
+
+       default:
+               return EMULATE_FAIL;
+       }
+
+       if (tlbe_is_host_safe(vcpu, tlbe)) {
+               eaddr = get_tlb_eaddr(tlbe);
+               raddr = get_tlb_raddr(tlbe);
+               asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+               flags = tlbe->word2 & 0xffff;
+
+               /* Create a 4KB mapping on the host. If the guest wanted a
+                * large page, only the first 4KB is mapped here and the rest
+                * are mapped on the fly. */
+               kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+       }
+
+       KVMTRACE_5D(GTLB_WRITE, vcpu, index,
+                   tlbe->tid, tlbe->word0, tlbe->word1, tlbe->word2,
+                   handler);
+
+       return EMULATE_DONE;
+}
+
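+/* Emulate the guest tlbsx instruction: search the guest TLB for the
+ * effective address in rB (plus rA if nonzero) using the PID and AS fields
+ * from MMUCR, and return the matching index (or a negative value) in rT. */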
+int kvmppc_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
+{
+       u32 ea;
+       int index;
+       unsigned int as = get_mmucr_sts(vcpu);
+       unsigned int pid = get_mmucr_stid(vcpu);
+
+       ea = vcpu->arch.gpr[rb];
+       if (ra)
+               ea += vcpu->arch.gpr[ra];
+
+       index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
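+       /* For the record form (tlbsx.), set or clear CR0[EQ] (0x20000000)
+        * to indicate whether a matching entry was found. */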
+       if (rc) {
+               if (index < 0)
+                       vcpu->arch.cr &= ~0x20000000;
+               else
+                       vcpu->arch.cr |= 0x20000000;
+       }
+       vcpu->arch.gpr[rt] = index;
+
+       return EMULATE_DONE;
+}