diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9ef3c4be952f37d847534ab7a4130e0268431062..ce6f2121fffe46857bf4b250c06ad3916ac24aeb 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -43,6 +43,7 @@ static void *real_vmalloc_addr(void *x)
 static int global_invalidates(struct kvm *kvm, unsigned long flags)
 {
        int global;
+       int cpu;
 
        /*
         * If there is only one vcore, and it's currently running,
@@ -60,8 +61,14 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
                /* any other core might now have stale TLB entries... */
                smp_wmb();
                cpumask_setall(&kvm->arch.need_tlb_flush);
-               cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
-                                 &kvm->arch.need_tlb_flush);
+               cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
+               /*
+                * On POWER9, threads are independent but the TLB is shared,
+                * so use the bit for the first thread to represent the core.
+                */
+               if (cpu_has_feature(CPU_FTR_ARCH_300))
+                       cpu = cpu_first_thread_sibling(cpu);
+               cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
        }
 
        return global;
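
A note on the POWER9 change above: cpu_first_thread_sibling() maps any hardware thread number to thread 0 of its core, so on ISA v3.00 machines need_tlb_flush is tracked per core rather than per thread. A minimal sketch of that mapping, matching the helper's likely definition in arch/powerpc/include/asm/cputhreads.h (threads_per_core is a power of two, e.g. 8 on POWER9):

/* Sketch: masking off the low bits of the logical CPU number yields the
 * first thread of the core, so one need_tlb_flush bit covers the whole
 * core's shared TLB.
 */
static inline int cpu_first_thread_sibling(int cpu)
{
        return cpu & ~(threads_per_core - 1);
}
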
@@ -79,10 +86,10 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 
        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
-               head = &kvm->arch.revmap[i];
+               head = &kvm->arch.hpt.rev[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
-               tail = &kvm->arch.revmap[head->back];
+               tail = &kvm->arch.hpt.rev[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
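
The hpt_virt/revmap to hpt.virt/hpt.rev renames that run through this diff follow from gathering the HPT bookkeeping into one sub-struct. A plausible shape for it, with virt and rev inferred from the accesses in this diff; the order and cma fields are assumptions about related HPT state that would migrate along with them:

/* Assumed layout of the new kvm->arch.hpt sub-struct */
struct kvm_hpt_info {
        unsigned long virt;             /* host virtual address of the guest HPT */
        struct revmap_entry *rev;       /* reverse-mapping array, one entry per HPTE */
        u32 order;                      /* HPT size is 2^order bytes */
        int cma;                        /* nonzero if allocated from CMA */
};
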
@@ -147,8 +154,8 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
        lock_rmap(rmap);
 
        head = *rmap & KVMPPC_RMAP_INDEX;
-       next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
-       prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+       next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
+       prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
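
For context on the forw/back manipulation above: each revmap_entry links the HPTEs that map the same guest page into a circular doubly-linked list, with the head index kept in the rmap word. The entry shape, as suggested by the fields used in this file:

/* Inferred entry shape: forw/back are HPTE indices rather than pointers,
 * so the chain stays valid whether the array is accessed through its
 * vmalloc address or the real-mode alias from real_vmalloc_addr().
 */
struct revmap_entry {
        unsigned long guest_rpte;       /* guest's view of the 2nd HPTE dword */
        unsigned int forw, back;        /* circular list of HPTEs for this page */
};
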
@@ -182,6 +189,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        unsigned long mmu_seq;
        unsigned long rcbits, irq_flags = 0;
 
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
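
The new kvm_is_radix() guard, repeated at the top of each HPT hypercall below, rejects these calls when the guest runs with the POWER9 radix MMU, since a radix guest has no hashed page table for them to operate on; H_FUNCTION tells the guest the hcall is unsupported. The predicate is presumably a simple per-VM flag test along these lines (the exact field name is an assumption):

/* Presumed predicate: a flag set at VM setup when the guest MMU is radix */
static inline bool kvm_is_radix(struct kvm *kvm)
{
        return kvm->arch.radix;
}
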
@@ -247,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                }
                pte = kvmppc_read_update_linux_pte(ptep, writing);
                if (pte_present(pte) && !pte_protnone(pte)) {
-                       if (writing && !pte_write(pte))
+                       if (writing && !__pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_ci = pte_ci(pte);
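
The pte_write() to __pte_write() switch matters for the POWER9 "saved write" optimization: pte_write() also accepts a PTE whose write permission was only transiently removed (e.g. for NUMA balancing), while the double-underscore variant tests the raw hardware write bit alone, which is what the HPTE must reflect. A sketch of the distinction; the pte_raw()/_PAGE_WRITE spelling follows book3s64 conventions but is an assumption here:

static inline bool __pte_write(pte_t pte)
{
        return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));
}

static inline bool pte_write(pte_t pte)
{
        /* a savedwrite PTE is logically writable, but must not be entered
         * into the HPT as writable until the fault path restores the bit */
        return __pte_write(pte) || pte_savedwrite(pte);
}
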
@@ -283,11 +292,11 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 
        /* Find and lock the HPTEG slot to use */
  do_insert:
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
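
The kvmppc_hpt_npte() call replacing the cached hpt_npte field is presumably derived from the HPT order: each HPTE is two 64-bit dwords (16 bytes, hence the pte_index << 4 addressing above), and HPTEs come in groups of eight (a 128-byte HPTEG, hence the hash << 7 seen later). Plausible accessors, assuming the order field sketched earlier:

/* Assumed accessors over hpt->order: a 2^order-byte HPT holds 2^(order-4)
 * 16-byte HPTEs, and its hash mask covers 2^(order-7) groups of eight.
 */
static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
        return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
        return (1UL << (hpt->order - 7)) - 1;
}
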
@@ -318,7 +327,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                }
                pte_index += i;
        } else {
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
@@ -335,7 +344,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
        }
 
        /* Save away the guest's idea of the second HPTE dword */
-       rev = &kvm->arch.revmap[pte_index];
+       rev = &kvm->arch.hpt.rev[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev) {
@@ -458,9 +467,11 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
        struct revmap_entry *rev;
        u64 pte, orig_pte, pte_r;
 
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
+       if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        pte = orig_pte = be64_to_cpu(hpte[0]);
@@ -476,7 +487,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                return H_NOT_FOUND;
        }
 
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
@@ -529,6 +540,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
        struct revmap_entry *rev, *revs[4];
        u64 hp0, hp1;
 
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
        global = global_invalidates(kvm, 0);
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
@@ -544,13 +557,13 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                                break;
                        }
                        if (req != 1 || flags == 3 ||
-                           pte_index >= kvm->arch.hpt_npte) {
+                           pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
-                       hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
+                       hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
@@ -587,7 +600,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                        }
 
                        args[j] = ((0x80 | flags) << 56) + pte_index;
-                       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+                       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
                        note_hpte_modification(kvm, rev);
 
                        if (!(hp0 & HPTE_V_VALID)) {
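
The args[j] writes above implement the PAPR H_BULK_REMOVE return convention: the low 56 bits keep the PTE index, and the top byte reports per-entry status, 0x80|flags for an entry that was processed and 0xa0|flags for a parameter error. A worked example of the arithmetic, with made-up values:

unsigned long flags = 0x2, pte_index = 0x1234;
unsigned long done = ((0x80 | flags) << 56) + pte_index; /* 0x8200000000001234 */
unsigned long perr = ((0xa0 | flags) << 56) + pte_index; /* 0xa200000000001234 */
/* the guest reads the status from bits 63:56 and the index from bits 55:0 */
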
@@ -642,10 +655,12 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
        unsigned long v, r, rb, mask, bits;
        u64 pte_v, pte_r;
 
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
+       if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
 
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = pte_v = be64_to_cpu(hpte[0]);
@@ -665,7 +680,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
@@ -711,15 +726,17 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
        int i, n = 1;
        struct revmap_entry *rev = NULL;
 
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
+       if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
                v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
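
Two things worth noting in the H_READ hunk above. First, with H_READ_4 the index is rounded down to a multiple of four and four consecutive HPTEs are returned pairwise in the hcall return registers. Second, the CPU_FTR_ARCH_300 branch converts POWER9's rearranged HPTE layout (ISA v3.00 moved the segment-size field between the two dwords) back to the pre-POWER9 format the guest interface exposes. A worked example of the rounding, with a made-up index:

/* pte_index = 13 (0b1101); pte_index &= ~3 gives 12, n = 4,
 * so the loop returns HPTEs 12, 13, 14 and 15.
 */
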
@@ -750,11 +767,13 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
        unsigned long *rmap;
        long ret = H_NOT_FOUND;
 
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
+       if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
 
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = be64_to_cpu(hpte[0]);
@@ -796,11 +815,13 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
        unsigned long *rmap;
        long ret = H_NOT_FOUND;
 
-       if (pte_index >= kvm->arch.hpt_npte)
+       if (kvm_is_radix(kvm))
+               return H_FUNCTION;
+       if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
                return H_PARAMETER;
 
-       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
-       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
+       hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        v = be64_to_cpu(hpte[0]);
@@ -949,7 +970,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
-       hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
+       hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;
 
@@ -960,7 +981,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
        val |= avpn;
 
        for (;;) {
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));
 
                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
@@ -996,7 +1017,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
-               hash = hash ^ kvm->arch.hpt_mask;
+               hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
        }
        return -1;
 }
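
The hash ^ kvmppc_hpt_mask() step gives the secondary hash group: since the mask is all-ones up to the table size, the XOR is the ISA's one's-complement secondary hash truncated to the table, so each lookup probes at most two HPTEGs (primary, then secondary with HPTE_V_SECONDARY set). Continuing the worked example above (mask = 0x7ffff):

/* primary   = 0x3b7c4
 * secondary = 0x3b7c4 ^ 0x7ffff = 0x4483b   (complement within the table)
 */
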
@@ -1045,14 +1066,14 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                                return status;  /* there really was no HPTE */
                        return 0;       /* for prot fault, HPTE disappeared */
                }
-               hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
+               hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
                v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
                r = be64_to_cpu(hpte[1]);
                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        v = hpte_new_to_old_v(v, r);
                        r = hpte_new_to_old_r(r);
                }
-               rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
+               rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
                gr = rev->guest_rpte;
 
                unlock_hpte(hpte, orig_v);