Merge remote-tracking branch 'kvm-arm/kvm-arm-next'
author Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 12:58:43 +0000 (14:58 +0200)
committer Thierry Reding <treding@nvidia.com>
Thu, 24 Oct 2013 12:58:43 +0000 (14:58 +0200)
Conflicts:
arch/arm/kvm/reset.c

13 files changed:
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/kvm/Kconfig
arch/arm/kvm/coproc.c
arch/arm/kvm/handle_exit.c
arch/arm/kvm/mmu.c
arch/arm/kvm/psci.c
arch/arm/kvm/reset.c
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/pgtable-hwdef.h

diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index d556f03bca17491eeecf26a976641389cb49feb2..1d3153c7eb41c54d1e97fcd6078057fe5735a7dd 100644
@@ -57,6 +57,7 @@
  * TSC:                Trap SMC
  * TSW:                Trap cache operations by set/way
  * TWI:                Trap WFI
+ * TWE:                Trap WFE
  * TIDCP:      Trap L2CTLR/L2ECTLR
  * BSU_IS:     Upgrade barriers to the inner shareable domain
  * FB:         Force broadcast of all maintenance operations
@@ -67,7 +68,7 @@
  */
 #define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
                        HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
-                       HCR_SWIO | HCR_TIDCP)
+                       HCR_TWE | HCR_SWIO | HCR_TIDCP)
 #define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
 
 /* System Control Register (SCTLR) bits */
 #define HSR_EC_DABT    (0x24)
 #define HSR_EC_DABT_HYP        (0x25)
 
+#define HSR_WFI_IS_WFE         (1U << 0)
+
 #define HSR_HVC_IMM_MASK       ((1UL << 16) - 1)
 
 #define HSR_DABT_S1PTW         (1U << 7)
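
Note: WFI and WFE trap with the same exception class (HSR_EC_WFI), so the new HSR_WFI_IS_WFE bit in the ISS is what tells them apart. A minimal decode sketch using only definitions from this header (trapped_wfe is an illustrative helper, not part of the patch):

	/* Sketch: true if a trapped wait instruction was WFE rather than WFI. */
	static bool trapped_wfe(u32 hsr)
	{
		return ((hsr >> HSR_EC_SHIFT) == HSR_EC_WFI) &&
		       (hsr & HSR_WFI_IS_WFE);
	}
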
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a464e8d7b6c58f77c5f57fdda4202587f2331962..708e4d8a647f226b4dbdf9722527edd33177912b 100644
@@ -157,4 +157,9 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
        return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.cp15[c0_MPIDR];
+}
+
 #endif /* __ARM_KVM_EMULATE_H__ */
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 9b28c41f4ba916a569bf1f105064f092ab434dda..77de4a41cc5045bd3b778f98396a454d23767467 100644
@@ -62,6 +62,12 @@ phys_addr_t kvm_get_idmap_vector(void);
 int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
+static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
+{
+       *pmd = new_pmd;
+       flush_pmd_entry(pmd);
+}
+
 static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
 {
        *pte = new_pte;
@@ -103,9 +109,15 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
        pte_val(*pte) |= L_PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+       pmd_val(*pmd) |= L_PMD_S2_RDWR;
+}
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+                                             unsigned long size)
 {
        /*
         * If we are going to insert an instruction page and the icache is
@@ -120,8 +132,7 @@ static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
         */
        if (icache_is_pipt()) {
-               unsigned long hva = gfn_to_hva(kvm, gfn);
-               __cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
+               __cpuc_coherent_user_range(hva, hva + size);
        } else if (!icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
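
With the interface widened from a gfn to an (hva, size) pair, a caller can make a whole huge-page mapping coherent in one call. A usage sketch (sync_icache_for_map is a hypothetical wrapper), mirroring the fault path in the mmu.c hunks below:

	/* Sketch: flush the full 2MiB backing range for a stage-2 block
	 * mapping, or a single page otherwise. */
	static void sync_icache_for_map(struct kvm *kvm, hva_t hva, bool huge)
	{
		if (huge)
			coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
		else
			coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
	}
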
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 39c54cfa03e9b103ef39982a43bac74d5436b791..4f9503908dca4a7dc536324514801bcbd03d32fd 100644
 #define L_PTE_S2_RDONLY                 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */
 #define L_PTE_S2_RDWR           (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define L_PMD_S2_RDWR           (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+
 /*
  * Hyp-mode PL2 PTE definitions for LPAE.
  */
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index ebf5015508b525c052ddfed5c4ea2ea0a0ed969e..466bd299b1a8aad54949364d976d9c5430c2375e 100644
@@ -20,6 +20,7 @@ config KVM
        bool "Kernel-based Virtual Machine (KVM) support"
        select PREEMPT_NOTIFIERS
        select ANON_INODES
+       select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
        depends on ARM_VIRT_EXT && ARM_LPAE
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index a629f2c1d0f968e7a39e9d8ab98e0e1bee32d492..78c0885d65015df28ea7554a5e97a85b588e9fb1 100644
@@ -74,11 +74,13 @@ int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
 {
        /*
-        * Compute guest MPIDR. No need to mess around with different clusters
-        * but we read the 'U' bit from the underlying hardware directly.
+        * Compute guest MPIDR. We build a virtual cluster out of the
+        * vcpu_id, but we read the 'U' bit from the underlying
+        * hardware directly.
         */
-       vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & MPIDR_SMP_BITMASK)
-                                       | vcpu->vcpu_id;
+       vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+                                    ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
+                                    (vcpu->vcpu_id & 3));
 }
 
 /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
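
A worked example of the virtual cluster layout built above, assuming MPIDR_LEVEL_BITS == 8 as on ARMv7 (virt_affinity is an illustrative helper): vcpus are grouped four to a cluster, with vcpu_id split into Aff1 (cluster) and Aff0 (core within the cluster).

	/* Sketch of the affinity fields produced by reset_mpidr() above:
	 *   vcpu_id 0..3 -> Aff1 = 0, Aff0 = 0..3
	 *   vcpu_id 4..7 -> Aff1 = 1, Aff0 = 0..3  (e.g. vcpu 5 -> 0x101)
	 */
	static unsigned long virt_affinity(int vcpu_id)
	{
		return ((vcpu_id >> 2) << 8) | (vcpu_id & 3);
	}
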
@@ -122,6 +124,10 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
        asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
        l2ctlr &= ~(3 << 24);
        ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
+       /* How many cores in the current cluster and the next ones */
+       ncores -= (vcpu->vcpu_id & ~3);
+       /* Cap it to the maximum number of cores in a single cluster */
+       ncores = min(ncores, 3U);
        l2ctlr |= (ncores & 3) << 24;
 
        vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
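
To see the capping at work, take an 8-vcpu guest: for vcpu 5 (second cluster) ncores = 7 - (5 & ~3) = 3, so L2CTLR advertises a 4-core cluster; for vcpu 1 the raw value 7 is capped to the same 3. A sketch of the computation (cluster_field is an illustrative helper):

	/* Sketch of the core-count field computed by reset_l2ctlr() above. */
	static u32 cluster_field(u32 online_vcpus, u32 vcpu_id)
	{
		u32 ncores = online_vcpus - 1;

		ncores -= (vcpu_id & ~3);	/* drop cores in earlier clusters */
		return min(ncores, 3U);		/* at most 4 cores per cluster */
	}
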
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index df4c82d47ad7101cf2f87028d39471cd6d63d7df..a92079011a836974a9188ab8db1fa75f2bc5403a 100644
@@ -73,23 +73,29 @@ static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /**
- * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * kvm_handle_wfx - handle a WFI or WFE instruction trapped in a guest
  * @vcpu:      the vcpu pointer
  * @run:       the kvm_run structure pointer
  *
- * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
- * halt execution of world-switches and schedule other host processes until
- * there is an incoming IRQ or FIQ to the VM.
+ * WFE: Yield the CPU and come back to this vcpu when the scheduler
+ * decides to.
+ * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * world-switches and schedule other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
  */
-static int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
        trace_kvm_wfi(*vcpu_pc(vcpu));
-       kvm_vcpu_block(vcpu);
+       if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE)
+               kvm_vcpu_on_spin(vcpu);
+       else
+               kvm_vcpu_block(vcpu);
+
        return 1;
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-       [HSR_EC_WFI]            = kvm_handle_wfi,
+       [HSR_EC_WFI]            = kvm_handle_wfx,
        [HSR_EC_CP15_32]        = kvm_handle_cp15_32,
        [HSR_EC_CP15_64]        = kvm_handle_cp15_64,
        [HSR_EC_CP14_MR]        = kvm_handle_cp14_access,
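
For context, arm_exit_handlers[] is indexed by the HSR exception class; a sketch of the dispatch (assumed shape only, the real lookup lives in handle_exit()):

	/* Sketch: route a trapped exception to its handler by HSR EC. */
	static int dispatch_trap(struct kvm_vcpu *vcpu, struct kvm_run *run)
	{
		u8 hsr_ec = kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;

		return arm_exit_handlers[hsr_ec](vcpu, run);
	}
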
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index b0de86b56c13006a189efba24967b81d162a56d6..371958370de445fbe0cc200e772fa1e9426fb327 100644
@@ -19,6 +19,7 @@
 #include <linux/mman.h>
 #include <linux/kvm_host.h>
 #include <linux/io.h>
+#include <linux/hugetlb.h>
 #include <trace/events/kvm.h>
 #include <asm/pgalloc.h>
 #include <asm/cacheflush.h>
@@ -41,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define kvm_pmd_huge(_x)       (pmd_huge(_x) || pmd_trans_huge(_x))
+
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
        /*
@@ -93,19 +96,29 @@ static bool page_empty(void *ptr)
 
 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
 {
-       pmd_t *pmd_table = pmd_offset(pud, 0);
-       pud_clear(pud);
-       kvm_tlb_flush_vmid_ipa(kvm, addr);
-       pmd_free(NULL, pmd_table);
+       if (pud_huge(*pud)) {
+               pud_clear(pud);
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+       } else {
+               pmd_t *pmd_table = pmd_offset(pud, 0);
+               pud_clear(pud);
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+               pmd_free(NULL, pmd_table);
+       }
        put_page(virt_to_page(pud));
 }
 
 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
 {
-       pte_t *pte_table = pte_offset_kernel(pmd, 0);
-       pmd_clear(pmd);
-       kvm_tlb_flush_vmid_ipa(kvm, addr);
-       pte_free_kernel(NULL, pte_table);
+       if (kvm_pmd_huge(*pmd)) {
+               pmd_clear(pmd);
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+       } else {
+               pte_t *pte_table = pte_offset_kernel(pmd, 0);
+               pmd_clear(pmd);
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+               pte_free_kernel(NULL, pte_table);
+       }
        put_page(virt_to_page(pmd));
 }
 
@@ -136,18 +149,32 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
                        continue;
                }
 
+               if (pud_huge(*pud)) {
+                       /*
+                        * If we are dealing with a huge pud, just clear it and
+                        * move on.
+                        */
+                       clear_pud_entry(kvm, pud, addr);
+                       addr = pud_addr_end(addr, end);
+                       continue;
+               }
+
                pmd = pmd_offset(pud, addr);
                if (pmd_none(*pmd)) {
                        addr = pmd_addr_end(addr, end);
                        continue;
                }
 
-               pte = pte_offset_kernel(pmd, addr);
-               clear_pte_entry(kvm, pte, addr);
-               next = addr + PAGE_SIZE;
+               if (!kvm_pmd_huge(*pmd)) {
+                       pte = pte_offset_kernel(pmd, addr);
+                       clear_pte_entry(kvm, pte, addr);
+                       next = addr + PAGE_SIZE;
+               }
 
-               /* If we emptied the pte, walk back up the ladder */
-               if (page_empty(pte)) {
+               /*
+                * If the pmd entry is to be cleared, walk back up the ladder
+                */
+               if (kvm_pmd_huge(*pmd) || page_empty(pte)) {
                        clear_pmd_entry(kvm, pmd, addr);
                        next = pmd_addr_end(addr, end);
                        if (page_empty(pmd) && !page_empty(pud)) {
@@ -420,29 +447,71 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
        kvm->arch.pgd = NULL;
 }
 
-
-static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
-                         phys_addr_t addr, const pte_t *new_pte, bool iomap)
+static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                            phys_addr_t addr)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
-       pte_t *pte, old_pte;
 
-       /* Create 2nd stage page table mapping - Level 1 */
        pgd = kvm->arch.pgd + pgd_index(addr);
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                if (!cache)
-                       return 0; /* ignore calls from kvm_set_spte_hva */
+                       return NULL;
                pmd = mmu_memory_cache_alloc(cache);
                pud_populate(NULL, pud, pmd);
                get_page(virt_to_page(pud));
        }
 
-       pmd = pmd_offset(pud, addr);
+       return pmd_offset(pud, addr);
+}
+
+static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
+                              *cache, phys_addr_t addr, const pmd_t *new_pmd)
+{
+       pmd_t *pmd, old_pmd;
+
+       pmd = stage2_get_pmd(kvm, cache, addr);
+       VM_BUG_ON(!pmd);
+
+       /*
+        * Mapping in huge pages should only happen through a fault.  If a
+        * page is merged into a transparent huge page, the individual
+        * subpages of that huge page should be unmapped through MMU
+        * notifiers before we get here.
+        *
+        * Merging of CompoundPages is not supported; they should instead
+        * be split first, unmapped, merged, and mapped back in on-demand.
+        */
+       VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
+
+       old_pmd = *pmd;
+       kvm_set_pmd(pmd, *new_pmd);
+       if (pmd_present(old_pmd))
+               kvm_tlb_flush_vmid_ipa(kvm, addr);
+       else
+               get_page(virt_to_page(pmd));
+       return 0;
+}
+
+static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+                         phys_addr_t addr, const pte_t *new_pte, bool iomap)
+{
+       pmd_t *pmd;
+       pte_t *pte, old_pte;
 
-       /* Create 2nd stage page table mapping - Level 2 */
+       /* Create stage-2 page table mapping - Level 1 */
+       pmd = stage2_get_pmd(kvm, cache, addr);
+       if (!pmd) {
+               /*
+                * Ignore calls from kvm_set_spte_hva for unallocated
+                * address ranges.
+                */
+               return 0;
+       }
+
+       /* Create stage-2 page mappings - Level 2 */
        if (pmd_none(*pmd)) {
                if (!cache)
                        return 0; /* ignore calls from kvm_set_spte_hva */
@@ -507,16 +576,60 @@ out:
        return ret;
 }
 
+static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
+{
+       pfn_t pfn = *pfnp;
+       gfn_t gfn = *ipap >> PAGE_SHIFT;
+
+       if (PageTransCompound(pfn_to_page(pfn))) {
+               unsigned long mask;
+               /*
+                * The address we faulted on is backed by a transparent huge
+                * page.  However, because we map the compound huge page and
+                * not the individual tail page, we need to transfer the
+                * refcount to the head page.  We have to be careful that the
+                * THP doesn't start to split while we are adjusting the
+                * refcounts.
+                *
+                * We are sure this doesn't happen, because mmu_notifier_retry
+                * was successful and we are holding the mmu_lock, so if this
+                * THP is trying to split, it will be blocked in the mmu
+                * notifier before touching any of the pages, specifically
+                * before being able to call __split_huge_page_refcount().
+                *
+                * We can therefore safely transfer the refcount from PG_tail
+                * to PG_head and switch the pfn from a tail page to the head
+                * page accordingly.
+                */
+               mask = PTRS_PER_PMD - 1;
+               VM_BUG_ON((gfn & mask) != (pfn & mask));
+               if (pfn & mask) {
+                       *ipap &= PMD_MASK;
+                       kvm_release_pfn_clean(pfn);
+                       pfn &= ~mask;
+                       kvm_get_pfn(pfn);
+                       *pfnp = pfn;
+               }
+
+               return true;
+       }
+
+       return false;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-                         gfn_t gfn, struct kvm_memory_slot *memslot,
+                         struct kvm_memory_slot *memslot,
                          unsigned long fault_status)
 {
-       pte_t new_pte;
-       pfn_t pfn;
        int ret;
-       bool write_fault, writable;
+       bool write_fault, writable, hugetlb = false, force_pte = false;
        unsigned long mmu_seq;
+       gfn_t gfn = fault_ipa >> PAGE_SHIFT;
+       unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
+       struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+       struct vm_area_struct *vma;
+       pfn_t pfn;
 
        write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
        if (fault_status == FSC_PERM && !write_fault) {
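
A worked example of the rounding in transparent_hugepage_adjust() above, assuming 4KiB pages and 2MiB transparent huge pages (PTRS_PER_PMD == 512, mask == 511): a fault on pfn 0x12345 at IPA 0x89345000 satisfies (gfn & mask) == (pfn & mask) == 0x145, so the refcount is transferred and both values are rounded down to the head of the THP:

	/* Sketch of the adjustment with the numbers above:
	 *   *ipap &= PMD_MASK;   0x89345000 -> 0x89200000
	 *   pfn   &= ~mask;      0x12345    -> 0x12200
	 * kvm_release_pfn_clean() drops the tail-page reference and
	 * kvm_get_pfn() takes one on the head page. */
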
@@ -524,6 +637,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return -EFAULT;
        }
 
+       /* Let's check if we will get back a huge page backed by hugetlbfs */
+       down_read(&current->mm->mmap_sem);
+       vma = find_vma_intersection(current->mm, hva, hva + 1);
+       if (is_vm_hugetlb_page(vma)) {
+               hugetlb = true;
+               gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
+       } else {
+               /*
+                * Pages belonging to VMAs not aligned to the PMD mapping
+                * granularity cannot be mapped using block descriptors even
+                * if the pages belong to a THP for the process, because the
+                * stage-2 block descriptor will cover more than a single THP
+                * and we lose atomicity for unmapping, updates, and splits
+                * of the THP or other pages in the stage-2 block range.
+                */
+               if (vma->vm_start & ~PMD_MASK)
+                       force_pte = true;
+       }
+       up_read(&current->mm->mmap_sem);
+
        /* We need minimum second+third level pages */
        ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
        if (ret)
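
On the force_pte check above: a stage-2 block maps a 2MiB-aligned chunk of IPA space, so block mappings are only safe when the backing VMA itself starts on a PMD boundary; otherwise a single block could cover parts of two host THPs, losing atomicity for unmap, update, and split, as the comment in the hunk notes. A sketch of the condition (vma_allows_blocks is a hypothetical helper):

	/* Sketch: e.g. vm_start 0x40100000 (1MiB-aligned only) fails this
	 * test and forces 4KiB stage-2 mappings for the whole VMA. */
	static bool vma_allows_blocks(struct vm_area_struct *vma)
	{
		return (vma->vm_start & ~PMD_MASK) == 0;
	}
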
@@ -541,26 +674,40 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         */
        smp_rmb();
 
-       pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
+       pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
        if (is_error_pfn(pfn))
                return -EFAULT;
 
-       new_pte = pfn_pte(pfn, PAGE_S2);
-       coherent_icache_guest_page(vcpu->kvm, gfn);
-
-       spin_lock(&vcpu->kvm->mmu_lock);
-       if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
+       spin_lock(&kvm->mmu_lock);
+       if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;
-       if (writable) {
-               kvm_set_s2pte_writable(&new_pte);
-               kvm_set_pfn_dirty(pfn);
+       if (!hugetlb && !force_pte)
+               hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
+
+       if (hugetlb) {
+               pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
+               new_pmd = pmd_mkhuge(new_pmd);
+               if (writable) {
+                       kvm_set_s2pmd_writable(&new_pmd);
+                       kvm_set_pfn_dirty(pfn);
+               }
+               coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
+               ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
+       } else {
+               pte_t new_pte = pfn_pte(pfn, PAGE_S2);
+               if (writable) {
+                       kvm_set_s2pte_writable(&new_pte);
+                       kvm_set_pfn_dirty(pfn);
+               }
+               coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
+               ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
        }
-       stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
+
 
 out_unlock:
-       spin_unlock(&vcpu->kvm->mmu_lock);
+       spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
-       return 0;
+       return ret;
 }
 
 /**
@@ -629,7 +776,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
        memslot = gfn_to_memslot(vcpu->kvm, gfn);
 
-       ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
+       ret = user_mem_abort(vcpu, fault_ipa, memslot, fault_status);
        if (ret == 0)
                ret = 1;
 out_unlock:
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 86a693a02ba3a78e4288b94c63f3b1fba77a6b38..311263124acf303606b3081ae71fc0169e7f1dac 100644
@@ -18,6 +18,7 @@
 #include <linux/kvm_host.h>
 #include <linux/wait.h>
 
+#include <asm/cputype.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_psci.h>
 
@@ -34,22 +35,30 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
 static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 {
        struct kvm *kvm = source_vcpu->kvm;
-       struct kvm_vcpu *vcpu;
+       struct kvm_vcpu *vcpu = NULL, *tmp;
        wait_queue_head_t *wq;
        unsigned long cpu_id;
+       unsigned long mpidr;
        phys_addr_t target_pc;
+       int i;
 
        cpu_id = *vcpu_reg(source_vcpu, 1);
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);
 
-       if (cpu_id >= atomic_read(&kvm->online_vcpus))
+       kvm_for_each_vcpu(i, tmp, kvm) {
+               mpidr = kvm_vcpu_get_mpidr(tmp);
+               if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
+                       vcpu = tmp;
+                       break;
+               }
+       }
+
+       if (!vcpu)
                return KVM_PSCI_RET_INVAL;
 
        target_pc = *vcpu_reg(source_vcpu, 2);
 
-       vcpu = kvm_get_vcpu(kvm, cpu_id);
-
        wq = kvm_arch_vcpu_wq(vcpu);
        if (!waitqueue_active(wq))
                return KVM_PSCI_RET_INVAL;
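
This ties PSCI CPU_ON to the virtual MPIDR layout from the coproc.c hunk: the target cpu_id is now matched by affinity value instead of being used as a raw index into the vcpu array.

	/* Worked example, assuming MPIDR_LEVEL_BITS == 8 as on ARMv7:
	 * a guest bringing up vcpu 5 passes cpu_id 0x101 (Aff1 = 1,
	 * Aff0 = 1); the loop above finds the vcpu whose MPIDR was set
	 * to that value by reset_mpidr(), whereas the old bound check
	 * would have rejected 0x101 >= online_vcpus outright. */
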
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index d153e64d125505c9a8623521053fbe507db83a2f..f558c073c02378a449a05d337d47a8161ae5c51d 100644
@@ -33,8 +33,6 @@
  * Cortex-A15 and Cortex-A7 Reset Values
  */
 
-static const int cortexa_max_cpu_idx = 3;
-
 static struct kvm_regs cortexa_regs_reset = {
        .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
 };
@@ -64,8 +62,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        switch (vcpu->arch.target) {
        case KVM_ARM_TARGET_CORTEX_A7:
        case KVM_ARM_TARGET_CORTEX_A15:
-               if (vcpu->vcpu_id > cortexa_max_cpu_idx)
-                       return -EINVAL;
                reset_regs = &cortexa_regs_reset;
                vcpu->arch.midr = read_cpuid_id();
                cpu_vtimer_irq = &cortexa_vtimer_irq;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index eec073875218049371eb7beed7715f10ae3b067a..6df93cdc652bccdf4bd6287f30bc3eb70779ff4d 100644
@@ -177,4 +177,9 @@ static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
        return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
 }
 
+static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
+{
+       return vcpu_sys_reg(vcpu, MPIDR_EL1);
+}
+
 #endif /* __ARM64_KVM_EMULATE_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index efe609c6a3c95385aa97c61895ab76af4c198450..680f74e674971ab5062047369d6994edddc64e8e 100644
@@ -91,6 +91,7 @@ int kvm_mmu_init(void);
 void kvm_clear_hyp_idmap(void);
 
 #define        kvm_set_pte(ptep, pte)          set_pte(ptep, pte)
+#define        kvm_set_pmd(pmdp, pmd)          set_pmd(pmdp, pmd)
 
 static inline bool kvm_is_write_fault(unsigned long esr)
 {
@@ -116,13 +117,18 @@ static inline void kvm_set_s2pte_writable(pte_t *pte)
        pte_val(*pte) |= PTE_S2_RDWR;
 }
 
+static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
+{
+       pmd_val(*pmd) |= PMD_S2_RDWR;
+}
+
 struct kvm;
 
-static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
+static inline void coherent_icache_guest_page(struct kvm *kvm, hva_t hva,
+                                             unsigned long size)
 {
        if (!icache_is_aliasing()) {            /* PIPT */
-               unsigned long hva = gfn_to_hva(kvm, gfn);
-               flush_icache_range(hva, hva + PAGE_SIZE);
+               flush_icache_range(hva, hva + size);
        } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
                /* any kind of VIPT cache */
                __flush_icache_all();
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index d57e66845c8610fdaf53754bee71f9de8cadf983..755f86143320167038e24e9be093e61b8b307e65 100644
@@ -85,6 +85,8 @@
 #define PTE_S2_RDONLY          (_AT(pteval_t, 1) << 6)   /* HAP[2:1] */
 #define PTE_S2_RDWR            (_AT(pteval_t, 3) << 6)   /* HAP[2:1] */
 
+#define PMD_S2_RDWR            (_AT(pmdval_t, 3) << 6)   /* HAP[2:1] */
+
 /*
  * Memory Attribute override for Stage-2 (MemAttr[3:0])
  */
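
For reference on the bit encoding shared by these defines: the stage-2 access permissions live in HAP[2:1], descriptor bits [7:6], where HAP[1] grants read and HAP[2] grants write; 0b01 is therefore read-only and 0b11 read/write, identically for page (pte) and block (pmd) descriptors.

	/* Both architectures encode "stage-2 read/write block" the same way:
	 *   L_PMD_S2_RDWR == (_AT(pmdval_t, 3) << 6)   arm (LPAE)
	 *   PMD_S2_RDWR   == (_AT(pmdval_t, 3) << 6)   arm64
	 */
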