unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
unsigned long gpa, bool dirty);
-extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh, unsigned long ptel);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel,
pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
-extern unsigned long kvm_rma_pages;
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
struct page *pages[0];
};
-struct kvm_rma_info {
- atomic_t use_count;
- unsigned long base_pfn;
-};
-
/* XICS components, defined in book3s_xics.c */
struct kvmppc_xics;
struct kvmppc_icp;
#define KVMPPC_RMAP_PRESENT 0x100000000ul
#define KVMPPC_RMAP_INDEX 0xfffffffful
-/* Low-order bits in memslot->arch.slot_phys[] */
-#define KVMPPC_PAGE_ORDER_MASK 0x1f
-#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
-#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
-#define KVMPPC_GOT_PAGE 0x80
-
struct kvm_arch_memory_slot {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
unsigned long *rmap;
- unsigned long *slot_phys;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
};
- struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
int rma_setup_done;
- int using_mmu_notifiers;
u32 hpt_order;
atomic_t vcpus_running;
u32 online_vcores;
unsigned long hpt_npte;
unsigned long hpt_mask;
atomic_t hpte_mod_interest;
- spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba);
-extern struct kvm_rma_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
- DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
DEFINE(KVM_NEED_FLUSH, offsetof(struct kvm, arch.need_tlb_flush.bits));
DEFINE(KVM_ENABLED_HCALLS, offsetof(struct kvm, arch.enabled_hcalls));
DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
#include "trace_hv.h"
-/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
-#define MAX_LPID_970 63
-
/* Power architecture requires HPT is at least 256kB */
#define PPC_MIN_HPT_ORDER 18
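
For reference, the arithmetic behind these constants: an order-N HPT is 2^N bytes, each HPTE is 16 bytes, and HPTEs come in 128-byte groups of eight, so the table holds 2^(N-4) entries with a hash mask of 2^(N-7) - 1. A minimal sketch of that geometry calculation (the helper name is hypothetical):

/* Hypothetical helper: derive HPT geometry from its order (log2 of size). */
static void hpt_geometry(unsigned int order, unsigned long *npte,
			 unsigned long *mask)
{
	BUG_ON(order < PPC_MIN_HPT_ORDER);	/* architectural minimum: 256kB */
	*npte = 1ul << (order - 4);		/* 16 bytes per HPTE */
	*mask = (1ul << (order - 7)) - 1;	/* 8 HPTEs (128 bytes) per group */
}

With KVM_DEFAULT_HPT_ORDER = 24 this gives the default 16MB table with 2^20 entries, matching the hpt_npte/hpt_mask fields kept above.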
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EINVAL;
- /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
- if (cpu_has_feature(CPU_FTR_ARCH_206)) {
- host_lpid = mfspr(SPRN_LPID); /* POWER7 */
- rsvd_lpid = LPID_RSVD;
- } else {
- host_lpid = 0; /* PPC970 */
- rsvd_lpid = MAX_LPID_970;
- }
+ /* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
+ host_lpid = mfspr(SPRN_LPID);
+ rsvd_lpid = LPID_RSVD;
kvmppc_init_lpid(rsvd_lpid + 1);
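
kvmppc_init_lpid(rsvd_lpid + 1) just records how many LPID values are usable; individual LPIDs are then handed out from a global bitmap, with the host's own LPID and the reserved value never handed to a guest. A sketch of such an allocator, assuming it mirrors the generic powerpc KVM one:

/* Sketch: hand out guest LPIDs from a bitmap of usable values. */
static DECLARE_BITMAP(lpid_inuse, KVMPPC_NR_LPIDS);
static unsigned long nr_lpids;		/* set to rsvd_lpid + 1 at init */

static long alloc_lpid_sketch(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids)
			return -ENOMEM;	/* all partition IDs in use */
	} while (test_and_set_bit(lpid, lpid_inuse));
	return lpid;
}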
kvmppc_set_msr(vcpu, msr);
}
-/*
- * This is called to get a reference to a guest page if there isn't
- * one already in the memslot->arch.slot_phys[] array.
- */
-static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
- struct kvm_memory_slot *memslot,
- unsigned long psize)
-{
- unsigned long start;
- long np, err;
- struct page *page, *hpage, *pages[1];
- unsigned long s, pgsize;
- unsigned long *physp;
- unsigned int is_io, got, pgorder;
- struct vm_area_struct *vma;
- unsigned long pfn, i, npages;
-
- physp = memslot->arch.slot_phys;
- if (!physp)
- return -EINVAL;
- if (physp[gfn - memslot->base_gfn])
- return 0;
-
- is_io = 0;
- got = 0;
- page = NULL;
- pgsize = psize;
- err = -EINVAL;
- start = gfn_to_hva_memslot(memslot, gfn);
-
- /* Instantiate and get the page we want access to */
- np = get_user_pages_fast(start, 1, 1, pages);
- if (np != 1) {
- /* Look up the vma for the page */
- down_read(&current->mm->mmap_sem);
- vma = find_vma(current->mm, start);
- if (!vma || vma->vm_start > start ||
- start + psize > vma->vm_end ||
- !(vma->vm_flags & VM_PFNMAP))
- goto up_err;
- is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
- pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
- /* check alignment of pfn vs. requested page size */
- if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
- goto up_err;
- up_read(&current->mm->mmap_sem);
-
- } else {
- page = pages[0];
- got = KVMPPC_GOT_PAGE;
-
- /* See if this is a large page */
- s = PAGE_SIZE;
- if (PageHuge(page)) {
- hpage = compound_head(page);
- s <<= compound_order(hpage);
- /* Get the whole large page if slot alignment is ok */
- if (s > psize && slot_is_aligned(memslot, s) &&
- !(memslot->userspace_addr & (s - 1))) {
- start &= ~(s - 1);
- pgsize = s;
- get_page(hpage);
- put_page(page);
- page = hpage;
- }
- }
- if (s < psize)
- goto out;
- pfn = page_to_pfn(page);
- }
-
- npages = pgsize >> PAGE_SHIFT;
- pgorder = __ilog2(npages);
- physp += (gfn - memslot->base_gfn) & ~(npages - 1);
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i) {
- if (!physp[i]) {
- physp[i] = ((pfn + i) << PAGE_SHIFT) +
- got + is_io + pgorder;
- got = 0;
- }
- }
- spin_unlock(&kvm->arch.slot_phys_lock);
- err = 0;
-
- out:
- if (got)
- put_page(page);
- return err;
-
- up_err:
- up_read(&current->mm->mmap_sem);
- return err;
-}
-
long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
long pte_index, unsigned long pteh,
unsigned long ptel, unsigned long *pte_idx_ret)
{
- unsigned long psize, gpa, gfn;
- struct kvm_memory_slot *memslot;
long ret;
- if (kvm->arch.using_mmu_notifiers)
- goto do_insert;
-
- psize = hpte_page_size(pteh, ptel);
- if (!psize)
- return H_PARAMETER;
-
- pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
-
- /* Find the memslot (if any) for this address */
- gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
- gfn = gpa >> PAGE_SHIFT;
- memslot = gfn_to_memslot(kvm, gfn);
- if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
- if (!slot_is_aligned(memslot, psize))
- return H_PARAMETER;
- if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
- return H_PARAMETER;
- }
-
- do_insert:
/* Protect linux PTE lookup from page table destruction */
rcu_read_lock_sched(); /* this disables preemption too */
ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
}
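
The rcu_read_lock_sched() above is load-bearing: powerpc frees page tables under an RCU-sched grace period, so holding the sched-RCU read lock (which also disables preemption) keeps the Linux PTE walk in kvmppc_do_h_enter() safe against concurrent table teardown. The shape of the pattern, sketched with a hypothetical walker name:

/* Sketch: read a Linux PTE without racing page-table destruction. */
static pte_t lookup_pte_sketch(pgd_t *pgdir, unsigned long hva)
{
	pte_t pte = __pte(0);
	pte_t *ptep;

	rcu_read_lock_sched();			/* also disables preemption */
	ptep = pte_walk_sketch(pgdir, hva);	/* tables are freed via RCU-sched */
	if (ptep)
		pte = *ptep;
	rcu_read_unlock_sched();
	return pte;
}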
-/*
- * We come here on a H_ENTER call from the guest when we are not
- * using mmu notifiers and we don't have the requested page pinned
- * already.
- */
-long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
- long pte_index, unsigned long pteh,
- unsigned long ptel)
-{
- return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
- pteh, ptel, &vcpu->arch.gpr[4]);
-}
-
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
gva_t eaddr)
{
gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
/* Storage key permission check for POWER7 */
- if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
+ if (data && virtmode) {
int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
if (amrfield & 1)
gpte->may_read = 0;
return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
dsisr & DSISR_ISSTORE);
- if (!kvm->arch.using_mmu_notifiers)
- return -EFAULT; /* should never get here */
-
/*
* This should never happen, because of the slot_is_aligned()
* check in kvmppc_do_h_enter().
psize = hpte_page_size(be64_to_cpu(hptep[0]), ptel);
if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) &&
hpte_rpn(ptel, psize) == gfn) {
- if (kvm->arch.using_mmu_notifiers)
- hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
+ hptep[0] |= cpu_to_be64(HPTE_V_ABSENT);
kvmppc_invalidate_hpte(kvm, hptep, i);
/* Harvest R and C */
rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
- if (kvm->arch.using_mmu_notifiers)
- kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
+ kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
return 0;
}
int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
- if (kvm->arch.using_mmu_notifiers)
- kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
+ kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
return 0;
}
int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
- if (!kvm->arch.using_mmu_notifiers)
- return 0;
return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
}
int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
- if (!kvm->arch.using_mmu_notifiers)
- return 0;
return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}
void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
- if (!kvm->arch.using_mmu_notifiers)
- return;
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
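
All four hooks now funnel unconditionally into kvm_handle_hva()/kvm_handle_hva_range(), which walk every memslot overlapping the hva range and apply the handler to each rmap entry. A condensed sketch of that iteration, using the generic memslot helpers (caller holds kvm->srcu):

/* Simplified sketch of kvm_handle_hva_range() as used by the hooks above. */
static int handle_hva_range_sketch(struct kvm *kvm, unsigned long start,
		unsigned long end,
		int (*handler)(struct kvm *kvm, unsigned long *rmapp,
			       unsigned long gfn))
{
	struct kvm_memslots *slots = kvm_memslots(kvm);
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		for (; gfn < gfn_end; ++gfn)
			handler(kvm, &memslot->arch.rmap[gfn - memslot->base_gfn],
				gfn);
	}
	return 0;
}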
struct page *page, *pages[1];
int npages;
unsigned long hva, offset;
- unsigned long pa;
- unsigned long *physp;
int srcu_idx;
srcu_idx = srcu_read_lock(&kvm->srcu);
memslot = gfn_to_memslot(kvm, gfn);
if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
goto err;
- if (!kvm->arch.using_mmu_notifiers) {
- physp = memslot->arch.slot_phys;
- if (!physp)
- goto err;
- physp += gfn - memslot->base_gfn;
- pa = *physp;
- if (!pa) {
- if (kvmppc_get_guest_page(kvm, gfn, memslot,
- PAGE_SIZE) < 0)
- goto err;
- pa = *physp;
- }
- page = pfn_to_page(pa >> PAGE_SHIFT);
- get_page(page);
- } else {
- hva = gfn_to_hva_memslot(memslot, gfn);
- npages = get_user_pages_fast(hva, 1, 1, pages);
- if (npages < 1)
- goto err;
- page = pages[0];
- }
+ hva = gfn_to_hva_memslot(memslot, gfn);
+ npages = get_user_pages_fast(hva, 1, 1, pages);
+ if (npages < 1)
+ goto err;
+ page = pages[0];
srcu_read_unlock(&kvm->srcu, srcu_idx);
offset = gpa & (PAGE_SIZE - 1);
put_page(page);
- if (!dirty || !kvm->arch.using_mmu_notifiers)
+ if (!dirty)
return;
/* We need to mark this page dirty in the rmap chain */
{
struct kvmppc_mmu *mmu = &vcpu->arch.mmu;
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- vcpu->arch.slb_nr = 32; /* POWER7 */
- else
- vcpu->arch.slb_nr = 64;
+ vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */
mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
if (arch_compat) {
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return -EINVAL; /* 970 has no compat mode support */
-
switch (arch_compat) {
case PVR_ARCH_205:
/*
return RESUME_HOST;
switch (req) {
- case H_ENTER:
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
- kvmppc_get_gpr(vcpu, 5),
- kvmppc_get_gpr(vcpu, 6),
- kvmppc_get_gpr(vcpu, 7));
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- break;
case H_CEDE:
break;
case H_PROD:
/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb();
- /* On the first time here, set up HTAB and VRMA or RMA */
+ /* On the first time here, set up HTAB and VRMA */
if (!vcpu->kvm->arch.rma_setup_done) {
r = kvmppc_hv_setup_htab_rma(vcpu);
if (r)
return r;
}
-
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
- Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
- switch (rma_size) {
- case 32ul << 20: /* 32 MB */
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return 8; /* only supported on POWER7 */
- return -1;
- case 64ul << 20: /* 64 MB */
- return 3;
- case 128ul << 20: /* 128 MB */
- return 7;
- case 256ul << 20: /* 256 MB */
- return 4;
- case 1ul << 30: /* 1 GB */
- return 2;
- case 16ul << 30: /* 16 GB */
- return 1;
- case 256ul << 30: /* 256 GB */
- return 0;
- default:
- return -1;
- }
-}
-
-static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct page *page;
- struct kvm_rma_info *ri = vma->vm_file->private_data;
-
- if (vmf->pgoff >= kvm_rma_pages)
- return VM_FAULT_SIGBUS;
-
- page = pfn_to_page(ri->base_pfn + vmf->pgoff);
- get_page(page);
- vmf->page = page;
- return 0;
-}
-
-static const struct vm_operations_struct kvm_rma_vm_ops = {
- .fault = kvm_rma_fault,
-};
-
-static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
-{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &kvm_rma_vm_ops;
- return 0;
-}
-
-static int kvm_rma_release(struct inode *inode, struct file *filp)
-{
- struct kvm_rma_info *ri = filp->private_data;
-
- kvm_release_rma(ri);
- return 0;
-}
-
-static const struct file_operations kvm_rma_fops = {
- .mmap = kvm_rma_mmap,
- .release = kvm_rma_release,
-};
-
-static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
- struct kvm_allocate_rma *ret)
-{
- long fd;
- struct kvm_rma_info *ri;
- /*
- * Only do this on PPC970 in HV mode
- */
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_201))
- return -EINVAL;
-
- if (!kvm_rma_pages)
- return -EINVAL;
-
- ri = kvm_alloc_rma();
- if (!ri)
- return -ENOMEM;
-
- fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
- if (fd < 0)
- kvm_release_rma(ri);
-
- ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
- return fd;
-}
-
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
int linux_psize)
{
return r;
}
-static void unpin_slot(struct kvm_memory_slot *memslot)
-{
- unsigned long *physp;
- unsigned long j, npages, pfn;
- struct page *page;
-
- physp = memslot->arch.slot_phys;
- npages = memslot->npages;
- if (!physp)
- return;
- for (j = 0; j < npages; j++) {
- if (!(physp[j] & KVMPPC_GOT_PAGE))
- continue;
- pfn = physp[j] >> PAGE_SHIFT;
- page = pfn_to_page(pfn);
- SetPageDirty(page);
- put_page(page);
- }
-}
-
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
vfree(free->arch.rmap);
free->arch.rmap = NULL;
}
- if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
- unpin_slot(free);
- vfree(free->arch.slot_phys);
- free->arch.slot_phys = NULL;
- }
}
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
if (!slot->arch.rmap)
return -ENOMEM;
- slot->arch.slot_phys = NULL;
return 0;
}
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem)
{
- unsigned long *phys;
-
- /* Allocate a slot_phys array if needed */
- phys = memslot->arch.slot_phys;
- if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
- phys = vzalloc(memslot->npages * sizeof(unsigned long));
- if (!phys)
- return -ENOMEM;
- memslot->arch.slot_phys = phys;
- }
-
return 0;
}
{
int err = 0;
struct kvm *kvm = vcpu->kvm;
- struct kvm_rma_info *ri = NULL;
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
unsigned long lpcr = 0, senc;
- unsigned long lpcr_mask = 0;
unsigned long psize, porder;
- unsigned long rma_size;
- unsigned long rmls;
- unsigned long *physp;
- unsigned long i, npages;
int srcu_idx;
mutex_lock(&kvm->lock);
psize = vma_kernel_pagesize(vma);
porder = __ilog2(psize);
- /* Is this one of our preallocated RMAs? */
- if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
- hva == vma->vm_start)
- ri = vma->vm_file->private_data;
-
up_read(&current->mm->mmap_sem);
- if (!ri) {
- /* On POWER7, use VRMA; on PPC970, give up */
- err = -EPERM;
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- pr_err("KVM: CPU requires an RMO\n");
- goto out_srcu;
- }
-
- /* We can handle 4k, 64k or 16M pages in the VRMA */
- err = -EINVAL;
- if (!(psize == 0x1000 || psize == 0x10000 ||
- psize == 0x1000000))
- goto out_srcu;
-
- /* Update VRMASD field in the LPCR */
- senc = slb_pgsize_encoding(psize);
- kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
- (VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr_mask = LPCR_VRMASD;
- /* the -4 is to account for senc values starting at 0x10 */
- lpcr = senc << (LPCR_VRMASD_SH - 4);
+ /* We can handle 4k, 64k or 16M pages in the VRMA */
+ err = -EINVAL;
+ if (!(psize == 0x1000 || psize == 0x10000 ||
+ psize == 0x1000000))
+ goto out_srcu;
- /* Create HPTEs in the hash page table for the VRMA */
- kvmppc_map_vrma(vcpu, memslot, porder);
+ /* Update VRMASD field in the LPCR */
+ senc = slb_pgsize_encoding(psize);
+ kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* the -4 is to account for senc values starting at 0x10 */
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
- } else {
- /* Set up to use an RMO region */
- rma_size = kvm_rma_pages;
- if (rma_size > memslot->npages)
- rma_size = memslot->npages;
- rma_size <<= PAGE_SHIFT;
- rmls = lpcr_rmls(rma_size);
- err = -EINVAL;
- if ((long)rmls < 0) {
- pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
- goto out_srcu;
- }
- atomic_inc(&ri->use_count);
- kvm->arch.rma = ri;
-
- /* Update LPCR and RMOR */
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970; insert RMLS value (split field) in HID4 */
- lpcr_mask = (1ul << HID4_RMLS0_SH) |
- (3ul << HID4_RMLS2_SH) | HID4_RMOR;
- lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
- ((rmls & 3) << HID4_RMLS2_SH);
- /* RMOR is also in HID4 */
- lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
- << HID4_RMOR_SH;
- } else {
- /* POWER7 */
- lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
- lpcr = rmls << LPCR_RMLS_SH;
- kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
- }
- pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
- ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
-
- /* Initialize phys addrs of pages in RMO */
- npages = kvm_rma_pages;
- porder = __ilog2(npages);
- physp = memslot->arch.slot_phys;
- if (physp) {
- if (npages > memslot->npages)
- npages = memslot->npages;
- spin_lock(&kvm->arch.slot_phys_lock);
- for (i = 0; i < npages; ++i)
- physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
- porder;
- spin_unlock(&kvm->arch.slot_phys_lock);
- }
- }
+ /* Create HPTEs in the hash page table for the VRMA */
+ kvmppc_map_vrma(vcpu, memslot, porder);
- kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+ kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
smp_wmb();
memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
sizeof(kvm->arch.enabled_hcalls));
- kvm->arch.rma = NULL;
-
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970; HID4 is effectively the LPCR */
- kvm->arch.host_lpid = 0;
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
- lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
- lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
- ((lpid & 0xf) << HID4_LPID5_SH);
- } else {
- /* POWER7; init LPCR for virtual RMA mode */
- kvm->arch.host_lpid = mfspr(SPRN_LPID);
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
- lpcr &= LPCR_PECE | LPCR_LPES;
- lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
- LPCR_VPM0 | LPCR_VPM1;
- kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
- (VRMA_VSID << SLB_VSID_SHIFT_1T);
- /* On POWER8 turn on online bit to enable PURR/SPURR */
- if (cpu_has_feature(CPU_FTR_ARCH_207S))
- lpcr |= LPCR_ONL;
- }
+ /* Init LPCR for virtual RMA mode */
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+ LPCR_VPM0 | LPCR_VPM1;
+ kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
+ (VRMA_VSID << SLB_VSID_SHIFT_1T);
+ /* On POWER8 turn on online bit to enable PURR/SPURR */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ lpcr |= LPCR_ONL;
kvm->arch.lpcr = lpcr;
- kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
- spin_lock_init(&kvm->arch.slot_phys_lock);
-
/*
* Track that we now have a HV mode VM active. This blocks secondary
* CPU threads from coming online.
kvm_hv_vm_deactivated();
kvmppc_free_vcores(kvm);
- if (kvm->arch.rma) {
- kvm_release_rma(kvm->arch.rma);
- kvm->arch.rma = NULL;
- }
kvmppc_free_hpt(kvm);
}
static int kvmppc_core_check_processor_compat_hv(void)
{
- if (!cpu_has_feature(CPU_FTR_HVMODE))
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_206))
return -EIO;
return 0;
}
switch (ioctl) {
- case KVM_ALLOCATE_RMA: {
- struct kvm_allocate_rma rma;
- struct kvm *kvm = filp->private_data;
-
- r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
- if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
- r = -EFAULT;
- break;
- }
-
case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;
* By default we reserve 5% of memory for hash pagetable allocation.
*/
static unsigned long kvm_cma_resv_ratio = 5;
-/*
- * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
- * Each RMA has to be physically contiguous and of a size that the
- * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
- * and other larger sizes. Since we are unlikely to be allocate that
- * much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot using CMA.
- * should be power of 2.
- */
-unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
-EXPORT_SYMBOL_GPL(kvm_rma_pages);
static struct cma *kvm_cma;
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
- Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
- switch (rma_size) {
- case 32ul << 20: /* 32 MB */
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return 8; /* only supported on POWER7 */
- return -1;
- case 64ul << 20: /* 64 MB */
- return 3;
- case 128ul << 20: /* 128 MB */
- return 7;
- case 256ul << 20: /* 256 MB */
- return 4;
- case 1ul << 30: /* 1 GB */
- return 2;
- case 16ul << 30: /* 16 GB */
- return 1;
- case 256ul << 30: /* 256 GB */
- return 0;
- default:
- return -1;
- }
-}
-
-static int __init early_parse_rma_size(char *p)
-{
- unsigned long kvm_rma_size;
-
- pr_debug("%s(%s)\n", __func__, p);
- if (!p)
- return -EINVAL;
- kvm_rma_size = memparse(p, &p);
- /*
- * Check that the requested size is one supported in hardware
- */
- if (lpcr_rmls(kvm_rma_size) < 0) {
- pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
- return -EINVAL;
- }
- kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
- return 0;
-}
-early_param("kvm_rma_size", early_parse_rma_size);
-
-struct kvm_rma_info *kvm_alloc_rma()
-{
- struct page *page;
- struct kvm_rma_info *ri;
-
- ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
- if (!ri)
- return NULL;
- page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
- if (!page)
- goto err_out;
- atomic_set(&ri->use_count, 1);
- ri->base_pfn = page_to_pfn(page);
- return ri;
-err_out:
- kfree(ri);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-
-void kvm_release_rma(struct kvm_rma_info *ri)
-{
- if (atomic_dec_and_test(&ri->use_count)) {
- cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
- kfree(ri);
- }
-}
-EXPORT_SYMBOL_GPL(kvm_release_rma);
-
static int __init early_parse_kvm_cma_resv(char *p)
{
pr_debug("%s(%s)\n", __func__, p);
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
- unsigned long align_pages = HPT_ALIGN_PAGES;
-
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
- /* Old CPUs require HPT aligned on a multiple of its size */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- align_pages = nr_pages;
- return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
+ return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
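
With the 970's same-size alignment requirement gone, the HPT only ever needs the architectural 256kB alignment (HPT_ALIGN_PAGES). A worked example, assuming 4K kernel pages so the default 16MB HPT is 4096 CMA pages aligned to 64:

/* Sketch: allocate and zero a default-sized (order 24, 16MB) HPT. */
static void *alloc_default_hpt_sketch(void)
{
	struct page *page;

	page = kvm_alloc_hpt(1ul << (KVM_DEFAULT_HPT_ORDER - PAGE_SHIFT));
	if (!page)
		return NULL;
	/* CMA memory sits in the linear map, so page_address() works */
	memset(page_address(page), 0, 1ul << KVM_DEFAULT_HPT_ORDER);
	return page_address(page);
}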
if (selected_size) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- /*
- * Old CPUs require HPT aligned on a multiple of its size. So for them
- * make the alignment as max size we could request.
- */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- align_size = __rounddown_pow_of_two(selected_size);
- else
- align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
-
- align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+ align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
cma_declare_contiguous(0, selected_size, 0, align_size,
KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
}
std r3, _CCR(r1)
/* Save host DSCR */
-BEGIN_FTR_SECTION
mfspr r3, SPRN_DSCR
std r3, HSTATE_DSCR(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Save host DABR */
mfspr r7, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
+ /* Clear MMCRA in order to disable SDAR updates */
li r5, 0
mtspr SPRN_MMCRA, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r5, LPPACA_PMCINUSE(r3)
mfspr r7, SPRN_PMC4
mfspr r8, SPRN_PMC5
mfspr r9, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r3, HSTATE_PMC(r13)
stw r5, HSTATE_PMC + 4(r13)
stw r6, HSTATE_PMC + 8(r13)
stw r7, HSTATE_PMC + 12(r13)
stw r8, HSTATE_PMC + 16(r13)
stw r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- stw r10, HSTATE_PMC + 24(r13)
- stw r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
31:
/*
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
-#ifdef CONFIG_SMP
- /*
- * On PPC970, if the guest vcpu has an external interrupt pending,
- * send ourselves an IPI so as to interrupt the guest once it
- * enables interrupts. (It must have interrupts disabled,
- * otherwise we would already have delivered the interrupt.)
- *
- * XXX If this is a UP build, smp_send_reschedule is not available,
- * so the interrupt will be delayed until the next time the vcpu
- * enters the guest with interrupts enabled.
- */
-BEGIN_FTR_SECTION
- ld r4, HSTATE_KVM_VCPU(r13)
- ld r0, VCPU_PENDING_EXC(r4)
- li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL)
- oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
- and. r0, r0, r7
- beq 32f
- lhz r3, PACAPACAINDEX(r13)
- bl smp_send_reschedule
- nop
-32:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-#endif /* CONFIG_SMP */
-
/* Jump to partition switch code */
bl kvmppc_hv_entry_trampoline
nop
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
{
- if (cpu_has_feature(CPU_FTR_ARCH_206))
- return kvmppc_realmode_mc_power7(vcpu);
-
- return 0;
+ return kvmppc_realmode_mc_power7(vcpu);
}
* as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
* we can use tlbiel as long as we mark all other physical
* cores as potentially having stale TLB entries for this lpid.
- * If we're not using MMU notifiers, we never take pages away
- * from the guest, so we can use tlbiel if requested.
* Otherwise, don't use tlbiel.
*/
if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
global = 0;
- else if (kvm->arch.using_mmu_notifiers)
- global = 1;
else
- global = !(flags & H_LOCAL);
+ global = 1;
if (!global) {
/* any other core might now have stale TLB entries... */
struct revmap_entry *rev;
unsigned long g_ptel;
struct kvm_memory_slot *memslot;
- unsigned long *physp, pte_size;
+ unsigned long pte_size;
unsigned long is_io;
unsigned long *rmap;
pte_t pte;
is_io = ~0ul;
rmap = NULL;
if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
- /* PPC970 can't do emulated MMIO */
- if (!cpu_has_feature(CPU_FTR_ARCH_206))
- return H_PARAMETER;
/* Emulated MMIO - mark this with key=31 */
pteh |= HPTE_V_ABSENT;
ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
slot_fn = gfn - memslot->base_gfn;
rmap = &memslot->arch.rmap[slot_fn];
- if (!kvm->arch.using_mmu_notifiers) {
- physp = memslot->arch.slot_phys;
- if (!physp)
- return H_PARAMETER;
- physp += slot_fn;
- if (realmode)
- physp = real_vmalloc_addr(physp);
- pa = *physp;
- if (!pa)
- return H_TOO_HARD;
- is_io = pa & (HPTE_R_I | HPTE_R_W);
- pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
- pa &= PAGE_MASK;
+ /* Translate to host virtual address */
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+
+ /* Look up the Linux PTE for the backing page */
+ pte_size = psize;
+ pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
+ if (pte_present(pte) && !pte_numa(pte)) {
+ if (writing && !pte_write(pte))
+ /* make the actual HPTE be read-only */
+ ptel = hpte_make_readonly(ptel);
+ is_io = hpte_cache_bits(pte_val(pte));
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (pte_size - 1);
pa |= gpa & ~PAGE_MASK;
- } else {
- /* Translate to host virtual address */
- hva = __gfn_to_hva_memslot(memslot, gfn);
-
- /* Look up the Linux PTE for the backing page */
- pte_size = psize;
- pte = lookup_linux_pte_and_update(pgdir, hva, writing,
- &pte_size);
- if (pte_present(pte) && !pte_numa(pte)) {
- if (writing && !pte_write(pte))
- /* make the actual HPTE be read-only */
- ptel = hpte_make_readonly(ptel);
- is_io = hpte_cache_bits(pte_val(pte));
- pa = pte_pfn(pte) << PAGE_SHIFT;
- pa |= hva & (pte_size - 1);
- pa |= gpa & ~PAGE_MASK;
- }
}
if (pte_size < psize)
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (kvm->arch.using_mmu_notifiers &&
- mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_notifier_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
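
This is the standard KVM MMU-notifier race protocol: the insert path samples kvm->mmu_notifier_seq (followed by smp_rmb()) before translating the gfn, then revalidates under the rmap lock, while invalidations bump the count. Condensed from the code above:

/* Sketch: detect an invalidation that raced with HPTE insertion.
 * mmu_seq must have been read before the gfn translation, and this
 * check made while the rmap chain lock is held. */
static bool hpte_insert_raced(struct kvm *kvm, unsigned long mmu_seq,
			      unsigned long *pteh)
{
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		*pteh |= HPTE_V_ABSENT;		/* install non-present; the */
		*pteh &= ~HPTE_V_VALID;		/* guest faults and retries */
		return true;
	}
	return false;
}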
return old == 0;
}
-/*
- * tlbie/tlbiel is a bit different on the PPC970 compared to later
- * processors such as POWER7; the large page bit is in the instruction
- * not RB, and the top 16 bits and the bottom 12 bits of the VA
- * in RB must be 0.
- */
-static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
- long npages, int global, bool need_sync)
-{
- long i;
-
- if (global) {
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- if (need_sync)
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i) {
- unsigned long rb = rbvalues[i];
-
- if (rb & 1) /* large page */
- asm volatile("tlbie %0,1" : :
- "r" (rb & 0x0000fffffffff000ul));
- else
- asm volatile("tlbie %0,0" : :
- "r" (rb & 0x0000fffffffff000ul));
- }
- asm volatile("eieio; tlbsync; ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- if (need_sync)
- asm volatile("ptesync" : : : "memory");
- for (i = 0; i < npages; ++i) {
- unsigned long rb = rbvalues[i];
-
- if (rb & 1) /* large page */
- asm volatile("tlbiel %0,1" : :
- "r" (rb & 0x0000fffffffff000ul));
- else
- asm volatile("tlbiel %0,0" : :
- "r" (rb & 0x0000fffffffff000ul));
- }
- asm volatile("ptesync" : : : "memory");
- }
-}
-
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
long npages, int global, bool need_sync)
{
long i;
- if (cpu_has_feature(CPU_FTR_ARCH_201)) {
- /* PPC970 tlbie instruction is a bit different */
- do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
- return;
- }
if (global) {
while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
cpu_relax();
*/
pte = be64_to_cpu(hpte[1]);
r = (pte & ~mask) | bits;
- if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers &&
- !hpte_is_writable(pte))
+ if (hpte_is_writable(r) && !hpte_is_writable(pte))
r = hpte_make_readonly(r);
/* If the PTE is changing, invalidate it first */
if (r != pte) {
lwz r6, HSTATE_PMC + 12(r13)
lwz r8, HSTATE_PMC + 16(r13)
lwz r9, HSTATE_PMC + 20(r13)
-BEGIN_FTR_SECTION
- lwz r10, HSTATE_PMC + 24(r13)
- lwz r11, HSTATE_PMC + 28(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r4
mtspr SPRN_PMC3, r5
mtspr SPRN_PMC4, r6
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, HSTATE_MMCR(r13)
ld r4, HSTATE_MMCR + 8(r13)
ld r5, HSTATE_MMCR + 16(r13)
cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
-BEGIN_FTR_SECTION
beq 11f
cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
beq cr2, 14f /* HMI check */
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* RFI into the highmem handler, or branch to interrupt handler */
mfmsr r6
mtmsrd r6, 1 /* Clear RI in MSR */
mtsrr0 r8
mtsrr1 r7
- beqa 0x500 /* external interrupt (PPC970) */
beq cr1, 13f /* machine check */
RFI
slbia
ptesync
-BEGIN_FTR_SECTION
- b 30f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
- * POWER7 host -> guest partition switch code.
+ * POWER7/POWER8 host -> guest partition switch code.
* We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads.
*/
cmpwi r3,512 /* 1 microsecond */
li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
blt hdec_soon
- b 31f
-
- /*
- * PPC970 host -> guest partition switch code.
- * We have to lock against concurrent tlbies,
- * using native_tlbie_lock to lock against host tlbies
- * and kvm->arch.tlbie_lock to lock against guest tlbies.
- * We also have to invalidate the TLB since its
- * entries aren't tagged with the LPID.
- */
-30: ld r5,HSTATE_KVM_VCORE(r13)
- ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
-
- /* first take native_tlbie_lock */
- .section ".toc","aw"
-toc_tlbie_lock:
- .tc native_tlbie_lock[TC],native_tlbie_lock
- .previous
- ld r3,toc_tlbie_lock@toc(r2)
-#ifdef __BIG_ENDIAN__
- lwz r8,PACA_LOCK_TOKEN(r13)
-#else
- lwz r8,PACAPACAINDEX(r13)
-#endif
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r5,HSTATE_KVM_VCORE(r13)
- ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
- li r0,0x18f
- rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
- or r0,r7,r0
- ptesync
- sync
- mtspr SPRN_HID4,r0 /* switch to reserved LPID */
- isync
- li r0,0
- stw r0,0(r3) /* drop native_tlbie_lock */
-
- /* invalidate the whole TLB */
- li r0,256
- mtctr r0
- li r6,0
-25: tlbiel r6
- addi r6,r6,0x1000
- bdnz 25b
- ptesync
- /* Take the guest's tlbie_lock */
- addi r3,r9,KVM_TLBIE_LOCK
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
- ld r6,KVM_SDR1(r9)
- mtspr SPRN_SDR1,r6 /* switch to partition page table */
-
- /* Set up HID4 with the guest's LPID etc. */
- sync
- mtspr SPRN_HID4,r7
- isync
-
- /* drop the guest's tlbie_lock */
- li r0,0
- stw r0,0(r3)
-
- /* Check if HDEC expires soon */
- mfspr r3,SPRN_HDEC
- cmpwi r3,10
- li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- blt hdec_soon
-
- /* Enable HDEC interrupts */
- mfspr r0,SPRN_HID0
- li r3,1
- rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
- sync
- mtspr SPRN_HID0,r0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
-31:
/* Do we have a guest vcpu to run? */
cmpdi r4, 0
beq kvmppc_primary_no_guest
stb r6, VCPU_VPA_DIRTY(r4)
25:
-BEGIN_FTR_SECTION
/* Save purr/spurr */
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
ld r8,VCPU_SPURR(r4)
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
/* Set partition DABR */
ld r6,VCPU_DABR(r4)
mtspr SPRN_DABRX,r5
mtspr SPRN_DABR,r6
- BEGIN_FTR_SECTION_NESTED(89)
isync
- END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
lwz r7, VCPU_PMC + 12(r4)
lwz r8, VCPU_PMC + 16(r4)
lwz r9, VCPU_PMC + 20(r4)
-BEGIN_FTR_SECTION
- lwz r10, VCPU_PMC + 24(r4)
- lwz r11, VCPU_PMC + 28(r4)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
mtspr SPRN_PMC1, r3
mtspr SPRN_PMC2, r5
mtspr SPRN_PMC3, r6
mtspr SPRN_PMC4, r7
mtspr SPRN_PMC5, r8
mtspr SPRN_PMC6, r9
-BEGIN_FTR_SECTION
- mtspr SPRN_PMC7, r10
- mtspr SPRN_PMC8, r11
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
ld r3, VCPU_MMCR(r4)
ld r5, VCPU_MMCR + 8(r4)
ld r6, VCPU_MMCR + 16(r4)
ld r30, VCPU_GPR(R30)(r4)
ld r31, VCPU_GPR(R31)(r4)
-BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
ld r5, VCPU_DSCR(r4)
mtspr SPRN_DSCR, r5
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
BEGIN_FTR_SECTION
- /* Skip next section on POWER7 or PPC970 */
+ /* Skip next section on POWER7 */
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
-BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
ld r6,VCPU_UAMOR(r4)
mtspr SPRN_AMR,r5
mtspr SPRN_UAMOR,r6
mtspr SPRN_AMOR,r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Restore state of CTRL run bit; assume 1 on entry */
lwz r5,VCPU_CTRL(r4)
rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
cmpdi cr1, r0, 0
andi. r8, r11, MSR_EE
-BEGIN_FTR_SECTION
mfspr r8, SPRN_LPCR
/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
mtspr SPRN_LPCR, r8
isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
beq 5f
li r0, BOOK3S_INTERRUPT_EXTERNAL
bne cr1, 12f
/* Save HEIR (HV emulation assist reg) in last_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
-BEGIN_FTR_SECTION
cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
bne 11f
mfspr r3,SPRN_HEIR
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11: stw r3,VCPU_LAST_INST(r9)
/* these are volatile across C function calls */
std r3, VCPU_CTR(r9)
stw r4, VCPU_XER(r9)
-BEGIN_FTR_SECTION
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
beq kvmppc_hdsi
cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
beq kvmppc_hisi
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode
- /* Only handle external interrupts here on arch 206 and later */
-BEGIN_FTR_SECTION
- b ext_interrupt_to_host
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
-
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
bne+ ext_interrupt_to_host
mfdsisr r7
std r6, VCPU_DAR(r9)
stw r7, VCPU_DSISR(r9)
-BEGIN_FTR_SECTION
/* don't overwrite fault_dar/fault_dsisr if HDSI */
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
beq 6f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)
/*
* Save the guest PURR/SPURR
*/
-BEGIN_FTR_SECTION
mfspr r5,SPRN_PURR
mfspr r6,SPRN_SPURR
ld r7,VCPU_PURR(r9)
add r4,r4,r6
mtspr SPRN_PURR,r3
mtspr SPRN_SPURR,r4
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
/* Save DEC */
mfspr r5,SPRN_DEC
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
mfspr r6,SPRN_UAMOR
std r5,VCPU_AMR(r9)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Switch DSCR back to host value */
-BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
ld r7, HSTATE_DSCR(r13)
std r8, VCPU_DSCR(r9)
mtspr SPRN_DSCR, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save non-volatile GPRs */
std r14, VCPU_GPR(R14)(r9)
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
mfspr r6, SPRN_MMCRA
-BEGIN_FTR_SECTION
- /* On P7, clear MMCRA in order to disable SDAR updates */
+ /* Clear MMCRA in order to disable SDAR updates */
li r7, 0
mtspr SPRN_MMCRA, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
beq 21f /* if no VPA, save PMU stuff anyway */
lbz r7, LPPACA_PMCINUSE(r8)
mfspr r6, SPRN_PMC4
mfspr r7, SPRN_PMC5
mfspr r8, SPRN_PMC6
-BEGIN_FTR_SECTION
- mfspr r10, SPRN_PMC7
- mfspr r11, SPRN_PMC8
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
stw r3, VCPU_PMC(r9)
stw r4, VCPU_PMC + 4(r9)
stw r5, VCPU_PMC + 8(r9)
stw r6, VCPU_PMC + 12(r9)
stw r7, VCPU_PMC + 16(r9)
stw r8, VCPU_PMC + 20(r9)
-BEGIN_FTR_SECTION
- stw r10, VCPU_PMC + 24(r9)
- stw r11, VCPU_PMC + 28(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
mfspr r6, SPRN_SPMC1
ptesync
hdec_soon: /* r12 = trap, r13 = paca */
-BEGIN_FTR_SECTION
- b 32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/*
- * POWER7 guest -> host partition switch code.
+ * POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
*/
16: ld r8,KVM_HOST_LPCR(r4)
mtspr SPRN_LPCR,r8
isync
- b 33f
-
- /*
- * PPC970 guest -> host partition switch code.
- * We have to lock against concurrent tlbies, and
- * we have to flush the whole TLB.
- */
-32: ld r5,HSTATE_KVM_VCORE(r13)
- ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
-
- /* Take the guest's tlbie_lock */
-#ifdef __BIG_ENDIAN__
- lwz r8,PACA_LOCK_TOKEN(r13)
-#else
- lwz r8,PACAPACAINDEX(r13)
-#endif
- addi r3,r4,KVM_TLBIE_LOCK
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
- li r0,0x18f
- rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
- or r0,r7,r0
- ptesync
- sync
- mtspr SPRN_HID4,r0 /* switch to reserved LPID */
- isync
- li r0,0
- stw r0,0(r3) /* drop guest tlbie_lock */
-
- /* invalidate the whole TLB */
- li r0,256
- mtctr r0
- li r6,0
-25: tlbiel r6
- addi r6,r6,0x1000
- bdnz 25b
- ptesync
-
- /* take native_tlbie_lock */
- ld r3,toc_tlbie_lock@toc(2)
-24: lwarx r0,0,r3
- cmpwi r0,0
- bne 24b
- stwcx. r8,0,r3
- bne 24b
- isync
-
- ld r6,KVM_HOST_SDR1(r4)
- mtspr SPRN_SDR1,r6 /* switch to host page table */
-
- /* Set up host HID4 value */
- sync
- mtspr SPRN_HID4,r7
- isync
- li r0,0
- stw r0,0(r3) /* drop native_tlbie_lock */
-
- lis r8,0x7fff /* MAX_INT@h */
- mtspr SPRN_HDEC,r8
-
- /* Disable HDEC interrupts */
- mfspr r0,SPRN_HID0
- li r3,0
- rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
- sync
- mtspr SPRN_HID0,r0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
- mfspr r0,SPRN_HID0
/* load host SLB entries */
-33: ld r8,PACA_SLBSHADOWPTR(r13)
+ ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
li r3, SLBSHADOW_SAVEAREA
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)
-BEGIN_FTR_SECTION
- b kvm_cede_exit /* just send it up to host on 970 */
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
/*
* Set our bit in the bitmask of napping threads unless all the
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
- isync
addi r3,r3,VCPU_FPRS
bl store_fp_state
#ifdef CONFIG_ALTIVEC
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
mtmsrd r8
- isync
addi r3,r4,VCPU_FPRS
bl load_fp_state
#ifdef CONFIG_ALTIVEC
r = 0;
break;
case KVM_CAP_PPC_RMA:
- r = hv_enabled;
- /* PPC970 requires an RMA */
- if (r && cpu_has_feature(CPU_FTR_ARCH_201))
- r = 2;
+ r = 0;
break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (hv_enabled)
- r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
- else
- r = 0;
+ r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else