/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER     18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES         ((1 << 18) >> PAGE_SHIFT) /* 256k */

/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;
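
/*
 * The reservation ratio can be overridden on the kernel command line with
 * "kvm_cma_resv_ratio=<percent>", e.g. kvm_cma_resv_ratio=10 to reserve
 * 10% of memory for hash page tables (parsed by early_parse_kvm_cma_resv()
 * below).
 */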
static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;

        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
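
/*
 * Allocate a physically contiguous block of pages for a guest hashed page
 * table from the CMA region reserved in kvm_cma_reserve(); the block is
 * aligned to HPT_ALIGN_PAGES.
 */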
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        struct memblock_region *reg;
        phys_addr_t selected_size = 0;

        /*
         * We need CMA reservation only when we are in HV mode
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;
        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                selected_size += memblock_region_memory_end_pfn(reg) -
                                 memblock_region_memory_base_pfn(reg);

        selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
        }
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
                threads_conferring = vc->conferring_threads;
                if ((threads_ceded | threads_conferring) == threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(ptid, &vc->conferring_threads);
        return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
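
/*
 * Report whether any HV-mode guests currently exist (i.e. hv_vm_count is
 * non-zero).
 */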
bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
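
/*
 * hcall_real_table lives in the real-mode assembly (book3s_hv_rmhandlers.S)
 * and is indexed by hcall number divided by 4; a non-zero entry means the
 * hcall has a real-mode handler.
 */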
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
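
/*
 * Real-mode H_RANDOM: return a random number from the PowerNV hardware RNG
 * in the guest's r4 (vcpu->arch.gpr[4]) if one is available.
 */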
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
                return H_SUCCESS;

        return H_HARDWARE;
}
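
/*
 * Cache-inhibited byte store, usable from hypervisor real mode (stbcix
 * bypasses the cache so it can hit MMIO such as the XICS registers).
 */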
static inline void rm_writeb(unsigned long paddr, u8 val)
{
        __asm__ __volatile__("stbcix %0,0,%1"
                : : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
        unsigned long xics_phys;
        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

        /* On POWER9 we can use msgsnd for any destination cpu. */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                msg |= get_hard_smp_processor_id(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
            cpu_first_thread_sibling(cpu) ==
            cpu_first_thread_sibling(raw_smp_processor_id())) {
                msg |= cpu_thread_in_core(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* Else poke the target with an IPI */
        xics_phys = paca[cpu].kvm_hstate.xics_phys;
        if (xics_phys)
                rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
        else
                opal_int_set_mfrr(get_hard_smp_processor_id(cpu),
                                  IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
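/*
 * Kick each hardware thread whose bit is set in the 'active' mask out of
 * the guest.
 */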
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
        int cpu = vc->pcpu;

        /* Order setting of exit map vs. msgsnd/IPI */
        smp_mb();
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        kvmhv_rm_send_ipi(cpu);
}
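
/*
 * Note on vcore->entry_exit_map: the low byte is a bitmap of threads that
 * have entered the guest, and the 0xff00 byte is a bitmap of threads that
 * have started to exit (see the comments in kvmhv_commence_exit() below).
 */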
void kvmhv_commence_exit(int trap)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
        int me, ee, i;

        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
        me = 0x100 << ptid;
        do {
                ee = vc->entry_exit_map;
        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

        /* Are we the first here? */
        if ((ee >> 8) != 0)
                return;

        /*
         * Trigger the other threads in this vcore to exit the guest.
         * If this is a hypervisor decrementer interrupt then they
         * will be already on their way out of the guest.
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

        /*
         * If we are doing dynamic micro-threading, interrupt the other
         * subcores to pull them out of their guests too.
         */
        if (!sip)
                return;

        for (i = 0; i < MAX_SUBCORES; ++i) {
                vc = sip->master_vcs[i];
                if (!vc)
                        break;
                do {
                        ee = vc->entry_exit_map;
                        /* Already asked to exit? */
                        if ((ee >> 8) != 0)
                                break;
                } while (cmpxchg(&vc->entry_exit_map, ee,
                                 ee | VCORE_EXIT_REQ) != ee);
                if ((ee >> 8) == 0)
                        kvmhv_interrupt_vcore(vc, ee);
        }
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);
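
/*
 * XICS passthrough support: the irq map below associates host hardware IRQ
 * numbers (XISR values) with guest interrupts, so that external interrupts
 * for passthrough devices can be delivered directly from real mode.
 */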
#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
                                         u32 xisr)
{
        int i;

        /*
         * We access the mapped array here without a lock. That
         * is safe because we never reduce the number of entries
         * in the array and we never change the v_hwirq field of
         * an entry once it is set.
         *
         * We have also carefully ordered the stores in the writer
         * and the loads here in the reader, so that if we find a matching
         * hwirq here, the associated GSI and irq_desc fields are valid.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (xisr == pimap->mapped[i].r_hwirq) {
                        /*
                         * Order subsequent reads in the caller to serialize
                         * with the writer.
                         */
                        smp_rmb();
                        return &pimap->mapped[i];
                }
        }
        return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        struct kvmppc_passthru_irqmap *pimap;
        struct kvmppc_irq_map *irq_map;
        struct kvm_vcpu *vcpu;

        vcpu = local_paca->kvm_hstate.kvm_vcpu;
        if (!vcpu)
                return 1;
        pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
        if (!pimap)
                return 1;
        irq_map = get_irqmap(pimap, xisr);
        if (!irq_map)
                return 1;

        /* We're handling this interrupt, generic code doesn't need to */
        local_paca->kvm_hstate.saved_xirr = 0;

        return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *      0 if no interrupt is pending
 *      1 if an interrupt is pending that needs to be handled by the host
 *      2 Passthrough that needs completion in the host
 *      -1 if there was a guest wakeup IPI (which has now been cleared)
 *      -2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);
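
/*
 * kvmppc_read_intr() calls kvmppc_read_one_intr() repeatedly while *again is
 * set, and returns the highest-priority status seen: a positive return
 * (host must handle it) takes precedence over the negative "already handled"
 * values.
 */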
long kvmppc_read_intr(void)
{
        long ret = 0;
        long rc;
        bool again;

        do {
                again = false;
                rc = kvmppc_read_one_intr(&again);
                if (rc && (ret == 0 || rc > ret))
                        ret = rc;
        } while (again);
        return ret;
}
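
/*
 * Read one interrupt from the ICP and handle it if possible; *again is set
 * when the EOI indicates another interrupt is already pending, so the caller
 * should call us again.
 */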
static long kvmppc_read_one_intr(bool *again)
{
        unsigned long xics_phys;
        u32 h_xirr;
        __be32 xirr;
        u32 xisr;
        u8 host_ipi;
        int64_t rc;

        /* see if a host IPI is pending */
        host_ipi = local_paca->kvm_hstate.host_ipi;
        if (host_ipi)
                return 1;

        /* Now read the interrupt from the ICP */
        xics_phys = local_paca->kvm_hstate.xics_phys;
        rc = 0;
        if (!xics_phys)
                rc = opal_int_get_xirr(&xirr, false);
        else
                xirr = _lwzcix(xics_phys + XICS_XIRR);
        if (rc < 0)
                return 1;

        /*
         * Save XIRR for later. Since we get control in reverse endian
         * on LE systems, save it byte reversed and fetch it back in
         * host endian. Note that xirr is the value read from the
         * XIRR register, while h_xirr is the host endian version.
         */
        h_xirr = be32_to_cpu(xirr);
        local_paca->kvm_hstate.saved_xirr = h_xirr;
        xisr = h_xirr & 0xffffff;
        /*
         * Ensure that the store/load complete to guarantee all side
         * effects of loading from XIRR has completed
         */
        smp_mb();

        /* if nothing pending in the ICP */
        if (!xisr)
                return 0;

        /* We found something in the ICP...
         *
         * If it is an IPI, clear the MFRR and EOI it.
         */
        if (xisr == XICS_IPI) {
                rc = 0;
                if (xics_phys) {
                        _stbcix(xics_phys + XICS_MFRR, 0xff);
                        _stwcix(xics_phys + XICS_XIRR, xirr);
                } else {
                        opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
                        rc = opal_int_eoi(h_xirr);
                }
                /* If rc > 0, there is another interrupt pending */
                *again = rc > 0;

                /*
                 * Need to ensure side effects of above stores
                 * complete before proceeding.
                 */
                smp_mb();

                /*
                 * We need to re-check host IPI now in case it got set in the
                 * meantime. If it's clear, we bounce the interrupt to the
                 * guest
                 */
                host_ipi = local_paca->kvm_hstate.host_ipi;
                if (unlikely(host_ipi != 0)) {
                        /* We raced with the host,
                         * we need to resend that IPI, bummer
                         */
                        if (xics_phys)
                                _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
                        else
                                opal_int_set_mfrr(hard_smp_processor_id(),
                                                  IPI_PRIORITY);
                        /* Let side effects complete */
                        smp_mb();
                        return 1;
                }

                /* OK, it's an IPI for us */
                local_paca->kvm_hstate.saved_xirr = 0;
                return -1;
        }

        return kvmppc_check_passthru(xisr, xirr, again);
}