/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
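
/*
 * Note: the hypercall handlers in this file can run in real mode, where the
 * generic WARN_ON_ONCE() machinery is not safe to use, so a minimal variant
 * that only reports via pr_err() is defined below.
 */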

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif
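
/*
 * Number of TCE entries that fit in one page of the KVM-maintained TCE
 * table copy: PAGE_SIZE / 8, i.e. 8192 entries with 64K pages or 512 with
 * 4K pages.
 */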
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
 * As the host kernel does not access those addresses (just puts them
 * to the table and user space is supposed to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * the page was actually allocated).
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
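
/*
 * A guest-supplied TCE is the guest physical address of the page to map
 * with the TCE_PCI_READ/TCE_PCI_WRITE permission flags in its low bits,
 * e.g. a read-write mapping of the page at 0x10000000 is passed as
 * 0x10000000 | TCE_PCI_READ | TCE_PCI_WRITE, from which
 * iommu_tce_direction() derives DMA_BIDIRECTIONAL.
 */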

/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), which is an arithmetic
 * operation and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);

long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
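
/*
 * Example for kvmppc_gpa_to_ua() above: for a gpa inside a memslot with
 * base_gfn B and userspace address U, __gfn_to_hva_memslot() resolves to
 * U + ((gfn - B) << PAGE_SHIFT); the in-page offset of gpa is preserved in
 * ua while the TCE_PCI_READ/TCE_PCI_WRITE bits are masked off.
 */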

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);

	return ret;
}
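
/*
 * Sets one TCE in a hardware IOMMU table in real mode: looks up the
 * preregistered memory region backing ua, translates ua to a host physical
 * address, bumps the region's mapping count, exchanges the entry into the
 * hardware table and records ua in it_userspace so the entry can be torn
 * down later. Anything that needs virtual mode is reported as H_TOO_HARD
 * so the hypercall can be retried there.
 */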
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
		return H_HARDWARE;

	pua = (void *) vmalloc_to_phys(pua);
	if (WARN_ON_ONCE_RM(!pua))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}
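
/*
 * Real-mode handler for the H_PUT_TCE hypercall: validates ioba and the TCE,
 * translates the guest physical address to a userspace address, updates
 * every hardware IOMMU table attached to this LIOBN and finally mirrors the
 * TCE into the KVM-maintained table via kvmppc_tce_put(). H_TOO_HARD makes
 * the hypercall fall back to the virtual-mode handler.
 */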
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			return ret;

		WARN_ON_ONCE_RM(1);
		kvmppc_rm_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}
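
/*
 * Real-mode handler for H_PUT_TCE_INDIRECT: tce_list is the guest physical
 * address of a list of up to 512 TCEs (512 * 8 bytes = one 4K page, hence
 * the alignment check below). The list is read either through preregistered
 * memory or, failing that, by walking the guest page tables under the rmap
 * lock; each entry is then applied as in H_PUT_TCE.
 */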
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed resides in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation does not
		 * depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually a case of a guest with emulated devices only
		 * when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte_or_hugepte().
		 */
		if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_HARDWARE;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		ua = 0;
		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}
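
/*
 * Real-mode handler for H_STUFF_TCE: clears npages consecutive entries to
 * the same tce_value, which must carry no permission bits, unmapping them
 * from every attached hardware IOMMU table before updating the
 * KVM-maintained table.
 */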
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stit->tbl->it_page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
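
/*
 * Handler for H_GET_TCE: looks the entry up in the KVM-maintained table
 * and returns its current value to the guest in GPR4.
 */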
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */