/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/iommu.h>
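
/*
 * Number of host pages needed to back a guest TCE table of
 * @iommu_pages entries: each TCE is a u64, rounded up to whole pages.
 * For example, assuming 4K host pages, a 512-entry window needs
 * ALIGN(512 * 8, 4096) / 4096 = 1 page.
 */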
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
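
/*
 * Total number of pages to charge for a TCE table: the backing pages
 * themselves plus the pages holding struct kvmppc_spapr_tce_table and
 * its array of page pointers.
 */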
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
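
/*
 * Charge (@inc == true) or uncharge (@inc == false) @stt_pages against
 * the owning process's RLIMIT_MEMLOCK via current->mm->locked_vm.
 * Returns -ENOMEM if the limit would be exceeded and the task does not
 * have CAP_IPC_LOCK.
 */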
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
        long ret = 0;

        if (!current || !current->mm)
                return ret; /* process exited */

        down_write(&current->mm->mmap_sem);

        if (inc) {
                unsigned long locked, lock_limit;

                locked = current->mm->locked_vm + stt_pages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        current->mm->locked_vm += stt_pages;
        } else {
                if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
                        stt_pages = current->mm->locked_vm;

                current->mm->locked_vm -= stt_pages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
                        inc ? '+' : '-',
                        stt_pages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}
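
/*
 * RCU callback: frees the backing pages and the table structure once
 * all readers of the spapr_tce_tables list have finished.
 */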
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                __free_page(stt->pages[i]);

        kfree(stt);
}
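
/*
 * Page fault handler for the TCE table fd mapping: hands back the
 * kernel page that backs the faulting offset of the table.
 */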
static int kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = stt->pages[vmf->pgoff];
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}
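
/*
 * Called when the last reference to the TCE table fd is dropped:
 * unlink the table, drop its reference on the VM, return the
 * RLIMIT_MEMLOCK accounting and free the pages after an RCU grace
 * period.
 */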
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;

        list_del_rcu(&stt->list);

        kvm_put_kvm(stt->kvm);

        kvmppc_account_memlimit(
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};
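
/*
 * KVM_CREATE_SPAPR_TCE_64 ioctl: creates an in-kernel TCE table for the
 * given LIOBN, charges it against RLIMIT_MEMLOCK, allocates zeroed
 * backing pages and returns an anonymous file descriptor that userspace
 * can mmap to read the table.
 */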
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        unsigned long npages, size;
        int ret = -ENOMEM;
        int i;

        /* Check this LIOBN hasn't been previously allocated */
        list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt->liobn == args->liobn)
                        return -EBUSY;
        }

        size = args->size;
        npages = kvmppc_tce_pages(size);
        ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                        GFP_KERNEL);
        if (!stt)
                goto fail;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;

        for (i = 0; i < npages; i++) {
                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!stt->pages[i])
                        goto fail;
        }

        kvm_get_kvm(kvm);

        mutex_lock(&kvm->lock);
        list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
        mutex_unlock(&kvm->lock);

        return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                        stt, O_RDWR | O_CLOEXEC);

fail:
        if (stt) {
                for (i = 0; i < npages; i++)
                        if (stt->pages[i])
                                __free_page(stt->pages[i]);
                kfree(stt);
        }
        return ret;
}
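
/*
 * H_PUT_TCE hypercall: validates the I/O bus address and the TCE value,
 * then writes a single entry into the guest's TCE table.
 */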
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
        long ret;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*             liobn, ioba, tce); */

        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
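
/*
 * H_PUT_TCE_INDIRECT hypercall: reads a list of up to 512 TCEs from a
 * 4K-aligned guest page and writes each validated entry into the table.
 */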
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;

        stt = kvmppc_find_table(vcpu, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * SPAPR spec says that the maximum size of the list is 512 TCEs
         * so the whole table fits in 4K page
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
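
/*
 * H_STUFF_TCE hypercall: fills @npages consecutive entries with the same
 * value, which must have no permission bits set, i.e. it clears mappings.
 */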
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;

        stt = kvmppc_find_table(vcpu, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);