/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/smp_lock.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/xen-ops.h>

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

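/*
 * Forward an arbitrary hypercall from userspace to the hypervisor.
 * The caller supplies the hypercall number and up to five arguments;
 * the hypercall's return value (or -EFAULT) is passed back.
 */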
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}

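/* Free every page on the list and leave the list re-initialised and empty. */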
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */

	while (nelem--) {
		/* Start a fresh page whenever the next item won't fit. */
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		/* Advance to the next page when the current one is exhausted. */
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;

			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

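/*
 * State shared between privcmd_ioctl_mmap() and the per-entry callback
 * mmap_mfn_range(): the next expected virtual address, the VMA being
 * populated, and the target domain.
 */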
struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

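/*
 * IOCTL_PRIVCMD_MMAP: map a set of contiguous MFN ranges from a foreign
 * domain into a previously mmap()ed VMA.  Only the initial (privileged)
 * domain may do this, and each VMA may be populated at most once.
 */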
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);

out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

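/*
 * State shared between privcmd_ioctl_mmap_batch() and its two traversal
 * callbacks: the target domain, the next virtual address to map, the VMA,
 * a count of failed mappings, and the cursor used when copying error
 * markers back out to userspace.
 */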
struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int err;

	xen_pfn_t __user *user;
};

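/*
 * Map a single foreign frame at the next virtual address.  On failure the
 * MFN is tagged in place with the classic privcmd error marker rather than
 * aborting the whole batch.
 */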
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	if (xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
				       st->vma->vm_page_prot, st->domain) < 0) {
		*mfnp |= 0xf0000000U;
		st->err++;
	}
	st->va += PAGE_SIZE;

	return 0;
}

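/* Copy the (possibly error-tagged) MFN array back out to userspace. */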
static int mmap_return_errors(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;

	return put_user(*mfnp, st->user++);
}

static struct vm_operations_struct privcmd_vm_ops;

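/*
 * IOCTL_PRIVCMD_MMAPBATCH: map an arbitrary batch of foreign frames, one
 * MFN per page, into a previously mmap()ed VMA.  Individual failures are
 * reported by rewriting the affected entries of the user-supplied MFN
 * array instead of failing the whole call.
 */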
static long privcmd_ioctl_mmap_batch(void __user *udata)
{
	int ret;
	struct privcmd_mmapbatch m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	if (!xen_initial_domain())
		return -EPERM;

	if (copy_from_user(&m, udata, sizeof(m)))
		return -EFAULT;

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t),
			   m.arr);

	if (ret || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	ret = -EINVAL;
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		goto out;
	}

	state.domain = m.dom;
	state.vma = vma;
	state.va = m.addr;
	state.err = 0;

	ret = traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state);

	up_write(&mm->mmap_sem);

	if (state.err > 0) {
		state.user = m.arr;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist,
				     mmap_return_errors, &state);
	}

out:
	free_page_list(&pagelist);

	return ret;
}

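/* Top-level ioctl dispatcher for /dev/xen/privcmd. */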
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

#ifndef HAVE_ARCH_PRIVCMD_MMAP
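/*
 * privcmd VMAs are populated only by the ioctls above, so any fault means
 * userspace touched an address that was never mapped; report SIGBUS rather
 * than trying to service it.
 */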
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Unsupported for auto-translated guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	/* DONTCOPY is essential for Xen as copy_page_range is broken. */
	vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

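/*
 * A privcmd VMA may be populated only once.  vm_private_data doubles as
 * the "already mapped" flag: atomically exchange it from NULL to a
 * non-NULL token, and fail if someone else got there first.
 */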
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
#endif

const struct file_operations privcmd_file_ops = {
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
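
/*
 * Illustrative userspace sketch (not part of this driver; variable names
 * are made up): how a dom0 tool might drive IOCTL_PRIVCMD_MMAPBATCH,
 * assuming the privcmd ioctl definitions are visible to userspace.  The
 * mmap()ed region must cover exactly the batch, and error handling is
 * elided.
 *
 *	long psz = sysconf(_SC_PAGESIZE);
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	void *addr = mmap(NULL, npages * psz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct privcmd_mmapbatch m = {
 *		.num  = npages,
 *		.dom  = domid,
 *		.addr = (unsigned long)addr,
 *		.arr  = mfn_array,	// one MFN per page, rewritten on error
 *	};
 *	ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, &m);
 */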