/*
 * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "kern_util.h"
#include "os.h"
16 static void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
26 return ERR_PTR(-EINVAL);
27 pgd = pgd_offset(task->mm, addr);
28 if (!pgd_present(*pgd))
29 return ERR_PTR(-EINVAL);
31 pud = pud_offset(pgd, addr);
32 if (!pud_present(*pud))
33 return ERR_PTR(-EINVAL);
35 pmd = pmd_offset(pud, addr);
36 if (!pmd_present(*pmd))
37 return ERR_PTR(-EINVAL);
39 pte = pte_offset_kernel(pmd, addr);
41 if (!pte_present(ptent))
42 return ERR_PTR(-EINVAL);
46 return (void *) (pte_val(ptent) & PAGE_MASK) + (addr & ~PAGE_MASK);
49 static unsigned long maybe_map(unsigned long virt, int is_write)
54 void *phys = um_virt_to_phys(current, virt, &pte);
57 if (IS_ERR(phys) || (is_write && !pte_write(pte))) {
58 err = handle_page_fault(virt, 0, is_write, 1, &dummy_code);
61 phys = um_virt_to_phys(current, virt, NULL);
66 return (unsigned long) phys;
69 static int do_op_one_page(unsigned long addr, int len, int is_write,
70 int (*op)(unsigned long addr, int len, void *arg), void *arg)
75 addr = maybe_map(addr, is_write);
79 page = phys_to_page(addr);
80 addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
83 n = (*op)(addr, len, arg);
85 kunmap_atomic(page, KM_UML_USERCOPY);
90 static void do_buffer_op(void *jmpbuf, void *arg_ptr)
94 int len, is_write, size, remain, n;
95 int (*op)(unsigned long, int, void *);
99 va_copy(args, *(va_list *)arg_ptr);
100 addr = va_arg(args, unsigned long);
101 len = va_arg(args, int);
102 is_write = va_arg(args, int);
103 op = va_arg(args, void *);
104 arg = va_arg(args, void *);
105 res = va_arg(args, int *);
107 size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len);
110 current->thread.fault_catcher = jmpbuf;
111 n = do_op_one_page(addr, size, is_write, op, arg);
113 *res = (n < 0 ? remain : 0);
124 while(addr < ((addr + remain) & PAGE_MASK)) {
125 n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg);
127 *res = (n < 0 ? remain : 0);
139 n = do_op_one_page(addr, remain, is_write, op, arg);
141 *res = (n < 0 ? remain : 0);
144 current->thread.fault_catcher = NULL;
147 static int buffer_op(unsigned long addr, int len, int is_write,
148 int (*op)(unsigned long addr, int len, void *arg),
153 faulted = setjmp_wrapper(do_buffer_op, addr, len, is_write, op, arg,
158 return addr + len - (unsigned long) current->thread.fault_addr;
/*
 * buffer_op() callback: copy @len bytes from the kernel-mapped user
 * chunk at @from into the destination cursor stored in *arg, then
 * advance the cursor past the copied bytes.  Always returns 0 so the
 * page walk continues.
 */
static int copy_chunk_from_user(unsigned long from, int len, void *arg)
{
	unsigned long *to_ptr = arg, to = *to_ptr;

	memcpy((void *) to, (void *) from, len);
	*to_ptr += len;
	return 0;
}
170 int copy_from_user(void *to, const void __user *from, int n)
172 if (segment_eq(get_fs(), KERNEL_DS)) {
173 memcpy(to, (__force void*)from, n);
177 return access_ok(VERIFY_READ, from, n) ?
178 buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to):
/*
 * buffer_op() callback: copy @len bytes from the source cursor stored
 * in *arg out to the kernel-mapped user chunk at @to, then advance the
 * cursor past the copied bytes.  Always returns 0 so the page walk
 * continues.
 */
static int copy_chunk_to_user(unsigned long to, int len, void *arg)
{
	unsigned long *from_ptr = arg, from = *from_ptr;

	memcpy((void *) to, (void *) from, len);
	*from_ptr += len;
	return 0;
}
191 int copy_to_user(void __user *to, const void *from, int n)
193 if (segment_eq(get_fs(), KERNEL_DS)) {
194 memcpy((__force void *) to, from, n);
198 return access_ok(VERIFY_WRITE, to, n) ?
199 buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from) :
/*
 * buffer_op() callback for strncpy_from_user(): copy up to @len bytes
 * of string data from the chunk at @from, advancing the destination
 * cursor in *arg by the string length actually found in this chunk.
 * Returns 1 (stop the page walk) once a NUL terminator was seen within
 * the chunk, 0 to continue with the next page.
 */
static int strncpy_chunk_from_user(unsigned long from, int len, void *arg)
{
	char **to_ptr = arg, *to = *to_ptr;
	int n;

	strncpy(to, (void *) from, len);
	n = strnlen(to, len);
	*to_ptr += n;

	/* n < len means the terminator is inside this chunk - we're done. */
	if (n < len)
		return 1;
	return 0;
}
217 int strncpy_from_user(char *dst, const char __user *src, int count)
222 if (segment_eq(get_fs(), KERNEL_DS)) {
223 strncpy(dst, (__force void *) src, count);
224 return strnlen(dst, count);
227 if (!access_ok(VERIFY_READ, src, 1))
230 n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user,
234 return strnlen(dst, count);
/*
 * buffer_op() callback: zero @len bytes of the kernel-mapped chunk at
 * @addr.  @unused is buffer_op()'s opaque argument, not needed here.
 * Always returns 0 so the page walk continues.
 */
static int clear_chunk(unsigned long addr, int len, void *unused)
{
	memset((void *) addr, 0, len);
	return 0;
}
243 int __clear_user(void __user *mem, int len)
245 return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL);
248 int clear_user(void __user *mem, int len)
250 if (segment_eq(get_fs(), KERNEL_DS)) {
251 memset((__force void*)mem, 0, len);
255 return access_ok(VERIFY_WRITE, mem, len) ?
256 buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL) : len;
/*
 * buffer_op() callback for strnlen_user(): add the length of the
 * string data found in this chunk to the running count in *arg.
 * Returns 1 (stop the page walk) once the NUL terminator was found
 * within the chunk, 0 to continue with the next page.
 */
static int strnlen_chunk(unsigned long str, int len, void *arg)
{
	int *len_ptr = arg, n;

	n = strnlen((void *) str, len);
	*len_ptr += n;

	/* n < len means the terminator is inside this chunk - we're done. */
	if (n < len)
		return 1;
	return 0;
}
271 int strnlen_user(const void __user *str, int len)
275 if (segment_eq(get_fs(), KERNEL_DS))
276 return strnlen((__force char*)str, len) + 1;
278 n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);