/*
 *  User access functions based on page table walks for enhanced
 *  system layout without hardware support.
 *
 *    Copyright IBM Corp. 2006, 2012
 *    Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the address
 * contains the (negative) exception code.
 */
static __always_inline unsigned long follow_table(struct mm_struct *mm,
						  unsigned long addr, int write)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return -0x3aUL;	/* region-second-translation exception */
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return -0x3bUL;	/* region-third-translation exception */
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return -0x10UL;	/* segment-translation exception */
	if (pmd_huge(*pmd)) {
		/* large page mapped directly at segment level */
		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
			return -0x04UL;	/* protection exception */
		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
	}
	if (unlikely(pmd_bad(*pmd)))
		return -0x10UL;
	ptep = pte_offset_map(pmd, addr);
	if (!pte_present(*ptep))
		return -0x11UL;	/* page-translation exception */
	if (write && !pte_write(*ptep))
		return -0x04UL;
	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
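/*
 * Copy "n" bytes between the user address "uaddr" and the kernel buffer
 * "kptr", walking the page tables under mm->page_table_lock and copying at
 * most one page per iteration.  When follow_table() reports an exception, the
 * lock is dropped, the fault is resolved via __handle_fault() with the
 * negated exception code, and the copy continues where it stopped.
 */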
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr + done;
		} else {
			from = (void *) kaddr;
			to = kptr + done;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;
	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;	/* fault could not be resolved */
}
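/*
 * copy_from_user_pt: with get_fs() == KERNEL_DS the "user" pointer is already
 * a kernel address and a plain memcpy is enough; otherwise the page table
 * walk copy is used and, on a partial copy, the uncopied tail of the
 * destination is zeroed so the caller never sees stale data.
 */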
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(to, (void __kernel __force *) from, n);
		return 0;
	}
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __kernel __force *) to, from, n);
		return 0;
	}
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
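/*
 * clear_user_pt: user memory is cleared by "copying" from empty_zero_page,
 * one page-sized chunk at a time, through the same __user_copy_pt() helper
 * that handles translation and fault retry.
 */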
static size_t clear_user_pt(size_t n, void __user *to)
{
	long done, size, ret;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memset((void __kernel __force *) to, 0, n);
		return 0;
	}
	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		ret = __user_copy_pt((unsigned long) to + done,
				     &empty_zero_page, size, 1);
		done += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
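/*
 * strnlen_user_pt: scan the user string one page at a time under the page
 * table lock.  The returned length includes the terminating '\0' (hence the
 * "+ 1" in the KERNEL_DS shortcut); 0 is returned if a fault cannot be
 * resolved.
 */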
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen((const char __kernel __force *) src, count) + 1;
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return (done < count) ? done + 1 : count;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}
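/*
 * strncpy_from_user_pt: size the copy with strnlen_user_pt(), fetch the bytes
 * with memcpy() (KERNEL_DS) or __user_copy_pt(), then check dst[n-1] to see
 * whether the terminating '\0' was copied and adjust the returned length
 * accordingly.
 */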
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t n = strnlen_user_pt(count, src);

	if (!n)
		return -EFAULT;
	if (n > count)
		n = count;
	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy(dst, (const char __kernel __force *) src, n);
		if (dst[n-1] == '\0')
			return n-1;
		else
			return n;
	}
	if (__user_copy_pt((unsigned long) src, dst, n, 0))
		return -EFAULT;
	if (dst[n-1] == '\0')
		return n-1;
	else
		return n;
}
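/*
 * copy_in_user_pt: both source and destination are user addresses, so each
 * iteration translates both with follow_table() and limits the chunk to
 * PAGE_SIZE minus the larger of the two page offsets, ensuring neither side
 * crosses a page boundary within one memcpy().
 */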
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS)) {
		memcpy((void __force *) to, (void __force *) from, n);
		return 0;
	}
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}
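/*
 * Futex operations on the kernel mapping of the user page.  The asm below
 * loads the old value, lets "insn" compute the new value from it and oparg,
 * and retries the compare-and-swap (cs) until it succeeds; the EX_TABLE
 * entries redirect any access fault to the exit label, leaving the preloaded
 * -EFAULT in "ret".
 */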
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}
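/*
 * futex_atomic_op_pt above and futex_atomic_cmpxchg_pt below translate the
 * user address under mm->page_table_lock, then take a reference on the page
 * with get_page() before dropping the lock, so the kernel mapping stays valid
 * while the atomic operation runs; put_page() releases the reference
 * afterwards.
 */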
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: la   %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}
int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}
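/*
 * uaccess method table wiring the page-table-walk implementations above into
 * the common uaccess_ops interface; as the file header notes, this variant is
 * used when there is no hardware support for the user-copy operations.
 */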
struct uaccess_ops uaccess_pt = {
	.copy_from_user = copy_from_user_pt,
	.copy_from_user_small = copy_from_user_pt,
	.copy_to_user = copy_to_user_pt,
	.copy_to_user_small = copy_to_user_pt,
	.copy_in_user = copy_in_user_pt,
	.clear_user = clear_user_pt,
	.strnlen_user = strnlen_user_pt,
	.strncpy_from_user = strncpy_from_user_pt,
	.futex_atomic_op = futex_atomic_op_pt,
	.futex_atomic_cmpxchg = futex_atomic_cmpxchg_pt,
};