/*
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"
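/*
 * The inline assembly below needs add-immediate and subtract-logical
 * mnemonics that differ between 31-bit and 64-bit mode; these macros
 * select the proper opcode at build time.
 */
#ifndef CONFIG_64BIT
#define AHI	"ahi"
#define SLR	"slr"
#else
#define AHI	"aghi"
#define SLR	"slgr"
#endif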
static size_t strnlen_kernel(size_t count, const char __user *src)
{
	register unsigned long reg0 asm("0") = 0UL;
	unsigned long tmp1, tmp2;

	asm volatile(
		"   la    %2,0(%1)\n"
		"   la    %3,0(%0,%1)\n"
		"  "SLR"  %0,%0\n"
		"0: srst  %3,%2\n"
		"   jo    0b\n"
		"   la    %0,1(%3)\n"	/* strnlen_kernel result includes \0 */
		"  "SLR"  %0,%1\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
		: "d" (reg0) : "cc", "memory");
	return count;
}
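/*
 * Note on the loop above: SRST (SEARCH STRING) scans the range tmp1..tmp2
 * for the byte held in general register 0 (zeroed above, i.e. the string
 * terminator) and may stop after a CPU-determined number of bytes, hence
 * the "jo 0b" resume branch. Assuming no fault occurs, the result is
 * equivalent to strnlen(src, count) + 1, i.e. the length including '\0'.
 */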
static size_t copy_in_kernel(size_t count, void __user *to,
			     const void __user *from)
{
	unsigned long tmp1;

	asm volatile(
		"  "AHI"  %0,-1\n"
		"   jo    5f\n"
		"   bras  %3,3f\n"
		"0:"AHI"  %0,257\n"
		"1: mvc   0(1,%1),0(%2)\n"
		"   la    %1,1(%1)\n"
		"   la    %2,1(%2)\n"
		"  "AHI"  %0,-1\n"
		"   jnz   1b\n"
		"   j     5f\n"
		"2: mvc   0(256,%1),0(%2)\n"
		"   la    %1,256(%1)\n"
		"   la    %2,256(%2)\n"
		"3:"AHI"  %0,-256\n"
		"   jnm   2b\n"
		"4: ex    %0,1b-0b(%3)\n"
		"5:"SLR"  %0,%0\n"
		"6:\n"
		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
		: "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
		: : "cc", "memory");
	return count;
}
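/*
 * The block above copies 256 bytes per MVC and finishes the 0..255 byte
 * tail with EX (EXECUTE) on the single-byte MVC at label 1. The EX_TABLE
 * fixups either restart byte-wise copying after a fault in a 256-byte
 * move or terminate with the residual count still in %0, so the return
 * value is the number of bytes not copied (0 on success), matching the
 * usual copy_{from,to}_user convention.
 */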
/*
 * Returns kernel address for user virtual address. If the returned address is
 * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
 * address contains the (negative) exception code.
 */
static __always_inline unsigned long follow_table(struct mm_struct *mm,
						  unsigned long addr, int write)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return -0x3aUL;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return -0x3bUL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return -0x10UL;
	if (pmd_large(*pmd)) {
		if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
			return -0x04UL;
		return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
	}
	if (unlikely(pmd_bad(*pmd)))
		return -0x10UL;

	ptep = pte_offset_map(pmd, addr);
	if (!pte_present(*ptep))
		return -0x11UL;
	if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
		return -0x04UL;

	return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}
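/*
 * The negative return values above correspond to s390 program
 * interruption codes (0x04 protection, 0x10 segment translation,
 * 0x11 page translation, 0x3a/0x3b region translation), which is the
 * error code format that __handle_fault() expects.
 */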
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
					     size_t n, int write_user)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, size, kaddr;
	void *from, *to;

	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, write_user);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		size = min(n - done, PAGE_SIZE - offset);
		if (write_user) {
			to = (void *) kaddr;
			from = kptr;
		} else {
			from = (void *) kaddr;
			to = kptr;
		}
		memcpy(to, from, size);
		done += size;
		uaddr += size;
		kptr += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, write_user))
		return n - done;
	goto retry;
}
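/*
 * This is the pattern used throughout this file: walk the page table
 * under mm->page_table_lock so the translation cannot be torn down while
 * the page is accessed, and on a failed walk drop the lock, let
 * __handle_fault() resolve the fault (which may sleep), then retry.
 * The return value is the number of bytes left uncopied, 0 on success.
 */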
/*
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
						     int write)
{
	struct mm_struct *mm = current->mm;
	unsigned long kaddr;
	int rc;

retry:
	kaddr = follow_table(mm, uaddr, write);
	if (IS_ERR_VALUE(kaddr))
		goto fault;

	return kaddr;
fault:
	spin_unlock(&mm->page_table_lock);
	rc = __handle_fault(uaddr, -kaddr, write);
	spin_lock(&mm->page_table_lock);
	if (!rc)
		goto retry;
	return 0;
}
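/*
 * Note that __dat_user_addr() temporarily drops page_table_lock while
 * handling a fault, so callers must not rely on translations established
 * before the call; on an unresolvable fault it returns 0 rather than an
 * exception code.
 */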
size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
{
	size_t rc;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, (void __user *) to, from);
	rc = __user_copy_pt((unsigned long) from, to, n, 0);
	if (unlikely(rc))
		memset(to + n - rc, 0, rc);
	return rc;
}
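/*
 * On a partial copy the uncopied tail of the kernel buffer is zeroed:
 * callers of copy_from_user() may consume the whole buffer regardless of
 * the return value, so leaving it uninitialized could leak stale kernel
 * data.
 */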
size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
{
	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, (void __user *) from);
	return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
}
static size_t clear_user_pt(size_t n, void __user *to)
{
	void *zpage = (void *) empty_zero_page;
	long done, size, ret;

	done = 0;
	do {
		if (n - done > PAGE_SIZE)
			size = PAGE_SIZE;
		else
			size = n - done;
		if (segment_eq(get_fs(), KERNEL_DS))
			ret = copy_in_kernel(size, to, (void __user *) zpage);
		else
			ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
		done += size;
		to += size;
		if (ret)
			return ret + n - done;
	} while (done < n);
	return 0;
}
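/*
 * Clearing is implemented as a copy from the shared zero page, one
 * page-sized chunk at a time; the return value again is the number of
 * bytes that could not be cleared.
 */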
static size_t strnlen_user_pt(size_t count, const char __user *src)
{
	unsigned long uaddr = (unsigned long) src;
	struct mm_struct *mm = current->mm;
	unsigned long offset, done, len, kaddr;
	size_t len_str;

	if (unlikely(!count))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return strnlen_kernel(count, src);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		kaddr = follow_table(mm, uaddr, 0);
		if (IS_ERR_VALUE(kaddr))
			goto fault;

		offset = uaddr & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		len_str = strnlen((char *) kaddr, len);
		done += len_str;
		uaddr += len_str;
	} while ((len_str == len) && (done < count));
	spin_unlock(&mm->page_table_lock);
	return (done < count) ? done + 1 : count;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -kaddr, 0))
		return 0;
	goto retry;
}
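/*
 * len_str == len means no terminator was found in the current page-bounded
 * chunk, so the scan continues on the next page. The function returns the
 * string length including the '\0', count if the string is not terminated
 * within count bytes, and 0 on an unresolvable fault.
 */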
static size_t strncpy_from_user_pt(size_t count, const char __user *src,
				   char *dst)
{
	size_t done, len, offset, len_str;

	if (unlikely(!count))
		return 0;
	done = 0;
	do {
		offset = (size_t)src & ~PAGE_MASK;
		len = min(count - done, PAGE_SIZE - offset);
		if (segment_eq(get_fs(), KERNEL_DS)) {
			if (copy_in_kernel(len, (void __user *) dst, src))
				return -EFAULT;
		} else {
			if (__user_copy_pt((unsigned long) src, dst, len, 0))
				return -EFAULT;
		}
		len_str = strnlen(dst, len);
		done += len_str;
		src += len_str;
		dst += len_str;
	} while ((len_str == len) && (done < count));
	return done;
}
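/*
 * Unlike strnlen_user_pt() this copies first and then searches the copied
 * bytes with strnlen(), so at most one page-bounded chunk beyond the
 * terminator is transferred. The result is the number of bytes copied,
 * not counting the terminating '\0', or -EFAULT on a failed copy.
 */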
static size_t copy_in_user_pt(size_t n, void __user *to,
			      const void __user *from)
{
	struct mm_struct *mm = current->mm;
	unsigned long offset_max, uaddr, done, size, error_code;
	unsigned long uaddr_from = (unsigned long) from;
	unsigned long uaddr_to = (unsigned long) to;
	unsigned long kaddr_to, kaddr_from;
	int write_user;

	if (segment_eq(get_fs(), KERNEL_DS))
		return copy_in_kernel(n, to, from);
	done = 0;
retry:
	spin_lock(&mm->page_table_lock);
	do {
		write_user = 0;
		uaddr = uaddr_from;
		kaddr_from = follow_table(mm, uaddr_from, 0);
		error_code = kaddr_from;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		write_user = 1;
		uaddr = uaddr_to;
		kaddr_to = follow_table(mm, uaddr_to, 1);
		error_code = (unsigned long) kaddr_to;
		if (IS_ERR_VALUE(error_code))
			goto fault;

		offset_max = max(uaddr_from & ~PAGE_MASK,
				 uaddr_to & ~PAGE_MASK);
		size = min(n - done, PAGE_SIZE - offset_max);

		memcpy((void *) kaddr_to, (void *) kaddr_from, size);
		done += size;
		uaddr_from += size;
		uaddr_to += size;
	} while (done < n);
	spin_unlock(&mm->page_table_lock);
	return n - done;
fault:
	spin_unlock(&mm->page_table_lock);
	if (__handle_fault(uaddr, -error_code, write_user))
		return n - done;
	goto retry;
}
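/*
 * Both the source and the destination are translated each iteration, and
 * the chunk size is limited by the larger of the two page offsets so that
 * neither address crosses a page boundary within one memcpy(). For
 * example, with from at offset 0xf00 and to at offset 0x080, offset_max
 * is 0xf00 and at most 0x100 bytes are copied in that round.
 */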
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
	asm volatile("0: l   %1,0(%6)\n"				\
		     "1: " insn						\
		     "2: cs  %1,%2,0(%6)\n"				\
		     "3: jl  1b\n"					\
		     "   lhi %0,0\n"					\
		     "4:\n"						\
		     EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)	\
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval),	\
		       "=m" (*uaddr)					\
		     : "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
		       "m" (*uaddr) : "cc" );
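/*
 * The macro above is a load / modify / COMPARE AND SWAP retry loop:
 * oldval (%1) is loaded from *uaddr, the injected insn computes newval
 * (%2) from it and oparg (%5), and CS stores newval only if *uaddr still
 * equals oldval, otherwise "jl 1b" recomputes with the fresh value. On
 * success "lhi %0,0" replaces the preloaded -EFAULT return value.
 */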
static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int oldval = 0, newval, ret;

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("lr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
				  ret, oldval, newval, uaddr, oparg);
		break;
	default:
		ret = -ENOSYS;
	}
	if (ret == 0)
		*old = oldval;
	return ret;
}
int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_op_pt(op, uaddr, oparg, old);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
	put_page(virt_to_page(uaddr));
	return ret;
}
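/*
 * get_page() pins the page before page_table_lock is dropped, so the
 * kernel-address alias obtained from __dat_user_addr() stays valid while
 * the atomic operation runs; put_page() releases the pin afterwards.
 */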
static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
				     u32 oldval, u32 newval)
{
	int ret;

	asm volatile("0: cs   %1,%4,0(%5)\n"
		     "1: la   %0,0\n"
		     "2:\n"
		     EX_TABLE(0b,2b) EX_TABLE(1b,2b)
		     : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
		     : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
		     : "cc", "memory" );
	*uval = oldval;
	return ret;
}
int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
			    u32 oldval, u32 newval)
{
	int ret;

	if (segment_eq(get_fs(), KERNEL_DS))
		return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	spin_lock(&current->mm->page_table_lock);
	uaddr = (u32 __force __user *)
		__dat_user_addr((__force unsigned long) uaddr, 1);
	if (!uaddr) {
		spin_unlock(&current->mm->page_table_lock);
		return -EFAULT;
	}
	get_page(virt_to_page(uaddr));
	spin_unlock(&current->mm->page_table_lock);
	ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
	put_page(virt_to_page(uaddr));
	return ret;
}
struct uaccess_ops uaccess_pt = {
	.copy_from_user		= copy_from_user_pt,
	.copy_from_user_small	= copy_from_user_pt,
	.copy_to_user		= copy_to_user_pt,
	.copy_to_user_small	= copy_to_user_pt,
	.copy_in_user		= copy_in_user_pt,
	.clear_user		= clear_user_pt,
	.strnlen_user		= strnlen_user_pt,
	.strncpy_from_user	= strncpy_from_user_pt,
	.futex_atomic_op	= futex_atomic_op_pt,
	.futex_atomic_cmpxchg	= futex_atomic_cmpxchg_pt,
};
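/*
 * Sketch of how this ops table is consumed (illustrative only; the actual
 * selection lives in the early s390 setup code, and the names below are
 * assumptions from that context): the generic uaccess wrappers dispatch
 * through a global "uaccess" ops structure, and this page-table-walk
 * variant is picked when the hardware MVCOS facility is absent, roughly:
 *
 *	uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos : uaccess_pt;
 */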