#ifndef __X86_64_UACCESS_H
#define __X86_64_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
#include <asm/page.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
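/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * letting a kernel buffer pass the user-pointer checks is to widen
 * addr_limit around the access and always restore it afterwards.  The
 * surrounding file, kernel_buf, count and pos variables are assumed to
 * exist in the caller.
 *
 *	mm_segment_t old_fs = get_fs();
 *	ssize_t ret;
 *
 *	set_fs(KERNEL_DS);
 *	ret = vfs_read(file, (char __user *)kernel_buf, count, &pos);
 *	set_fs(old_fs);
 */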
#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) (!((unsigned long)(addr) &			\
			   (current_thread_info()->addr_limit.seg)))
/*
 * Uhhuh, this needs 65-bit arithmetic. We have a carry..
 */
#define __range_not_ok(addr, size)					\
({									\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("# range_ok\n\r"						\
	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"		\
	    : "=&r" (flag), "=r" (roksum)				\
	    : "1" (addr), "g" ((long)(size)),				\
	      "g" (current_thread_info()->addr_limit.seg));		\
	flag;								\
})

#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
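/*
 * Illustrative sketch (not part of this header): validate a user range
 * once with access_ok(), then use the unchecked __get_user()/__put_user()
 * variants for the individual accesses.  The function name is
 * hypothetical.
 *
 *	static int read_pair(const int __user *uptr, int *a, int *b)
 *	{
 *		if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
 *			return -EFAULT;
 *		return 0;
 *	}
 */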
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#define ARCH_HAS_SEARCH_EXTABLE
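/*
 * At fault time the page-fault handler calls fixup_exception(), which
 * looks up the faulting instruction address in the sorted exception
 * table and, on a hit, redirects execution to the recorded fixup
 * address.
 */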
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

#define __get_user_x(size, ret, x, ptr)			\
	asm volatile("call __get_user_" #size		\
		     : "=a" (ret), "=d" (x)		\
		     : "c" (ptr)			\
		     : "r8")
/* Careful: we have to cast the result to the type of the pointer
 * for sign reasons */

#define get_user(x, ptr)						\
({									\
	unsigned long __val_gu;						\
	int __ret_gu;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 2:								\
		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 4:								\
		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
		break;							\
	case 8:								\
		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
		break;							\
	default:							\
		__get_user_bad();					\
		break;							\
	}								\
	(x) = (__force typeof(*(ptr)))__val_gu;				\
	__ret_gu;							\
})
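/*
 * Illustrative sketch (not part of this header): get_user() both checks
 * the address and fetches the value, evaluating to 0 on success or
 * -EFAULT on a bad pointer.  The function name is hypothetical.
 *
 *	static int fetch_flags(const unsigned int __user *uflags,
 *			       unsigned int *out)
 *	{
 *		if (get_user(*out, uflags))
 *			return -EFAULT;
 *		return 0;
 *	}
 */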
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_bad(void);

#define __put_user_x(size, ret, x, ptr)			\
	asm volatile("call __put_user_" #size		\
		     : "=a" (ret)			\
		     : "c" (ptr), "d" (x)		\
		     : "r8")
#define put_user(x, ptr)						\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user_unaligned __get_user
#define __put_user_unaligned __put_user
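/*
 * Illustrative sketch (not part of this header): put_user() stores a
 * single value at a checked user address, again evaluating to 0 or
 * -EFAULT.  The function name is hypothetical.
 *
 *	static int report_count(unsigned long count,
 *				unsigned long __user *ucount)
 *	{
 *		if (put_user(count, ucount))
 *			return -EFAULT;
 *		return 0;
 *	}
 */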
#define __put_user_nocheck(x, ptr, size)		\
({							\
	int __pu_err;					\
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;					\
})
#define __put_user_check(x, ptr, size)				\
({								\
	int __pu_err;						\
	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
	switch (size) {						\
	case 1:							\
		__put_user_x(1, __pu_err, x, __pu_addr);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_err, x, __pu_addr);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_err, x, __pu_addr);	\
		break;						\
	case 8:							\
		__put_user_x(8, __pu_err, x, __pu_addr);	\
		break;						\
	default:						\
		__put_user_bad();				\
	}							\
	__pu_err;						\
})
#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
		break;							\
	case 8:								\
		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)
/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
#define __get_user_nocheck(x, ptr, size)			\
({								\
	int __gu_err;						\
	unsigned long __gu_val;					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force typeof(*(ptr)))__gu_val;			\
	__gu_err;						\
})
extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
		break;							\
	case 8:								\
		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup, \"ax\"\n"			\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : "=r" (err), ltype (x)				\
		     : "m" (__m(addr)), "i"(errno), "0"(err))
/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len);

__must_check unsigned long
copy_to_user(void __user *to, const void *from, unsigned len);
__must_check unsigned long
copy_from_user(void *to, const void __user *from, unsigned len);
__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);
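/*
 * Illustrative sketch (not part of this header): a typical write()
 * handler bounces the user buffer through a kernel copy; a nonzero
 * return from copy_from_user() is the number of bytes left uncopied.
 * Names are hypothetical.
 *
 *	static ssize_t demo_write(struct file *file, const char __user *buf,
 *				  size_t len, loff_t *pos)
 *	{
 *		char kbuf[64];
 *
 *		if (len > sizeof(kbuf))
 *			len = sizeof(kbuf);
 *		if (copy_from_user(kbuf, buf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */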
static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			      ret, "b", "b", "=q", 1);
		return ret;
	case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			      ret, "w", "w", "=r", 2);
		return ret;
	case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			      ret, "l", "k", "=r", 4);
		return ret;
	case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			      ret, "q", "", "=r", 8);
		return ret;
	case 10:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u16 *)(8 + (char *)dst),
			       (u16 __user *)(8 + (char __user *)src),
			       ret, "w", "w", "=r", 2);
		return ret;
	case 16:
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (unlikely(ret))
			return ret;
		__get_user_asm(*(u64 *)(8 + (char *)dst),
			       (u64 __user *)(8 + (char __user *)src),
			       ret, "q", "", "=r", 8);
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			      ret, "b", "b", "iq", 1);
		return ret;
	case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			      ret, "w", "w", "ir", 2);
		return ret;
	case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			      ret, "l", "k", "ir", 4);
		return ret;
	case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			      ret, "q", "", "ir", 8);
		return ret;
	case 10:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 10);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		return ret;
	case 16:
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "ir", 16);
		if (unlikely(ret))
			return ret;
		asm("":::"memory");
		__put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
			       ret, "q", "", "ir", 8);
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}
static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		return ret;
	}
	case 2: {
		u16 tmp;
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		return ret;
	}
	case 4: {
		u32 tmp;
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		return ret;
	}
	case 8: {
		u64 tmp;
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "ir", 8);
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
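/*
 * Illustrative note: copy_in_user()/__copy_in_user() move data between
 * two userspace buffers without a kernel bounce buffer, e.g. when a
 * compat layer rewrites a structure in place in user memory.
 */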
__must_check long
strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long
__strncpy_from_user(char *dst, const char __user *src, long count);
__must_check long strnlen_user(const char __user *str, long n);
__must_check long __strnlen_user(const char __user *str, long n);
__must_check long strlen_user(const char __user *str);
__must_check unsigned long clear_user(void __user *mem, unsigned long len);
__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
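/*
 * Illustrative sketch (not part of this header): pulling a
 * NUL-terminated name out of userspace.  strncpy_from_user() returns
 * the string length on success or -EFAULT on a bad pointer; the buffer
 * size and the uname variable are hypothetical.
 *
 *	char name[32];
 *	long n = strncpy_from_user(name, uname, sizeof(name) - 1);
 *
 *	if (n < 0)
 *		return n;
 *	name[n] = '\0';
 */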
__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
					    unsigned size);

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
#define ARCH_HAS_NOCACHE_UACCESS 1
extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int __copy_from_user_nocache(void *dst, const void __user *src,
					   unsigned size)
{
	might_sleep();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int __copy_from_user_inatomic_nocache(void *dst,
						    const void __user *src,
						    unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
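/*
 * The nocache variants use non-temporal stores on the kernel side so
 * the copied data does not pollute the CPU caches; a nonzero zerorest
 * asks __copy_user_nocache() to zero the remaining destination bytes
 * if the user-side access faults part-way through.
 */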
#endif /* __X86_64_UACCESS_H */