1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13
14 #include <linux/kernel.h>
15 #include <linux/string.h>
16 #include <asm/asm-eva.h>
17 #include <asm/extable.h>
18
19 /*
20  * The fs value determines whether argument validity checking should be
21  * performed or not.  If get_fs() == USER_DS, checking is performed; with
22  * get_fs() == KERNEL_DS, checking is bypassed.
23  *
24  * For historical reasons, these macros are grossly misnamed.
25  */
26 #ifdef CONFIG_32BIT
27
28 #ifdef CONFIG_KVM_GUEST
29 #define __UA_LIMIT 0x40000000UL
30 #else
31 #define __UA_LIMIT 0x80000000UL
32 #endif
33
34 #define __UA_ADDR       ".word"
35 #define __UA_LA         "la"
36 #define __UA_ADDU       "addu"
37 #define __UA_t0         "$8"
38 #define __UA_t1         "$9"
39
40 #endif /* CONFIG_32BIT */
41
42 #ifdef CONFIG_64BIT
43
44 extern u64 __ua_limit;
45
46 #define __UA_LIMIT      __ua_limit
47
48 #define __UA_ADDR       ".dword"
49 #define __UA_LA         "dla"
50 #define __UA_ADDU       "daddu"
51 #define __UA_t0         "$12"
52 #define __UA_t1         "$13"
53
54 #endif /* CONFIG_64BIT */
55
56 /*
57  * USER_DS is a bitmask that has the bits set that may not be set in a valid
58  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
59  * the arithmetic we're doing only works if the limit is a power of two, so
60  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
61  * address in this range it's the process's problem, not ours :-)
62  */
63
64 #ifdef CONFIG_KVM_GUEST
65 #define KERNEL_DS       ((mm_segment_t) { 0x80000000UL })
66 #define USER_DS         ((mm_segment_t) { 0xC0000000UL })
67 #else
68 #define KERNEL_DS       ((mm_segment_t) { 0UL })
69 #define USER_DS         ((mm_segment_t) { __UA_LIMIT })
70 #endif
71
72 #define get_ds()        (KERNEL_DS)
73 #define get_fs()        (current_thread_info()->addr_limit)
74 #define set_fs(x)       (current_thread_info()->addr_limit = (x))
75
76 #define segment_eq(a, b)        ((a).seg == (b).seg)
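
/*
 * Illustrative sketch (editor's addition, not part of this header): the
 * classic pattern for letting the *_user accessors operate on kernel
 * pointers by temporarily widening the address limit.  The helper name is
 * hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      err = helper_that_calls_copy_from_user(kbuf, len);
 *      set_fs(old_fs);
 *
 * The saved limit must be restored on every path, including error paths.
 */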
77
78 /*
79  * eva_kernel_access() - determine whether accesses should go to kernel memory on an EVA system
80  *
81  * Determines whether memory accesses should be performed to kernel memory
82  * on a system using Extended Virtual Addressing (EVA).
83  *
84  * Return: true if a kernel memory access on an EVA system, else false.
85  */
86 static inline bool eva_kernel_access(void)
87 {
88         if (!IS_ENABLED(CONFIG_EVA))
89                 return false;
90
91         return uaccess_kernel();
92 }
93
94 /*
95  * Is an address valid? This does a straightforward calculation rather
96  * than tests.
97  *
98  * Address valid if:
99  *  - "addr" doesn't have any high-bits set
100  *  - AND "size" doesn't have any high-bits set
101  *  - AND "addr+size" doesn't have any high-bits set
102  *  - OR we are in kernel mode.
103  *
104  * __ua_size() is a trick to avoid runtime checking of positive constant
105  * sizes; for those we already know at compile time that the size is ok.
106  */
107 #define __ua_size(size)                                                 \
108         ((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
109
110 /*
111  * access_ok: - Checks if a user space pointer is valid
112  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
113  *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
114  *        to write to a block, it is always safe to read from it.
115  * @addr: User space pointer to start of block to check
116  * @size: Size of block to check
117  *
118  * Context: User context only. This function may sleep if pagefaults are
119  *          enabled.
120  *
121  * Checks if a pointer to a block of memory in user space is valid.
122  *
123  * Returns true (nonzero) if the memory block may be valid, false (zero)
124  * if it is definitely invalid.
125  *
126  * Note that, depending on architecture, this function probably just
127  * checks that the pointer is in the user space range - after calling
128  * this function, memory access functions may still return -EFAULT.
129  */
130
131 static inline int __access_ok(const void __user *p, unsigned long size)
132 {
133         unsigned long addr = (unsigned long)p;
134         return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
135 }
136
137 #define access_ok(type, addr, size)                                     \
138         likely(__access_ok((addr), (size)))
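
/*
 * Illustrative sketch (editor's addition; "ubuf" and "count" are
 * hypothetical):
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, count * sizeof(u32)))
 *              return -EFAULT;
 *
 * A successful access_ok() only means the range lies below the current
 * address limit; the subsequent __put_user()/__copy_to_user() calls may
 * still fault and report an error.
 */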
139
140 /*
141  * put_user: - Write a simple value into user space.
142  * @x:   Value to copy to user space.
143  * @ptr: Destination address, in user space.
144  *
145  * Context: User context only. This function may sleep if pagefaults are
146  *          enabled.
147  *
148  * This macro copies a single simple value from kernel space to user
149  * space.  It supports simple types like char and int, but not larger
150  * data types like structures or arrays.
151  *
152  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
153  * to the result of dereferencing @ptr.
154  *
155  * Returns zero on success, or -EFAULT on error.
156  */
157 #define put_user(x,ptr) \
158         __put_user_check((x), (ptr), sizeof(*(ptr)))
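
/*
 * Illustrative sketch (editor's addition; the device field and the ioctl
 * argument are hypothetical):
 *
 *      u32 __user *up = (u32 __user *)arg;
 *
 *      if (put_user(dev->irq_count, up))
 *              return -EFAULT;
 *
 * No separate access_ok() call is needed; put_user() performs the range
 * check itself and returns -EFAULT on failure.
 */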
159
160 /*
161  * get_user: - Get a simple variable from user space.
162  * @x:   Variable to store result.
163  * @ptr: Source address, in user space.
164  *
165  * Context: User context only. This function may sleep if pagefaults are
166  *          enabled.
167  *
168  * This macro copies a single simple variable from user space to kernel
169  * space.  It supports simple types like char and int, but not larger
170  * data types like structures or arrays.
171  *
172  * @ptr must have pointer-to-simple-variable type, and the result of
173  * dereferencing @ptr must be assignable to @x without a cast.
174  *
175  * Returns zero on success, or -EFAULT on error.
176  * On error, the variable @x is set to zero.
177  */
178 #define get_user(x,ptr) \
179         __get_user_check((x), (ptr), sizeof(*(ptr)))
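
/*
 * Illustrative sketch (editor's addition; "arg" is a hypothetical ioctl
 * argument):
 *
 *      u32 val;
 *
 *      if (get_user(val, (u32 __user *)arg))
 *              return -EFAULT;
 *
 * On a fault get_user() both returns -EFAULT and zeroes "val", so the
 * local variable never holds stale stack contents.
 */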
180
181 /*
182  * __put_user: - Write a simple value into user space, with less checking.
183  * @x:   Value to copy to user space.
184  * @ptr: Destination address, in user space.
185  *
186  * Context: User context only. This function may sleep if pagefaults are
187  *          enabled.
188  *
189  * This macro copies a single simple value from kernel space to user
190  * space.  It supports simple types like char and int, but not larger
191  * data types like structures or arrays.
192  *
193  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
194  * to the result of dereferencing @ptr.
195  *
196  * Caller must check the pointer with access_ok() before calling this
197  * function.
198  *
199  * Returns zero on success, or -EFAULT on error.
200  */
201 #define __put_user(x,ptr) \
202         __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
203
204 /*
205  * __get_user: - Get a simple variable from user space, with less checking.
206  * @x:   Variable to store result.
207  * @ptr: Source address, in user space.
208  *
209  * Context: User context only. This function may sleep if pagefaults are
210  *          enabled.
211  *
212  * This macro copies a single simple variable from user space to kernel
213  * space.  It supports simple types like char and int, but not larger
214  * data types like structures or arrays.
215  *
216  * @ptr must have pointer-to-simple-variable type, and the result of
217  * dereferencing @ptr must be assignable to @x without a cast.
218  *
219  * Caller must check the pointer with access_ok() before calling this
220  * function.
221  *
222  * Returns zero on success, or -EFAULT on error.
223  * On error, the variable @x is set to zero.
224  */
225 #define __get_user(x,ptr) \
226         __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
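
/*
 * Illustrative sketch (editor's addition; "up", "lo" and "hi" are
 * hypothetical): after one access_ok() covering the whole range, the
 * cheaper unchecked variants can be used for the individual accesses.
 *
 *      if (!access_ok(VERIFY_WRITE, up, 2 * sizeof(u32)))
 *              return -EFAULT;
 *      if (__get_user(lo, up) || __get_user(hi, up + 1))
 *              return -EFAULT;
 *      if (__put_user(hi, up) || __put_user(lo, up + 1))
 *              return -EFAULT;
 */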
227
228 struct __large_struct { unsigned long buf[100]; };
229 #define __m(x) (*(struct __large_struct __user *)(x))
230
231 /*
232  * Yuck.  We need two variants, one for 64bit operation and one
233  * for 32 bit mode and old iron.
234  */
235 #ifndef CONFIG_EVA
236 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
237 #else
238 /*
239  * Kernel specific functions for EVA. We need to use normal load instructions
240  * to read data from kernel when operating in EVA mode. We use these macros to
241  * avoid redefining __get_data_asm for EVA.
242  */
243 #undef _loadd
244 #undef _loadw
245 #undef _loadh
246 #undef _loadb
247 #ifdef CONFIG_32BIT
248 #define _loadd                  _loadw
249 #else
250 #define _loadd(reg, addr)       "ld " reg ", " addr
251 #endif
252 #define _loadw(reg, addr)       "lw " reg ", " addr
253 #define _loadh(reg, addr)       "lh " reg ", " addr
254 #define _loadb(reg, addr)       "lb " reg ", " addr
255
256 #define __get_kernel_common(val, size, ptr)                             \
257 do {                                                                    \
258         switch (size) {                                                 \
259         case 1: __get_data_asm(val, _loadb, ptr); break;                \
260         case 2: __get_data_asm(val, _loadh, ptr); break;                \
261         case 4: __get_data_asm(val, _loadw, ptr); break;                \
262         case 8: __GET_DW(val, _loadd, ptr); break;                      \
263         default: __get_user_unknown(); break;                           \
264         }                                                               \
265 } while (0)
266 #endif
267
268 #ifdef CONFIG_32BIT
269 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
270 #endif
271 #ifdef CONFIG_64BIT
272 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
273 #endif
274
275 extern void __get_user_unknown(void);
276
277 #define __get_user_common(val, size, ptr)                               \
278 do {                                                                    \
279         switch (size) {                                                 \
280         case 1: __get_data_asm(val, user_lb, ptr); break;               \
281         case 2: __get_data_asm(val, user_lh, ptr); break;               \
282         case 4: __get_data_asm(val, user_lw, ptr); break;               \
283         case 8: __GET_DW(val, user_ld, ptr); break;                     \
284         default: __get_user_unknown(); break;                           \
285         }                                                               \
286 } while (0)
287
288 #define __get_user_nocheck(x, ptr, size)                                \
289 ({                                                                      \
290         int __gu_err;                                                   \
291                                                                         \
292         if (eva_kernel_access()) {                                      \
293                 __get_kernel_common((x), size, ptr);                    \
294         } else {                                                        \
295                 __chk_user_ptr(ptr);                                    \
296                 __get_user_common((x), size, ptr);                      \
297         }                                                               \
298         __gu_err;                                                       \
299 })
300
301 #define __get_user_check(x, ptr, size)                                  \
302 ({                                                                      \
303         int __gu_err = -EFAULT;                                         \
304         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
305                                                                         \
306         might_fault();                                                  \
307         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {          \
308                 if (eva_kernel_access())                                \
309                         __get_kernel_common((x), size, __gu_ptr);       \
310                 else                                                    \
311                         __get_user_common((x), size, __gu_ptr);         \
312         } else                                                          \
313                 (x) = 0;                                                \
314                                                                         \
315         __gu_err;                                                       \
316 })
317
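/*
 * Editor's note on the asm blocks below: label 1 is the user access that
 * may fault, label 2 is the continuation point, and label 3 (in .fixup)
 * is the recovery code that sets the error code and zeroes the
 * destination register.  The __ex_table entry pairs the potentially
 * faulting instruction (1b) with its fixup (3b), so the page fault
 * handler can transfer control there instead of oopsing.
 */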
318 #define __get_data_asm(val, insn, addr)                                 \
319 {                                                                       \
320         long __gu_tmp;                                                  \
321                                                                         \
322         __asm__ __volatile__(                                           \
323         "1:     "insn("%1", "%3")"                              \n"     \
324         "2:                                                     \n"     \
325         "       .insn                                           \n"     \
326         "       .section .fixup,\"ax\"                          \n"     \
327         "3:     li      %0, %4                                  \n"     \
328         "       move    %1, $0                                  \n"     \
329         "       j       2b                                      \n"     \
330         "       .previous                                       \n"     \
331         "       .section __ex_table,\"a\"                       \n"     \
332         "       "__UA_ADDR "\t1b, 3b                            \n"     \
333         "       .previous                                       \n"     \
334         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
335         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
336                                                                         \
337         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
338 }
339
340 /*
341  * Get a 64-bit long long using 32-bit registers.
342  */
343 #define __get_data_asm_ll32(val, insn, addr)                            \
344 {                                                                       \
345         union {                                                         \
346                 unsigned long long      l;                              \
347                 __typeof__(*(addr))     t;                              \
348         } __gu_tmp;                                                     \
349                                                                         \
350         __asm__ __volatile__(                                           \
351         "1:     " insn("%1", "(%3)")"                           \n"     \
352         "2:     " insn("%D1", "4(%3)")"                         \n"     \
353         "3:                                                     \n"     \
354         "       .insn                                           \n"     \
355         "       .section        .fixup,\"ax\"                   \n"     \
356         "4:     li      %0, %4                                  \n"     \
357         "       move    %1, $0                                  \n"     \
358         "       move    %D1, $0                                 \n"     \
359         "       j       3b                                      \n"     \
360         "       .previous                                       \n"     \
361         "       .section        __ex_table,\"a\"                \n"     \
362         "       " __UA_ADDR "   1b, 4b                          \n"     \
363         "       " __UA_ADDR "   2b, 4b                          \n"     \
364         "       .previous                                       \n"     \
365         : "=r" (__gu_err), "=&r" (__gu_tmp.l)                           \
366         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
367                                                                         \
368         (val) = __gu_tmp.t;                                             \
369 }
370
371 #ifndef CONFIG_EVA
372 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
373 #else
374 /*
375  * Kernel specific functions for EVA. We need to use normal store instructions
376  * to write data to kernel memory when operating in EVA mode. We use these
377  * macros to avoid redefining __put_data_asm for EVA.
378  */
379 #undef _stored
380 #undef _storew
381 #undef _storeh
382 #undef _storeb
383 #ifdef CONFIG_32BIT
384 #define _stored                 _storew
385 #else
386 #define _stored(reg, addr)      "sd " reg ", " addr
387 #endif
388
389 #define _storew(reg, addr)      "sw " reg ", " addr
390 #define _storeh(reg, addr)      "sh " reg ", " addr
391 #define _storeb(reg, addr)      "sb " reg ", " addr
392
393 #define __put_kernel_common(ptr, size)                                  \
394 do {                                                                    \
395         switch (size) {                                                 \
396         case 1: __put_data_asm(_storeb, ptr); break;                    \
397         case 2: __put_data_asm(_storeh, ptr); break;                    \
398         case 4: __put_data_asm(_storew, ptr); break;                    \
399         case 8: __PUT_DW(_stored, ptr); break;                          \
400         default: __put_user_unknown(); break;                           \
401         }                                                               \
402 } while(0)
403 #endif
404
405 /*
406  * Yuck.  We need two variants, one for 64bit operation and one
407  * for 32 bit mode and old iron.
408  */
409 #ifdef CONFIG_32BIT
410 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
411 #endif
412 #ifdef CONFIG_64BIT
413 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
414 #endif
415
416 #define __put_user_common(ptr, size)                                    \
417 do {                                                                    \
418         switch (size) {                                                 \
419         case 1: __put_data_asm(user_sb, ptr); break;                    \
420         case 2: __put_data_asm(user_sh, ptr); break;                    \
421         case 4: __put_data_asm(user_sw, ptr); break;                    \
422         case 8: __PUT_DW(user_sd, ptr); break;                          \
423         default: __put_user_unknown(); break;                           \
424         }                                                               \
425 } while (0)
426
427 #define __put_user_nocheck(x, ptr, size)                                \
428 ({                                                                      \
429         __typeof__(*(ptr)) __pu_val;                                    \
430         int __pu_err = 0;                                               \
431                                                                         \
432         __pu_val = (x);                                                 \
433         if (eva_kernel_access()) {                                      \
434                 __put_kernel_common(ptr, size);                         \
435         } else {                                                        \
436                 __chk_user_ptr(ptr);                                    \
437                 __put_user_common(ptr, size);                           \
438         }                                                               \
439         __pu_err;                                                       \
440 })
441
442 #define __put_user_check(x, ptr, size)                                  \
443 ({                                                                      \
444         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
445         __typeof__(*(ptr)) __pu_val = (x);                              \
446         int __pu_err = -EFAULT;                                         \
447                                                                         \
448         might_fault();                                                  \
449         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {        \
450                 if (eva_kernel_access())                                \
451                         __put_kernel_common(__pu_addr, size);           \
452                 else                                                    \
453                         __put_user_common(__pu_addr, size);             \
454         }                                                               \
455                                                                         \
456         __pu_err;                                                       \
457 })
458
459 #define __put_data_asm(insn, ptr)                                       \
460 {                                                                       \
461         __asm__ __volatile__(                                           \
462         "1:     "insn("%z2", "%3")"     # __put_data_asm        \n"     \
463         "2:                                                     \n"     \
464         "       .insn                                           \n"     \
465         "       .section        .fixup,\"ax\"                   \n"     \
466         "3:     li      %0, %4                                  \n"     \
467         "       j       2b                                      \n"     \
468         "       .previous                                       \n"     \
469         "       .section        __ex_table,\"a\"                \n"     \
470         "       " __UA_ADDR "   1b, 3b                          \n"     \
471         "       .previous                                       \n"     \
472         : "=r" (__pu_err)                                               \
473         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
474           "i" (-EFAULT));                                               \
475 }
476
477 #define __put_data_asm_ll32(insn, ptr)                                  \
478 {                                                                       \
479         __asm__ __volatile__(                                           \
480         "1:     "insn("%2", "(%3)")"    # __put_data_asm_ll32   \n"     \
481         "2:     "insn("%D2", "4(%3)")"                          \n"     \
482         "3:                                                     \n"     \
483         "       .insn                                           \n"     \
484         "       .section        .fixup,\"ax\"                   \n"     \
485         "4:     li      %0, %4                                  \n"     \
486         "       j       3b                                      \n"     \
487         "       .previous                                       \n"     \
488         "       .section        __ex_table,\"a\"                \n"     \
489         "       " __UA_ADDR "   1b, 4b                          \n"     \
490         "       " __UA_ADDR "   2b, 4b                          \n"     \
491         "       .previous"                                              \
492         : "=r" (__pu_err)                                               \
493         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
494           "i" (-EFAULT));                                               \
495 }
496
497 extern void __put_user_unknown(void);
498
499 /*
500  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
501  * EVA unaligned access is handled in the ADE exception handler.
502  */
503 #ifndef CONFIG_EVA
504 /*
505  * put_user_unaligned: - Write a simple value into user space.
506  * @x:   Value to copy to user space.
507  * @ptr: Destination address, in user space.
508  *
509  * Context: User context only. This function may sleep if pagefaults are
510  *          enabled.
511  *
512  * This macro copies a single simple value from kernel space to user
513  * space.  It supports simple types like char and int, but not larger
514  * data types like structures or arrays.
515  *
516  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
517  * to the result of dereferencing @ptr.
518  *
519  * Returns zero on success, or -EFAULT on error.
520  */
521 #define put_user_unaligned(x,ptr)       \
522         __put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
523
524 /*
525  * get_user_unaligned: - Get a simple variable from user space.
526  * @x:   Variable to store result.
527  * @ptr: Source address, in user space.
528  *
529  * Context: User context only. This function may sleep if pagefaults are
530  *          enabled.
531  *
532  * This macro copies a single simple variable from user space to kernel
533  * space.  It supports simple types like char and int, but not larger
534  * data types like structures or arrays.
535  *
536  * @ptr must have pointer-to-simple-variable type, and the result of
537  * dereferencing @ptr must be assignable to @x without a cast.
538  *
539  * Returns zero on success, or -EFAULT on error.
540  * On error, the variable @x is set to zero.
541  */
542 #define get_user_unaligned(x,ptr) \
543         __get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
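
/*
 * Illustrative sketch (editor's addition; the packed structure and the
 * "uhdr" pointer are hypothetical): the *_unaligned variants are meant
 * for user space fields that may not be naturally aligned.
 *
 *      struct demo_hdr {
 *              u8  tag;
 *              u32 len;        starts at offset 1, so it is misaligned
 *      } __packed;
 *
 *      struct demo_hdr __user *h = uhdr;
 *      u32 len;
 *
 *      if (get_user_unaligned(len, &h->len))
 *              return -EFAULT;
 */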
544
545 /*
546  * __put_user_unaligned: - Write a simple value into user space, with less checking.
547  * @x:   Value to copy to user space.
548  * @ptr: Destination address, in user space.
549  *
550  * Context: User context only. This function may sleep if pagefaults are
551  *          enabled.
552  *
553  * This macro copies a single simple value from kernel space to user
554  * space.  It supports simple types like char and int, but not larger
555  * data types like structures or arrays.
556  *
557  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
558  * to the result of dereferencing @ptr.
559  *
560  * Caller must check the pointer with access_ok() before calling this
561  * function.
562  *
563  * Returns zero on success, or -EFAULT on error.
564  */
565 #define __put_user_unaligned(x,ptr) \
566         __put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
567
568 /*
569  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
570  * @x:   Variable to store result.
571  * @ptr: Source address, in user space.
572  *
573  * Context: User context only. This function may sleep if pagefaults are
574  *          enabled.
575  *
576  * This macro copies a single simple variable from user space to kernel
577  * space.  It supports simple types like char and int, but not larger
578  * data types like structures or arrays.
579  *
580  * @ptr must have pointer-to-simple-variable type, and the result of
581  * dereferencing @ptr must be assignable to @x without a cast.
582  *
583  * Caller must check the pointer with access_ok() before calling this
584  * function.
585  *
586  * Returns zero on success, or -EFAULT on error.
587  * On error, the variable @x is set to zero.
588  */
589 #define __get_user_unaligned(x,ptr) \
590         __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
591
592 /*
593  * Yuck.  We need two variants, one for 64bit operation and one
594  * for 32 bit mode and old iron.
595  */
596 #ifdef CONFIG_32BIT
597 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
598         __get_user_unaligned_asm_ll32(val, ptr)
599 #endif
600 #ifdef CONFIG_64BIT
601 #define __GET_USER_UNALIGNED_DW(val, ptr)                               \
602         __get_user_unaligned_asm(val, "uld", ptr)
603 #endif
604
605 extern void __get_user_unaligned_unknown(void);
606
607 #define __get_user_unaligned_common(val, size, ptr)                     \
608 do {                                                                    \
609         switch (size) {                                                 \
610         case 1: __get_data_asm(val, "lb", ptr); break;                  \
611         case 2: __get_data_unaligned_asm(val, "ulh", ptr); break;       \
612         case 4: __get_data_unaligned_asm(val, "ulw", ptr); break;       \
613         case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;               \
614         default: __get_user_unaligned_unknown(); break;                 \
615         }                                                               \
616 } while (0)
617
618 #define __get_user_unaligned_nocheck(x,ptr,size)                        \
619 ({                                                                      \
620         int __gu_err;                                                   \
621                                                                         \
622         __get_user_unaligned_common((x), size, ptr);                    \
623         __gu_err;                                                       \
624 })
625
626 #define __get_user_unaligned_check(x,ptr,size)                          \
627 ({                                                                      \
628         int __gu_err = -EFAULT;                                         \
629         const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);             \
630                                                                         \
631         if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))            \
632                 __get_user_unaligned_common((x), size, __gu_ptr);       \
633                                                                         \
634         __gu_err;                                                       \
635 })
636
637 #define __get_data_unaligned_asm(val, insn, addr)                       \
638 {                                                                       \
639         long __gu_tmp;                                                  \
640                                                                         \
641         __asm__ __volatile__(                                           \
642         "1:     " insn "        %1, %3                          \n"     \
643         "2:                                                     \n"     \
644         "       .insn                                           \n"     \
645         "       .section .fixup,\"ax\"                          \n"     \
646         "3:     li      %0, %4                                  \n"     \
647         "       move    %1, $0                                  \n"     \
648         "       j       2b                                      \n"     \
649         "       .previous                                       \n"     \
650         "       .section __ex_table,\"a\"                       \n"     \
651         "       "__UA_ADDR "\t1b, 3b                            \n"     \
652         "       "__UA_ADDR "\t1b + 4, 3b                        \n"     \
653         "       .previous                                       \n"     \
654         : "=r" (__gu_err), "=r" (__gu_tmp)                              \
655         : "0" (0), "o" (__m(addr)), "i" (-EFAULT));                     \
656                                                                         \
657         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
658 }
659
660 /*
661  * Get a 64-bit long long using 32-bit registers.
662  */
663 #define __get_user_unaligned_asm_ll32(val, addr)                        \
664 {                                                                       \
665         unsigned long long __gu_tmp;                                    \
666                                                                         \
667         __asm__ __volatile__(                                           \
668         "1:     ulw     %1, (%3)                                \n"     \
669         "2:     ulw     %D1, 4(%3)                              \n"     \
670         "       move    %0, $0                                  \n"     \
671         "3:                                                     \n"     \
672         "       .insn                                           \n"     \
673         "       .section        .fixup,\"ax\"                   \n"     \
674         "4:     li      %0, %4                                  \n"     \
675         "       move    %1, $0                                  \n"     \
676         "       move    %D1, $0                                 \n"     \
677         "       j       3b                                      \n"     \
678         "       .previous                                       \n"     \
679         "       .section        __ex_table,\"a\"                \n"     \
680         "       " __UA_ADDR "   1b, 4b                          \n"     \
681         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
682         "       " __UA_ADDR "   2b, 4b                          \n"     \
683         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
684         "       .previous                                       \n"     \
685         : "=r" (__gu_err), "=&r" (__gu_tmp)                             \
686         : "0" (0), "r" (addr), "i" (-EFAULT));                          \
687         (val) = (__typeof__(*(addr))) __gu_tmp;                         \
688 }
689
690 /*
691  * Yuck.  We need two variants, one for 64bit operation and one
692  * for 32 bit mode and old iron.
693  */
694 #ifdef CONFIG_32BIT
695 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
696 #endif
697 #ifdef CONFIG_64BIT
698 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
699 #endif
700
701 #define __put_user_unaligned_common(ptr, size)                          \
702 do {                                                                    \
703         switch (size) {                                                 \
704         case 1: __put_data_asm("sb", ptr); break;                       \
705         case 2: __put_user_unaligned_asm("ush", ptr); break;            \
706         case 4: __put_user_unaligned_asm("usw", ptr); break;            \
707         case 8: __PUT_USER_UNALIGNED_DW(ptr); break;                    \
708         default: __put_user_unaligned_unknown(); break;                 \
709         }                                                               \
710 } while (0)
710
711 #define __put_user_unaligned_nocheck(x,ptr,size)                        \
712 ({                                                                      \
713         __typeof__(*(ptr)) __pu_val;                                    \
714         int __pu_err = 0;                                               \
715                                                                         \
716         __pu_val = (x);                                                 \
717         __put_user_unaligned_common(ptr, size);                         \
718         __pu_err;                                                       \
719 })
720
721 #define __put_user_unaligned_check(x,ptr,size)                          \
722 ({                                                                      \
723         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
724         __typeof__(*(ptr)) __pu_val = (x);                              \
725         int __pu_err = -EFAULT;                                         \
726                                                                         \
727         if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))          \
728                 __put_user_unaligned_common(__pu_addr, size);           \
729                                                                         \
730         __pu_err;                                                       \
731 })
732
733 #define __put_user_unaligned_asm(insn, ptr)                             \
734 {                                                                       \
735         __asm__ __volatile__(                                           \
736         "1:     " insn "        %z2, %3         # __put_user_unaligned_asm\n" \
737         "2:                                                     \n"     \
738         "       .insn                                           \n"     \
739         "       .section        .fixup,\"ax\"                   \n"     \
740         "3:     li      %0, %4                                  \n"     \
741         "       j       2b                                      \n"     \
742         "       .previous                                       \n"     \
743         "       .section        __ex_table,\"a\"                \n"     \
744         "       " __UA_ADDR "   1b, 3b                          \n"     \
745         "       .previous                                       \n"     \
746         : "=r" (__pu_err)                                               \
747         : "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),                     \
748           "i" (-EFAULT));                                               \
749 }
750
751 #define __put_user_unaligned_asm_ll32(ptr)                              \
752 {                                                                       \
753         __asm__ __volatile__(                                           \
754         "1:     sw      %2, (%3)        # __put_user_unaligned_asm_ll32 \n" \
755         "2:     sw      %D2, 4(%3)                              \n"     \
756         "3:                                                     \n"     \
757         "       .insn                                           \n"     \
758         "       .section        .fixup,\"ax\"                   \n"     \
759         "4:     li      %0, %4                                  \n"     \
760         "       j       3b                                      \n"     \
761         "       .previous                                       \n"     \
762         "       .section        __ex_table,\"a\"                \n"     \
763         "       " __UA_ADDR "   1b, 4b                          \n"     \
764         "       " __UA_ADDR "   1b + 4, 4b                      \n"     \
765         "       " __UA_ADDR "   2b, 4b                          \n"     \
766         "       " __UA_ADDR "   2b + 4, 4b                      \n"     \
767         "       .previous"                                              \
768         : "=r" (__pu_err)                                               \
769         : "0" (0), "r" (__pu_val), "r" (ptr),                           \
770           "i" (-EFAULT));                                               \
771 }
772
773 extern void __put_user_unaligned_unknown(void);
774 #endif
775
776 /*
777  * We're generating jumps to subroutines which may lie outside the range of
778  * the jal jump instruction.
779  */
780 #ifdef MODULE
781 #define __MODULE_JAL(destination)                                       \
782         ".set\tnoat\n\t"                                                \
783         __UA_LA "\t$1, " #destination "\n\t"                            \
784         "jalr\t$1\n\t"                                                  \
785         ".set\tat\n\t"
786 #else
787 #define __MODULE_JAL(destination)                                       \
788         "jal\t" #destination "\n\t"
789 #endif
790
791 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&    \
792                                               defined(CONFIG_CPU_HAS_PREFETCH))
793 #define DADDI_SCRATCH "$3"
794 #else
795 #define DADDI_SCRATCH "$0"
796 #endif
797
798 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
799
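/*
 * Editor's note: the __invoke_copy_* wrappers below pin the destination,
 * source and length into $4, $5 and $6 (the MIPS argument registers) and
 * call the assembler copy routines directly.  The routines update the
 * length register in place with the number of bytes left uncopied, which
 * is why __cu_len_r is the value of each expression.
 */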
800 #define __invoke_copy_from(func, to, from, n)                           \
801 ({                                                                      \
802         register void *__cu_to_r __asm__("$4");                         \
803         register const void __user *__cu_from_r __asm__("$5");          \
804         register long __cu_len_r __asm__("$6");                         \
805                                                                         \
806         __cu_to_r = (to);                                               \
807         __cu_from_r = (from);                                           \
808         __cu_len_r = (n);                                               \
809         __asm__ __volatile__(                                           \
810         ".set\tnoreorder\n\t"                                           \
811         __MODULE_JAL(func)                                              \
812         ".set\tnoat\n\t"                                                \
813         __UA_ADDU "\t$1, %1, %2\n\t"                                    \
814         ".set\tat\n\t"                                                  \
815         ".set\treorder"                                                 \
816         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
817         :                                                               \
818         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
819           DADDI_SCRATCH, "memory");                                     \
820         __cu_len_r;                                                     \
821 })
822
823 #define __invoke_copy_to(func, to, from, n)                             \
824 ({                                                                      \
825         register void __user *__cu_to_r __asm__("$4");                  \
826         register const void *__cu_from_r __asm__("$5");                 \
827         register long __cu_len_r __asm__("$6");                         \
828                                                                         \
829         __cu_to_r = (to);                                               \
830         __cu_from_r = (from);                                           \
831         __cu_len_r = (n);                                               \
832         __asm__ __volatile__(                                           \
833         __MODULE_JAL(func)                                              \
834         : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)       \
835         :                                                               \
836         : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",  \
837           DADDI_SCRATCH, "memory");                                     \
838         __cu_len_r;                                                     \
839 })
840
841 #define __invoke_copy_from_kernel(to, from, n)                          \
842         __invoke_copy_from(__copy_user, to, from, n)
843
844 #define __invoke_copy_from_kernel_inatomic(to, from, n)                 \
845         __invoke_copy_from(__copy_user_inatomic, to, from, n)
846
847 #define __invoke_copy_to_kernel(to, from, n)                            \
848         __invoke_copy_to(__copy_user, to, from, n)
849
850 #define ___invoke_copy_in_kernel(to, from, n)                           \
851         __invoke_copy_from(__copy_user, to, from, n)
852
853 #ifndef CONFIG_EVA
854 #define __invoke_copy_from_user(to, from, n)                            \
855         __invoke_copy_from(__copy_user, to, from, n)
856
857 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
858         __invoke_copy_from(__copy_user_inatomic, to, from, n)
859
860 #define __invoke_copy_to_user(to, from, n)                              \
861         __invoke_copy_to(__copy_user, to, from, n)
862
863 #define ___invoke_copy_in_user(to, from, n)                             \
864         __invoke_copy_from(__copy_user, to, from, n)
865
866 #else
867
868 /* EVA specific functions */
869
870 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
871                                        size_t __n);
872 extern size_t __copy_from_user_eva(void *__to, const void *__from,
873                                    size_t __n);
874 extern size_t __copy_to_user_eva(void *__to, const void *__from,
875                                  size_t __n);
876 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
877
878 /*
879  * Source or destination address is in userland. We need to go through
880  * the TLB
881  */
882 #define __invoke_copy_from_user(to, from, n)                            \
883         __invoke_copy_from(__copy_from_user_eva, to, from, n)
884
885 #define __invoke_copy_from_user_inatomic(to, from, n)                   \
886         __invoke_copy_from(__copy_user_inatomic_eva, to, from, n)
887
888 #define __invoke_copy_to_user(to, from, n)                              \
889         __invoke_copy_to(__copy_to_user_eva, to, from, n)
890
891 #define ___invoke_copy_in_user(to, from, n)                             \
892         __invoke_copy_from(__copy_in_user_eva, to, from, n)
893
894 #endif /* CONFIG_EVA */
895
896 /*
897  * __copy_to_user: - Copy a block of data into user space, with less checking.
898  * @to:   Destination address, in user space.
899  * @from: Source address, in kernel space.
900  * @n:    Number of bytes to copy.
901  *
902  * Context: User context only. This function may sleep if pagefaults are
903  *          enabled.
904  *
905  * Copy data from kernel space to user space.  Caller must check
906  * the specified block with access_ok() before calling this function.
907  *
908  * Returns number of bytes that could not be copied.
909  * On success, this will be zero.
910  */
911 #define __copy_to_user(to, from, n)                                     \
912 ({                                                                      \
913         void __user *__cu_to;                                           \
914         const void *__cu_from;                                          \
915         long __cu_len;                                                  \
916                                                                         \
917         __cu_to = (to);                                                 \
918         __cu_from = (from);                                             \
919         __cu_len = (n);                                                 \
920                                                                         \
921         check_object_size(__cu_from, __cu_len, true);                   \
922         might_fault();                                                  \
923                                                                         \
924         if (eva_kernel_access())                                        \
925                 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
926                                                    __cu_len);           \
927         else                                                            \
928                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
929                                                  __cu_len);             \
930         __cu_len;                                                       \
931 })
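
/*
 * Illustrative sketch (editor's addition; "ubuf", "kbuf" and "len" are
 * hypothetical):
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 *      if (__copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 *
 * A nonzero return value is the number of bytes left uncopied; most
 * callers simply treat any nonzero value as a fault.
 */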
932
933 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
934
935 #define __copy_to_user_inatomic(to, from, n)                            \
936 ({                                                                      \
937         void __user *__cu_to;                                           \
938         const void *__cu_from;                                          \
939         long __cu_len;                                                  \
940                                                                         \
941         __cu_to = (to);                                                 \
942         __cu_from = (from);                                             \
943         __cu_len = (n);                                                 \
944                                                                         \
945         check_object_size(__cu_from, __cu_len, true);                   \
946                                                                         \
947         if (eva_kernel_access())                                        \
948                 __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,  \
949                                                    __cu_len);           \
950         else                                                            \
951                 __cu_len = __invoke_copy_to_user(__cu_to, __cu_from,    \
952                                                  __cu_len);             \
953         __cu_len;                                                       \
954 })
955
956 #define __copy_from_user_inatomic(to, from, n)                          \
957 ({                                                                      \
958         void *__cu_to;                                                  \
959         const void __user *__cu_from;                                   \
960         long __cu_len;                                                  \
961                                                                         \
962         __cu_to = (to);                                                 \
963         __cu_from = (from);                                             \
964         __cu_len = (n);                                                 \
965                                                                         \
966         check_object_size(__cu_to, __cu_len, false);                    \
967                                                                         \
968         if (eva_kernel_access())                                        \
969                 __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,  \
970                                                               __cu_from,\
971                                                               __cu_len);\
972         else                                                            \
973                 __cu_len = __invoke_copy_from_user_inatomic(__cu_to,    \
974                                                             __cu_from,  \
975                                                             __cu_len);  \
976         __cu_len;                                                       \
977 })
978
979 /*
980  * copy_to_user: - Copy a block of data into user space.
981  * @to:   Destination address, in user space.
982  * @from: Source address, in kernel space.
983  * @n:    Number of bytes to copy.
984  *
985  * Context: User context only. This function may sleep if pagefaults are
986  *          enabled.
987  *
988  * Copy data from kernel space to user space.
989  *
990  * Returns number of bytes that could not be copied.
991  * On success, this will be zero.
992  */
993 #define copy_to_user(to, from, n)                                       \
994 ({                                                                      \
995         void __user *__cu_to;                                           \
996         const void *__cu_from;                                          \
997         long __cu_len;                                                  \
998                                                                         \
999         __cu_to = (to);                                                 \
1000         __cu_from = (from);                                             \
1001         __cu_len = (n);                                                 \
1002                                                                         \
1003         check_object_size(__cu_from, __cu_len, true);                   \
1004                                                                         \
1005         if (eva_kernel_access()) {                                      \
1006                 __cu_len = __invoke_copy_to_kernel(__cu_to,             \
1007                                                    __cu_from,           \
1008                                                    __cu_len);           \
1009         } else {                                                        \
1010                 if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
1011                         might_fault();                                  \
1012                         __cu_len = __invoke_copy_to_user(__cu_to,       \
1013                                                          __cu_from,     \
1014                                                          __cu_len);     \
1015                 }                                                       \
1016         }                                                               \
1017         __cu_len;                                                       \
1018 })
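
/*
 * Illustrative sketch (editor's addition): a read()-style handler with
 * hypothetical names.
 *
 *      static ssize_t demo_read(struct file *filp, char __user *buf,
 *                               size_t count, loff_t *ppos)
 *      {
 *              size_t n = min(count, sizeof(demo_msg));
 *
 *              if (copy_to_user(buf, demo_msg, n))
 *                      return -EFAULT;
 *              return n;
 *      }
 */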
1019
1020 /*
1021  * __copy_from_user: - Copy a block of data from user space, with less checking.
1022  * @to:   Destination address, in kernel space.
1023  * @from: Source address, in user space.
1024  * @n:    Number of bytes to copy.
1025  *
1026  * Context: User context only. This function may sleep if pagefaults are
1027  *          enabled.
1028  *
1029  * Copy data from user space to kernel space.  Caller must check
1030  * the specified block with access_ok() before calling this function.
1031  *
1032  * Returns number of bytes that could not be copied.
1033  * On success, this will be zero.
1034  *
1035  * If some data could not be copied, this function will pad the copied
1036  * data to the requested size using zero bytes.
1037  */
1038 #define __copy_from_user(to, from, n)                                   \
1039 ({                                                                      \
1040         void *__cu_to;                                                  \
1041         const void __user *__cu_from;                                   \
1042         long __cu_len;                                                  \
1043                                                                         \
1044         __cu_to = (to);                                                 \
1045         __cu_from = (from);                                             \
1046         __cu_len = (n);                                                 \
1047                                                                         \
1048         check_object_size(__cu_to, __cu_len, false);                    \
1049                                                                         \
1050         if (eva_kernel_access()) {                                      \
1051                 __cu_len = __invoke_copy_from_kernel(__cu_to,           \
1052                                                      __cu_from,         \
1053                                                      __cu_len);         \
1054         } else {                                                        \
1055                 might_fault();                                          \
1056                 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from,  \
1057                                                    __cu_len);           \
1058         }                                                               \
1059         __cu_len;                                                       \
1060 })
1061
1062 /*
1063  * copy_from_user: - Copy a block of data from user space.
1064  * @to:   Destination address, in kernel space.
1065  * @from: Source address, in user space.
1066  * @n:    Number of bytes to copy.
1067  *
1068  * Context: User context only. This function may sleep if pagefaults are
1069  *          enabled.
1070  *
1071  * Copy data from user space to kernel space.
1072  *
1073  * Returns number of bytes that could not be copied.
1074  * On success, this will be zero.
1075  *
1076  * If some data could not be copied, this function will pad the copied
1077  * data to the requested size using zero bytes.
1078  */
1079 #define copy_from_user(to, from, n)                                     \
1080 ({                                                                      \
1081         void *__cu_to;                                                  \
1082         const void __user *__cu_from;                                   \
1083         long __cu_len, __cu_res;                                        \
1084                                                                         \
1085         __cu_to = (to);                                                 \
1086         __cu_from = (from);                                             \
1087         __cu_res = __cu_len = (n);                                      \
1088                                                                         \
1089         check_object_size(__cu_to, __cu_len, false);                    \
1090                                                                         \
1091         if (eva_kernel_access()) {                                      \
1092                 __cu_res = __invoke_copy_from_kernel(__cu_to,           \
1093                                                      __cu_from,         \
1094                                                      __cu_len);         \
1095         } else {                                                        \
1096                 if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {      \
1097                         might_fault();                                  \
1098                         __cu_res = __invoke_copy_from_user(__cu_to,     \
1099                                                            __cu_from,   \
1100                                                            __cu_len);   \
1101                 }                                                       \
1102         }                                                               \
1103         if (unlikely(__cu_res))                                         \
1104                 memset(__cu_to + __cu_len - __cu_res, 0, __cu_res);     \
1105         __cu_res;                                                       \
1106 })
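
/*
 * Illustrative sketch (not compiled as part of this header): a typical
 * pattern for pulling a fixed-size request from user space, for example in
 * an ioctl handler.  copy_from_user() returns the number of bytes it could
 * not copy, so any non-zero result maps to -EFAULT.  All names below are
 * hypothetical.
 *
 *      #include <linux/types.h>
 *      #include <linux/errno.h>
 *      #include <linux/uaccess.h>
 *
 *      #define DEMO_MAX_COUNT 4096
 *
 *      struct demo_req {
 *              u64 addr;
 *              u32 count;
 *      };
 *
 *      static long demo_ioctl_set(const void __user *argp)
 *      {
 *              struct demo_req req;
 *
 *              if (copy_from_user(&req, argp, sizeof(req)))
 *                      return -EFAULT;
 *              // req is now a private kernel copy; validate before use
 *              if (req.count > DEMO_MAX_COUNT)
 *                      return -EINVAL;
 *              return 0;
 *      }
 */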
1107
1108 #define __copy_in_user(to, from, n)                                     \
1109 ({                                                                      \
1110         void __user *__cu_to;                                           \
1111         const void __user *__cu_from;                                   \
1112         long __cu_len;                                                  \
1113                                                                         \
1114         __cu_to = (to);                                                 \
1115         __cu_from = (from);                                             \
1116         __cu_len = (n);                                                 \
1117         if (eva_kernel_access()) {                                      \
1118                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1119                                                     __cu_len);          \
1120         } else {                                                        \
1121                 might_fault();                                          \
1122                 __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,   \
1123                                                   __cu_len);            \
1124         }                                                               \
1125         __cu_len;                                                       \
1126 })
1127
1128 #define copy_in_user(to, from, n)                                       \
1129 ({                                                                      \
1130         void __user *__cu_to;                                           \
1131         const void __user *__cu_from;                                   \
1132         long __cu_len;                                                  \
1133                                                                         \
1134         __cu_to = (to);                                                 \
1135         __cu_from = (from);                                             \
1136         __cu_len = (n);                                                 \
1137         if (eva_kernel_access()) {                                      \
1138                 __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \
1139                                                     __cu_len);          \
1140         } else {                                                        \
1141                 if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1142                            access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1143                         might_fault();                                  \
1144                         __cu_len = ___invoke_copy_in_user(__cu_to,      \
1145                                                           __cu_from,    \
1146                                                           __cu_len);    \
1147                 }                                                       \
1148         }                                                               \
1149         __cu_len;                                                       \
1150 })
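
/*
 * Illustrative sketch (not compiled as part of this header): copy_in_user()
 * moves data between two user-space buffers without a kernel bounce buffer,
 * which is mainly useful in compat (32-bit user space on a 64-bit kernel)
 * paths.  The helper below is hypothetical.
 *
 *      #include <linux/errno.h>
 *      #include <linux/uaccess.h>
 *
 *      static int demo_dup_user_buf(void __user *dst, const void __user *src,
 *                                   unsigned long len)
 *      {
 *              if (copy_in_user(dst, src, len))
 *                      return -EFAULT;
 *              return 0;
 *      }
 */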
1151
1152 extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size);
1153 extern __kernel_size_t __bzero(void __user *addr, __kernel_size_t size);
1154
1155 /*
1156  * __clear_user: - Zero a block of memory in user space, with less checking.
1157  * @to:   Destination address, in user space.
1158  * @n:    Number of bytes to zero.
1159  *
1160  * Zero a block of memory in user space.  Caller must check
1161  * the specified block with access_ok() before calling this function.
1162  *
1163  * Returns number of bytes that could not be cleared.
1164  * On success, this will be zero.
1165  */
1166 static inline __kernel_size_t
1167 __clear_user(void __user *addr, __kernel_size_t size)
1168 {
1169         __kernel_size_t res;
1170
1171         if (eva_kernel_access()) {
1172                 __asm__ __volatile__(
1173                         "move\t$4, %1\n\t"
1174                         "move\t$5, $0\n\t"
1175                         "move\t$6, %2\n\t"
1176                         __MODULE_JAL(__bzero_kernel)
1177                         "move\t%0, $6"
1178                         : "=r" (res)
1179                         : "r" (addr), "r" (size)
1180                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1181         } else {
1182                 might_fault();
1183                 __asm__ __volatile__(
1184                         "move\t$4, %1\n\t"
1185                         "move\t$5, $0\n\t"
1186                         "move\t$6, %2\n\t"
1187                         __MODULE_JAL(__bzero)
1188                         "move\t%0, $6"
1189                         : "=r" (res)
1190                         : "r" (addr), "r" (size)
1191                         : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1192         }
1193
1194         return res;
1195 }
1196
1197 #define clear_user(addr, n)                                             \
1198 ({                                                                      \
1199         void __user * __cl_addr = (addr);                               \
1200         unsigned long __cl_size = (n);                                  \
1201         if (__cl_size && access_ok(VERIFY_WRITE,                        \
1202                                         __cl_addr, __cl_size))          \
1203                 __cl_size = __clear_user(__cl_addr, __cl_size);         \
1204         __cl_size;                                                      \
1205 })
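
/*
 * Illustrative sketch (not compiled as part of this header): a common use
 * of clear_user() is zero-filling the untouched tail of a user buffer after
 * a short copy, so no part of the buffer is left undefined.  All names below
 * are hypothetical.
 *
 *      #include <linux/kernel.h>
 *      #include <linux/errno.h>
 *      #include <linux/uaccess.h>
 *
 *      static ssize_t demo_read_fill(char __user *buf, size_t count,
 *                                    const char *kbuf, size_t avail)
 *      {
 *              size_t n = min(count, avail);
 *
 *              if (copy_to_user(buf, kbuf, n))
 *                      return -EFAULT;
 *              // pad the remainder of the user buffer with zero bytes
 *              if (clear_user(buf + n, count - n))
 *                      return -EFAULT;
 *              return count;
 *      }
 */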
1206
1207 extern long __strncpy_from_kernel_nocheck_asm(char *__to, const char __user *__from, long __len);
1208 extern long __strncpy_from_user_nocheck_asm(char *__to, const char __user *__from, long __len);
1209
1210 /*
1211  * __strncpy_from_user: - Copy a NUL-terminated string from userspace, with less checking.
1212  * @dst:   Destination address, in kernel space.  This buffer must be at
1213  *         least @count bytes long.
1214  * @src:   Source address, in user space.
1215  * @count: Maximum number of bytes to copy, including the trailing NUL.
1216  *
1217  * Copies a NUL-terminated string from userspace to kernel space.
1218  * Caller must check the specified block with access_ok() before calling
1219  * this function.
1220  *
1221  * On success, returns the length of the string (not including the trailing
1222  * NUL).
1223  *
1224  * If access to userspace fails, returns -EFAULT (some data may have been
1225  * copied).
1226  *
1227  * If @count is smaller than the length of the string, copies @count bytes
1228  * and returns @count.
1229  */
1230 static inline long
1231 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1232 {
1233         long res;
1234
1235         if (eva_kernel_access()) {
1236                 __asm__ __volatile__(
1237                         "move\t$4, %1\n\t"
1238                         "move\t$5, %2\n\t"
1239                         "move\t$6, %3\n\t"
1240                         __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1241                         "move\t%0, $2"
1242                         : "=r" (res)
1243                         : "r" (__to), "r" (__from), "r" (__len)
1244                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1245         } else {
1246                 might_fault();
1247                 __asm__ __volatile__(
1248                         "move\t$4, %1\n\t"
1249                         "move\t$5, %2\n\t"
1250                         "move\t$6, %3\n\t"
1251                         __MODULE_JAL(__strncpy_from_user_nocheck_asm)
1252                         "move\t%0, $2"
1253                         : "=r" (res)
1254                         : "r" (__to), "r" (__from), "r" (__len)
1255                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1256         }
1257
1258         return res;
1259 }
1260
1261 extern long __strncpy_from_kernel_asm(char *__to, const char __user *__from, long __len);
1262 extern long __strncpy_from_user_asm(char *__to, const char __user *__from, long __len);
1263
1264 /*
1265  * strncpy_from_user: - Copy a NUL-terminated string from userspace.
1266  * @dst:   Destination address, in kernel space.  This buffer must be at
1267  *         least @count bytes long.
1268  * @src:   Source address, in user space.
1269  * @count: Maximum number of bytes to copy, including the trailing NUL.
1270  *
1271  * Copies a NUL-terminated string from userspace to kernel space.
1272  *
1273  * On success, returns the length of the string (not including the trailing
1274  * NUL).
1275  *
1276  * If access to userspace fails, returns -EFAULT (some data may have been
1277  * copied).
1278  *
1279  * If @count is smaller than the length of the string, copies @count bytes
1280  * and returns @count.
1281  */
1282 static inline long
1283 strncpy_from_user(char *__to, const char __user *__from, long __len)
1284 {
1285         long res;
1286
1287         if (eva_kernel_access()) {
1288                 __asm__ __volatile__(
1289                         "move\t$4, %1\n\t"
1290                         "move\t$5, %2\n\t"
1291                         "move\t$6, %3\n\t"
1292                         __MODULE_JAL(__strncpy_from_kernel_asm)
1293                         "move\t%0, $2"
1294                         : "=r" (res)
1295                         : "r" (__to), "r" (__from), "r" (__len)
1296                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1297         } else {
1298                 might_fault();
1299                 __asm__ __volatile__(
1300                         "move\t$4, %1\n\t"
1301                         "move\t$5, %2\n\t"
1302                         "move\t$6, %3\n\t"
1303                         __MODULE_JAL(__strncpy_from_user_asm)
1304                         "move\t%0, $2"
1305                         : "=r" (res)
1306                         : "r" (__to), "r" (__from), "r" (__len)
1307                         : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1308         }
1309
1310         return res;
1311 }
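
/*
 * Illustrative sketch (not compiled as part of this header): fetching a
 * short NUL-terminated name from user space.  A return value equal to the
 * buffer size means no NUL was found within the buffer, i.e. the string was
 * truncated, which callers normally treat as an error.  All names below are
 * hypothetical.
 *
 *      #include <linux/errno.h>
 *      #include <linux/uaccess.h>
 *
 *      #define DEMO_NAME_LEN 32
 *
 *      // @name must point to at least DEMO_NAME_LEN bytes of kernel memory
 *      static int demo_get_name(char *name, const char __user *uname)
 *      {
 *              long len = strncpy_from_user(name, uname, DEMO_NAME_LEN);
 *
 *              if (len < 0)
 *                      return len;             // -EFAULT
 *              if (len == DEMO_NAME_LEN)
 *                      return -ENAMETOOLONG;   // truncated, no trailing NUL
 *              return 0;
 *      }
 */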
1312
1313 extern long __strlen_kernel_asm(const char __user *s);
1314 extern long __strlen_user_asm(const char __user *s);
1315
1316 /*
1317  * strlen_user: - Get the size of a string in user space.
1318  * @str: The string to measure.
1319  *
1320  * Context: User context only. This function may sleep if pagefaults are
1321  *          enabled.
1322  *
1323  * Get the size of a NUL-terminated string in user space.
1324  *
1325  * Returns the size of the string INCLUDING the terminating NUL.
1326  * On exception, returns 0.
1327  *
1328  * If there is a limit on the length of a valid string, you may wish to
1329  * consider using strnlen_user() instead.
1330  */
1331 static inline long strlen_user(const char __user *s)
1332 {
1333         long res;
1334
1335         if (eva_kernel_access()) {
1336                 __asm__ __volatile__(
1337                         "move\t$4, %1\n\t"
1338                         __MODULE_JAL(__strlen_kernel_asm)
1339                         "move\t%0, $2"
1340                         : "=r" (res)
1341                         : "r" (s)
1342                         : "$2", "$4", __UA_t0, "$31");
1343         } else {
1344                 might_fault();
1345                 __asm__ __volatile__(
1346                         "move\t$4, %1\n\t"
1347                         __MODULE_JAL(__strlen_user_asm)
1348                         "move\t%0, $2"
1349                         : "=r" (res)
1350                         : "r" (s)
1351                         : "$2", "$4", __UA_t0, "$31");
1352         }
1353
1354         return res;
1355 }
1356
1357 extern long __strnlen_kernel_nocheck_asm(const char __user *s, long n);
1358 extern long __strnlen_user_nocheck_asm(const char __user *s, long n);
1359
1360 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1361 static inline long __strnlen_user(const char __user *s, long n)
1362 {
1363         long res;
1364
1365         if (eva_kernel_access()) {
1366                 __asm__ __volatile__(
1367                         "move\t$4, %1\n\t"
1368                         "move\t$5, %2\n\t"
1369                         __MODULE_JAL(__strnlen_kernel_nocheck_asm)
1370                         "move\t%0, $2"
1371                         : "=r" (res)
1372                         : "r" (s), "r" (n)
1373                         : "$2", "$4", "$5", __UA_t0, "$31");
1374         } else {
1375                 might_fault();
1376                 __asm__ __volatile__(
1377                         "move\t$4, %1\n\t"
1378                         "move\t$5, %2\n\t"
1379                         __MODULE_JAL(__strnlen_user_nocheck_asm)
1380                         "move\t%0, $2"
1381                         : "=r" (res)
1382                         : "r" (s), "r" (n)
1383                         : "$2", "$4", "$5", __UA_t0, "$31");
1384         }
1385
1386         return res;
1387 }
1388
1389 extern long __strnlen_kernel_asm(const char __user *s, long n);
1390 extern long __strnlen_user_asm(const char __user *s, long n);
1391
1392 /*
1393  * strnlen_user: - Get the size of a string in user space.
1394  * @str: The string to measure.
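 * @n:   The maximum valid length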
1395  *
1396  * Context: User context only. This function may sleep if pagefaults are
1397  *          enabled.
1398  *
1399  * Get the size of a NUL-terminated string in user space.
1400  *
1401  * Returns the size of the string INCLUDING the terminating NUL.
1402  * On exception, returns 0.
1403  * If the string is too long, returns a value greater than @n.
1404  */
1405 static inline long strnlen_user(const char __user *s, long n)
1406 {
1407         long res;
1408
1409         might_fault();
1410         if (eva_kernel_access()) {
1411                 __asm__ __volatile__(
1412                         "move\t$4, %1\n\t"
1413                         "move\t$5, %2\n\t"
1414                         __MODULE_JAL(__strnlen_kernel_asm)
1415                         "move\t%0, $2"
1416                         : "=r" (res)
1417                         : "r" (s), "r" (n)
1418                         : "$2", "$4", "$5", __UA_t0, "$31");
1419         } else {
1420                 __asm__ __volatile__(
1421                         "move\t$4, %1\n\t"
1422                         "move\t$5, %2\n\t"
1423                         __MODULE_JAL(__strnlen_user_asm)
1424                         "move\t%0, $2"
1425                         : "=r" (res)
1426                         : "r" (s), "r" (n)
1427                         : "$2", "$4", "$5", __UA_t0, "$31");
1428         }
1429
1430         return res;
1431 }
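
/*
 * Illustrative sketch (not compiled as part of this header): using
 * strnlen_user() to bound a user string before acting on it.  A result of 0
 * means the access faulted; a result larger than the limit means no NUL was
 * found within that limit.  All names below are hypothetical.
 *
 *      #include <linux/errno.h>
 *      #include <linux/uaccess.h>
 *
 *      #define DEMO_STR_MAX 256
 *
 *      static long demo_user_string_size(const char __user *ustr)
 *      {
 *              long len = strnlen_user(ustr, DEMO_STR_MAX);
 *
 *              if (len == 0)
 *                      return -EFAULT;
 *              if (len > DEMO_STR_MAX)
 *                      return -E2BIG;
 *              return len;             // size including the trailing NUL
 *      }
 */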
1432
1433 #endif /* _ASM_UACCESS_H */