/* MN10300 userspace access functions
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_XDS      MAKE_MM_SEG(0xBFFFFFFF)
#define KERNEL_DS       MAKE_MM_SEG(0x9FFFFFFF)
#define USER_DS         MAKE_MM_SEG(TASK_SIZE)

#define get_ds()        (KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))
#define __kernel_ds_p() (current_thread_info()->addr_limit.seg == 0x9FFFFFFF)
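
/* Illustrative sketch (not part of this file): the classic pattern for
 * temporarily widening the address limit so that code which takes __user
 * pointers can be pointed at a kernel buffer.  "do_read" and the
 * variables here are hypothetical.
 *
 *      mm_segment_t old_fs = get_fs();
 *
 *      set_fs(KERNEL_DS);
 *      err = do_read((char __user *)kernel_buf, len);
 *      set_fs(old_fs);
 */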

#define segment_eq(a, b) ((a).seg == (b).seg)

#define __addr_ok(addr) \
        ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned int size)
{
        int flag = 1, tmp;

        asm("   add     %3,%1   \n"     /* set C-flag if addr + size > 4Gb */
            "   bcs     0f      \n"
            "   cmp     %4,%1   \n"     /* jump if addr+size>limit (error) */
            "   bhi     0f      \n"
            "   clr     %0      \n"     /* mark okay */
            "0:                 \n"
            : "=r"(flag), "=&r"(tmp)
            : "1"(addr), "ir"(size),
              "r"(current_thread_info()->addr_limit.seg), "0"(flag)
            : "cc"
            );

        return flag;
}
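
/* Equivalent C for the check above (illustrative only):
 *
 *      unsigned long end = addr + size;
 *
 *      return (end < addr || end > limit) ? 1 : 0;
 *
 * i.e. the range is rejected if addr + size wraps past 4GB (the carry)
 * or runs beyond the current address limit; 0 means the range is okay.
 */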

#define __range_ok(addr, size) ___range_ok((unsigned long)(addr), (u32)(size))

#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
#define __access_ok(addr, size)     (__range_ok((addr), (size)) == 0)
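
/* Illustrative sketch: validate a user range once up front before any
 * raw accesses ("ubuf" and "len" are hypothetical arguments).
 *
 *      if (!access_ok(VERIFY_WRITE, ubuf, len))
 *              return -EFAULT;
 */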

static inline int verify_area(int type, const void *addr, unsigned long size)
{
        return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
        unsigned long insn, fixup;
};

/* Returns 0 if no fixup was found for the faulting instruction, and
 * nonzero otherwise. */
extern int fixup_exception(struct pt_regs *regs);
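
/* Illustrative sketch (hedged) of how the fault handler consumes the
 * table: if the faulting PC has an entry, execution resumes at the
 * fixup stub instead of oopsing.
 *
 *      if (fixup_exception(regs))
 *              return;         // handled; PC now points at the fixup
 */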

#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)))
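
/* Illustrative usage (the function and pointer are hypothetical): both
 * macros return 0 on success and -EFAULT on a faulting access.
 *
 *      int bump_counter(u32 __user *uptr)
 *      {
 *              u32 tmp;
 *
 *              if (get_user(tmp, uptr))
 *                      return -EFAULT;
 *              return put_user(tmp + 1, uptr);
 *      }
 */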

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
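
/* Illustrative sketch: check the whole area once, then use the unchecked
 * accessors per element ("uarr", "n", "val" and "i" are hypothetical).
 *
 *      if (!access_ok(VERIFY_READ, uarr, n * sizeof(u32)))
 *              return -EFAULT;
 *      for (i = 0; i < n; i++)
 *              if (__get_user(val, uarr + i))
 *                      return -EFAULT;
 */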

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized
 * for the common case of simply returning from the function in which
 * xxx_ret is used.
 */

#define put_user_ret(x, ptr, ret) \
        ({ if (put_user((x), (ptr)))    return (ret); })
#define get_user_ret(x, ptr, ret) \
        ({ if (get_user((x), (ptr)))    return (ret); })
#define __put_user_ret(x, ptr, ret) \
        ({ if (__put_user((x), (ptr)))  return (ret); })
#define __get_user_ret(x, ptr, ret) \
        ({ if (__get_user((x), (ptr)))  return (ret); })
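
/* Illustrative usage: expands to an early "return (ret);" from the
 * enclosing function if the access faults ("uptr" is hypothetical).
 *
 *      get_user_ret(val, uptr, -EFAULT);
 */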

/* Casting an address through __m() presents it to gcc as a large memory
 * operand, so the asm below is known to touch the referenced memory and
 * gcc will not cache or reorder accesses around it. */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_nocheck(x, ptr, size)        \
({                                              \
        __typeof__(*(ptr)) __gu_val;            \
        unsigned long __gu_addr;                \
        int __gu_err;                           \
        __gu_addr = (unsigned long) (ptr);      \
        switch (size) {                         \
        case 1:  __get_user_asm("bu"); break;   \
        case 2:  __get_user_asm("hu"); break;   \
        case 4:  __get_user_asm(""  ); break;   \
        default: __get_user_unknown(); break;   \
        }                                       \
        x = (__typeof__(*(ptr))) __gu_val;      \
        __gu_err;                               \
})

#define __get_user_check(x, ptr, size)                  \
({                                                      \
        __typeof__(*(ptr)) __gu_val;                    \
        unsigned long __gu_addr;                        \
        int __gu_err;                                   \
        __gu_addr = (unsigned long) (ptr);              \
        if (likely(__access_ok(__gu_addr, size))) {     \
                switch (size) {                         \
                case 1:  __get_user_asm("bu"); break;   \
                case 2:  __get_user_asm("hu"); break;   \
                case 4:  __get_user_asm(""  ); break;   \
                default: __get_user_unknown(); break;   \
                }                                       \
        } else {                                        \
                __gu_err = -EFAULT;                     \
                __gu_val = 0;                           \
        }                                               \
        x = (__typeof__(*(ptr))) __gu_val;              \
        __gu_err;                                       \
})

#define __get_user_asm(INSN)                                    \
({                                                              \
        asm volatile(                                           \
                "1:\n"                                          \
                "       mov"INSN"       %2,%1\n"                \
                "       mov             0,%0\n"                 \
                "2:\n"                                          \
                "       .section        .fixup,\"ax\"\n"        \
                "3:\n"                                          \
                "       mov             %3,%0\n"                \
                "       jmp             2b\n"                   \
                "       .previous\n"                            \
                "       .section        __ex_table,\"a\"\n"     \
                "       .balign         4\n"                    \
                "       .long           1b, 3b\n"               \
                "       .previous"                              \
                : "=&r" (__gu_err), "=&r" (__gu_val)            \
                : "m" (__m(__gu_addr)), "i" (-EFAULT));         \
})

extern int __get_user_unknown(void);

#define __put_user_nocheck(x, ptr, size)                        \
({                                                              \
        union {                                                 \
                __typeof__(*(ptr)) val;                         \
                u32 bits[2];                                    \
        } __pu_val;                                             \
        unsigned long __pu_addr;                                \
        int __pu_err;                                           \
        __pu_val.val = (x);                                     \
        __pu_addr = (unsigned long) (ptr);                      \
        switch (size) {                                         \
        case 1:  __put_user_asm("bu"); break;                   \
        case 2:  __put_user_asm("hu"); break;                   \
        case 4:  __put_user_asm(""  ); break;                   \
        case 8:  __put_user_asm8();    break;                   \
        default: __pu_err = __put_user_unknown(); break;        \
        }                                                       \
        __pu_err;                                               \
})

#define __put_user_check(x, ptr, size)                                  \
({                                                                      \
        union {                                                         \
                __typeof__(*(ptr)) val;                                 \
                u32 bits[2];                                            \
        } __pu_val;                                                     \
        unsigned long __pu_addr;                                        \
        int __pu_err;                                                   \
        __pu_val.val = (x);                                             \
        __pu_addr = (unsigned long) (ptr);                              \
        if (likely(__access_ok(__pu_addr, size))) {                     \
                switch (size) {                                         \
                case 1:  __put_user_asm("bu"); break;                   \
                case 2:  __put_user_asm("hu"); break;                   \
                case 4:  __put_user_asm(""  ); break;                   \
                case 8:  __put_user_asm8();    break;                   \
                default: __pu_err = __put_user_unknown(); break;        \
                }                                                       \
        } else {                                                        \
                __pu_err = -EFAULT;                                     \
        }                                                               \
        __pu_err;                                                       \
})

#define __put_user_asm(INSN)                                    \
({                                                              \
        asm volatile(                                           \
                "1:\n"                                          \
                "       mov"INSN"       %1,%2\n"                \
                "       mov             0,%0\n"                 \
                "2:\n"                                          \
                "       .section        .fixup,\"ax\"\n"        \
                "3:\n"                                          \
                "       mov             %3,%0\n"                \
                "       jmp             2b\n"                   \
                "       .previous\n"                            \
                "       .section        __ex_table,\"a\"\n"     \
                "       .balign         4\n"                    \
                "       .long           1b, 3b\n"               \
                "       .previous"                              \
                : "=&r" (__pu_err)                              \
                : "r" (__pu_val.val), "m" (__m(__pu_addr)),     \
                  "i" (-EFAULT)                                 \
                );                                              \
})

#define __put_user_asm8()                                               \
({                                                                      \
        asm volatile(                                                   \
                "1:     mov             %1,%3           \n"             \
                "2:     mov             %2,%4           \n"             \
                "       mov             0,%0            \n"             \
                "3:                                     \n"             \
                "       .section        .fixup,\"ax\"   \n"             \
                "4:                                     \n"             \
                "       mov             %5,%0           \n"             \
                "       jmp             3b              \n"             \
                "       .previous                       \n"             \
                "       .section        __ex_table,\"a\"\n"             \
                "       .balign         4               \n"             \
                "       .long           1b, 4b          \n"             \
                "       .long           2b, 4b          \n"             \
                "       .previous                       \n"             \
                : "=&r" (__pu_err)                                      \
                : "r" (__pu_val.bits[0]), "r" (__pu_val.bits[1]),       \
                  "m" (__m(__pu_addr)), "m" (__m(__pu_addr + 4)),       \
                  "i" (-EFAULT)                                         \
                );                                                      \
})

extern int __put_user_unknown(void);


/*
 * Copy To/From Userspace
 */
/* Generic arbitrary sized copy.  */
#define __copy_user(to, from, size)                                     \
do {                                                                    \
        if (size) {                                                     \
                void *__to = to;                                        \
                const void *__from = from;                              \
                int w;                                                  \
                asm volatile(                                           \
                        "0:     movbu   (%0),%3;\n"                     \
                        "1:     movbu   %3,(%1);\n"                     \
                        "       inc     %0;\n"                          \
                        "       inc     %1;\n"                          \
                        "       add     -1,%2;\n"                       \
                        "       bne     0b;\n"                          \
                        "2:\n"                                          \
                        "       .section .fixup,\"ax\"\n"               \
                        "3:     jmp     2b\n"                           \
                        "       .previous\n"                            \
                        "       .section __ex_table,\"a\"\n"            \
                        "       .balign 4\n"                            \
                        "       .long   0b,3b\n"                        \
                        "       .long   1b,3b\n"                        \
                        "       .previous\n"                            \
                        : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
                        : "0"(__from), "1"(__to), "2"(size)             \
                        : "memory");                                    \
        }                                                               \
} while (0)

#define __copy_user_zeroing(to, from, size)                             \
do {                                                                    \
        if (size) {                                                     \
                void *__to = to;                                        \
                const void *__from = from;                              \
                int w;                                                  \
                asm volatile(                                           \
                        "0:     movbu   (%0),%3;\n"                     \
                        "1:     movbu   %3,(%1);\n"                     \
                        "       inc     %0;\n"                          \
                        "       inc     %1;\n"                          \
                        "       add     -1,%2;\n"                       \
                        "       bne     0b;\n"                          \
                        "2:\n"                                          \
                        "       .section .fixup,\"ax\"\n"               \
                        "3:\n"                                          \
                        "       mov     %2,%0\n"                        \
                        "       clr     %3\n"                           \
                        "4:     movbu   %3,(%1);\n"                     \
                        "       inc     %1;\n"                          \
                        "       add     -1,%2;\n"                       \
                        "       bne     4b;\n"                          \
                        "       mov     %0,%2\n"                        \
                        "       jmp     2b\n"                           \
                        "       .previous\n"                            \
                        "       .section __ex_table,\"a\"\n"            \
                        "       .balign 4\n"                            \
                        "       .long   0b,3b\n"                        \
                        "       .long   1b,3b\n"                        \
                        "       .previous\n"                            \
                        : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
                        : "0"(__from), "1"(__to), "2"(size)             \
                        : "memory");                                    \
        }                                                               \
} while (0)
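
/* Note: on a fault, the fixup path in __copy_user_zeroing() zero-fills
 * the rest of the destination, matching the usual copy_from_user()
 * contract of never leaving uninitialised kernel memory behind. */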

/* We let the __ versions of copy_from/to_user be inlined, because they're
 * often used in fast paths and have only a small space overhead.
 */
static inline
unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
                                               unsigned long n)
{
        __copy_user_zeroing(to, from, n);
        return n;
}

static inline
unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
                                             unsigned long n)
{
        __copy_user(to, from, n);
        return n;
}


#if 0
#error "don't use - these macros don't increment to & from pointers"
/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user(to, from, size)    \
do {                                            \
        asm volatile(                           \
                "       mov %0,a0;\n"           \
                "0:     movbu (%1),d3;\n"       \
                "1:     movbu d3,(%2);\n"       \
                "       add -1,a0;\n"           \
                "       bne 0b;\n"              \
                "2:;"                           \
                ".section .fixup,\"ax\"\n"      \
                "3:     jmp 2b\n"               \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n"   \
                "       .balign 4\n"            \
                "       .long 0b,3b\n"          \
                "       .long 1b,3b\n"          \
                ".previous"                     \
                :                               \
                : "d"(size), "d"(to), "d"(from) \
                : "d3", "a0");                  \
} while (0)

/* Optimize just a little bit when we know the size of the move. */
#define __constant_copy_user_zeroing(to, from, size)    \
do {                                                    \
        asm volatile(                                   \
                "       mov %0,a0;\n"                   \
                "0:     movbu (%1),d3;\n"               \
                "1:     movbu d3,(%2);\n"               \
                "       add -1,a0;\n"                   \
                "       bne 0b;\n"                      \
                "2:;"                                   \
                ".section .fixup,\"ax\"\n"              \
                "3:     jmp 2b\n"                       \
                ".previous\n"                           \
                ".section __ex_table,\"a\"\n"           \
                "       .balign 4\n"                    \
                "       .long 0b,3b\n"                  \
                "       .long 1b,3b\n"                  \
                ".previous"                             \
                :                                       \
                : "d"(size), "d"(to), "d"(from)         \
                : "d3", "a0");                          \
} while (0)

static inline
unsigned long __constant_copy_to_user(void *to, const void *from,
                                      unsigned long n)
{
        if (access_ok(VERIFY_WRITE, to, n))
                __constant_copy_user(to, from, n);
        return n;
}

static inline
unsigned long __constant_copy_from_user(void *to, const void *from,
                                        unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n))
                __constant_copy_user_zeroing(to, from, n);
        return n;
}

static inline
unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
                                              unsigned long n)
{
        __constant_copy_user(to, from, n);
        return n;
}

static inline
unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
                                                unsigned long n)
{
        __constant_copy_user_zeroing(to, from, n);
        return n;
}
#endif

extern unsigned long __generic_copy_to_user(void __user *, const void *,
                                            unsigned long);
extern unsigned long __generic_copy_from_user(void *, const void __user *,
                                              unsigned long);

#define __copy_to_user_inatomic(to, from, n) \
        __generic_copy_to_user_nocheck((to), (from), (n))
#define __copy_from_user_inatomic(to, from, n) \
        __generic_copy_from_user_nocheck((to), (from), (n))

#define __copy_to_user(to, from, n)                     \
({                                                      \
        might_sleep();                                  \
        __copy_to_user_inatomic((to), (from), (n));     \
})

#define __copy_from_user(to, from, n)                   \
({                                                      \
        might_sleep();                                  \
        __copy_from_user_inatomic((to), (from), (n));   \
})
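
/* Illustrative sketch: the __ copy variants skip access_ok(), so the
 * caller supplies it ("ubuf", "kbuf" and "len" are hypothetical).  A
 * nonzero return is the number of bytes left uncopied.
 *
 *      if (!access_ok(VERIFY_READ, ubuf, len) ||
 *          __copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 */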


#define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n))
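
/* Illustrative usage (hypothetical names): the checked copies return
 * the number of bytes that could not be copied, so zero means success.
 *
 *      if (copy_from_user(&karg, uarg, sizeof(karg)))
 *              return -EFAULT;
 */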

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long __strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *str, long n);
#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
extern unsigned long clear_user(void __user *mem, unsigned long len);
extern unsigned long __clear_user(void __user *mem, unsigned long len);
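
/* Illustrative sketch ("uname" is hypothetical): strncpy_from_user()
 * returns the length of the string copied (assumed: not counting the
 * trailing NUL), or -EFAULT on a bad address.
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;
 */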

#endif /* _ASM_UACCESS_H */