mn10300: get rid of zeroing
author Al Viro <viro@zeniv.linux.org.uk>
Tue, 21 Mar 2017 00:45:41 +0000 (20:45 -0400)
committer Al Viro <viro@zeniv.linux.org.uk>
Tue, 28 Mar 2017 22:23:48 +0000 (18:23 -0400)
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
arch/mn10300/include/asm/uaccess.h
arch/mn10300/lib/usercopy.c
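
In outline: the mn10300 __copy_user_zeroing() asm macro, which zero-filled the destination inside its exception fixup, is deleted; __copy_from_user_inatomic() now uses the plain __copy_user() and does no zeroing at all; and the zeroing for the checked copy_from_user() path moves to a single memset() of the uncopied tail in __generic_copy_from_user(). The caller-visible contract is unchanged, as in this hedged sketch (foo_args and foo_ioctl_set are hypothetical, kernel context assumed):

struct foo_args {
        unsigned int flags;
        unsigned int len;
};

static int foo_ioctl_set(struct foo_args *args,
                         const struct foo_args __user *uargs)
{
        /* copy_from_user() returns the number of bytes NOT copied;
         * on a short copy the uncopied tail of *args has already
         * been zeroed, so checking for nonzero is enough -- stale
         * kernel memory cannot leak through the tail. */
        if (copy_from_user(args, uargs, sizeof(*args)))
                return -EFAULT;
        return 0;
}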

index 2eeaa8c5d92c95bb28357b7567664da2a18cca9d..1c35be3d8fef362a3cc54daac06189e003a74210 100644 (file)
@@ -275,55 +275,19 @@ do {                                                                      \
        }                                                               \
 } while (0)
 
-#define __copy_user_zeroing(to, from, size)                            \
-do {                                                                   \
-       if (size) {                                                     \
-               void *__to = to;                                        \
-               const void *__from = from;                              \
-               int w;                                                  \
-               asm volatile(                                           \
-                       "0:     movbu   (%0),%3;\n"                     \
-                       "1:     movbu   %3,(%1);\n"                     \
-                       "       inc     %0;\n"                          \
-                       "       inc     %1;\n"                          \
-                       "       add     -1,%2;\n"                       \
-                       "       bne     0b;\n"                          \
-                       "2:\n"                                          \
-                       "       .section .fixup,\"ax\"\n"               \
-                       "3:\n"                                          \
-                       "       mov     %2,%0\n"                        \
-                       "       clr     %3\n"                           \
-                       "4:     movbu   %3,(%1);\n"                     \
-                       "       inc     %1;\n"                          \
-                       "       add     -1,%2;\n"                       \
-                       "       bne     4b;\n"                          \
-                       "       mov     %0,%2\n"                        \
-                       "       jmp     2b\n"                           \
-                       "       .previous\n"                            \
-                       "       .section __ex_table,\"a\"\n"            \
-                       "       .balign 4\n"                            \
-                       "       .long   0b,3b\n"                        \
-                       "       .long   1b,3b\n"                        \
-                       "       .previous\n"                            \
-                       : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
-                       : "0"(__from), "1"(__to), "2"(size)             \
-                       : "cc", "memory");                              \
-       }                                                               \
-} while (0)
-
 /* We let the __ versions of copy_from/to_user inline, because they're often
  * used in fast paths and have only a small space overhead.
  */
 static inline
-unsigned long __generic_copy_from_user_nocheck(void *to, const void *from,
+unsigned long __copy_from_user_inatomic(void *to, const void __user *from,
                                               unsigned long n)
 {
-       __copy_user_zeroing(to, from, n);
+       __copy_user(to, from, n);
        return n;
 }
 
 static inline
-unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
+unsigned long __copy_to_user_inatomic(void __user *to, const void *from,
                                             unsigned long n)
 {
        __copy_user(to, from, n);
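
A note on why "return n" is correct in the two _inatomic helpers above: __copy_user(), like the deleted __copy_user_zeroing(), binds its size argument as an in/out asm operand ("=r"(size) paired with "2"(size)), so the caller's n is decremented as bytes are copied and, after a fault, holds exactly the uncopied remainder. A minimal C model of that contract (model_copy_user() and fault_at are illustrative, not kernel code):

#include <string.h>

/* Copies up to fault_at bytes, the way the movbu loop stops at a
 * faulting user address, and returns the uncopied remainder -- the
 * value the asm fixup leaves behind in the size operand. */
static unsigned long model_copy_user(void *to, const void *from,
                                     unsigned long size,
                                     unsigned long fault_at)
{
        unsigned long done = size < fault_at ? size : fault_at;

        memcpy(to, from, done);         /* the part that succeeded */
        return size - done;             /* what the caller gets back */
}
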
@@ -331,110 +295,24 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
 }
 
 
-#if 0
-#error "don't use - these macros don't increment to & from pointers"
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user(to, from, size)   \
-do {                                           \
-       asm volatile(                           \
-               "       mov %0,a0;\n"           \
-               "0:     movbu (%1),d3;\n"       \
-               "1:     movbu d3,(%2);\n"       \
-               "       add -1,a0;\n"           \
-               "       bne 0b;\n"              \
-               "2:;"                           \
-               ".section .fixup,\"ax\"\n"      \
-               "3:     jmp 2b\n"               \
-               ".previous\n"                   \
-               ".section __ex_table,\"a\"\n"   \
-               "       .balign 4\n"            \
-               "       .long 0b,3b\n"          \
-               "       .long 1b,3b\n"          \
-               ".previous"                     \
-               :                               \
-               : "d"(size), "d"(to), "d"(from) \
-               : "d3", "a0");                  \
-} while (0)
-
-/* Optimize just a little bit when we know the size of the move. */
-#define __constant_copy_user_zeroing(to, from, size)   \
-do {                                                   \
-       asm volatile(                                   \
-               "       mov %0,a0;\n"                   \
-               "0:     movbu (%1),d3;\n"               \
-               "1:     movbu d3,(%2);\n"               \
-               "       add -1,a0;\n"                   \
-               "       bne 0b;\n"                      \
-               "2:;"                                   \
-               ".section .fixup,\"ax\"\n"              \
-               "3:     jmp 2b\n"                       \
-               ".previous\n"                           \
-               ".section __ex_table,\"a\"\n"           \
-               "       .balign 4\n"                    \
-               "       .long 0b,3b\n"                  \
-               "       .long 1b,3b\n"                  \
-               ".previous"                             \
-               :                                       \
-               : "d"(size), "d"(to), "d"(from)         \
-               : "d3", "a0");                          \
-} while (0)
-
-static inline
-unsigned long __constant_copy_to_user(void *to, const void *from,
-                                     unsigned long n)
-{
-       if (access_ok(VERIFY_WRITE, to, n))
-               __constant_copy_user(to, from, n);
-       return n;
-}
-
-static inline
-unsigned long __constant_copy_from_user(void *to, const void *from,
-                                       unsigned long n)
-{
-       if (access_ok(VERIFY_READ, from, n))
-               __constant_copy_user_zeroing(to, from, n);
-       return n;
-}
+extern unsigned long __generic_copy_to_user(void __user *, const void *,
+                                           unsigned long);
+extern unsigned long __generic_copy_from_user(void *, const void __user *,
+                                             unsigned long);
 
-static inline
-unsigned long __constant_copy_to_user_nocheck(void *to, const void *from,
-                                             unsigned long n)
+static inline unsigned long __copy_to_user(void __user *to, const void *from,
+                                               unsigned long n)
 {
-       __constant_copy_user(to, from, n);
-       return n;
+       might_fault();
+       return __copy_to_user_inatomic(to, from, n);
 }
 
-static inline
-unsigned long __constant_copy_from_user_nocheck(void *to, const void *from,
+static inline unsigned long __copy_from_user(void *to, const void __user *from,
                                                unsigned long n)
 {
-       __constant_copy_user_zeroing(to, from, n);
-       return n;
+       might_fault();
+       return __copy_from_user_inatomic(to, from, n);
 }
-#endif
-
-extern unsigned long __generic_copy_to_user(void __user *, const void *,
-                                           unsigned long);
-extern unsigned long __generic_copy_from_user(void *, const void __user *,
-                                             unsigned long);
-
-#define __copy_to_user_inatomic(to, from, n) \
-       __generic_copy_to_user_nocheck((to), (from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
-       __generic_copy_from_user_nocheck((to), (from), (n))
-
-#define __copy_to_user(to, from, n)                    \
-({                                                     \
-       might_fault();                                  \
-       __copy_to_user_inatomic((to), (from), (n));     \
-})
-
-#define __copy_from_user(to, from, n)                  \
-({                                                     \
-       might_fault();                                  \
-       __copy_from_user_inatomic((to), (from), (n));   \
-})
 
 
 #define copy_to_user(to, from, n)   __generic_copy_to_user((to), (from), (n))
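
The old statement-expression macros become static inline functions with proper __user annotations, but the split is the same: the sleepable __copy_{to,from}_user() call might_fault() and then delegate to the _inatomic variants, which skip that check and (on the from_user side) no longer zero anything. That keeps the _inatomic variants usable with page faults disabled, roughly as in this hypothetical caller (kernel context assumed):

static int peek_user_word(unsigned long *dst,
                          const unsigned long __user *src)
{
        unsigned long left;

        pagefault_disable();
        left = __copy_from_user_inatomic(dst, src, sizeof(*dst));
        pagefault_enable();

        /* left is the uncopied byte count; after this commit *dst
         * is NOT zero-filled on failure, so the caller handles it. */
        return left ? -EFAULT : 0;
}
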
index ce8899e5e171370ebc88c08a91727ef9232febf9..b48af8d4f38d267da6f515f7a9e3bb347c43f94b 100644 (file)
@@ -22,11 +22,12 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
 unsigned long
 __generic_copy_from_user(void *to, const void *from, unsigned long n)
 {
-       if (access_ok(VERIFY_READ, from, n))
-               __copy_user_zeroing(to, from, n);
-       else
-               memset(to, 0, n);
-       return n;
+       unsigned long res = n;
+       if (access_ok(VERIFY_READ, from, res))
+               __copy_user(to, from, res);
+       if (unlikely(res))
+               memset(to + n - res, 0, res);
+       return res;
 }
 
 /*
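
The arithmetic in the new __generic_copy_from_user() is worth spelling out: after __copy_user(to, from, res), res holds the uncopied byte count, so the first n - res bytes of the buffer are valid and memset(to + n - res, 0, res) clears exactly the unwritten tail. When access_ok() fails, res is still n and the same memset() degenerates to zeroing the whole buffer -- the behaviour of the deleted "else memset(to, 0, n)" branch. A self-contained sketch of that flow, reusing the illustrative model_copy_user() from above in place of the asm:

#include <string.h>

static unsigned long model_generic_copy_from_user(void *to,
                                                  const void *from,
                                                  unsigned long n, int ok,
                                                  unsigned long fault_at)
{
        unsigned long res = n;

        if (ok)                         /* access_ok() passed */
                res = model_copy_user(to, from, res, fault_at);
        if (res)                        /* short copy or bad range */
                memset((char *)to + n - res, 0, res);
        return res;
}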