powerpc: fix __strnlen_user in merge tree
author     Stephen Rothwell <sfr@canb.auug.org.au>
           Mon, 31 Oct 2005 07:39:20 +0000 (18:39 +1100)
committer  Stephen Rothwell <sfr@canb.auug.org.au>
           Tue, 1 Nov 2005 03:34:17 +0000 (14:34 +1100)
Change USER/KERNEL_DS so that the merged version of
__strnlen_user can be used, which allows us to complete
the removal of arch/ppc64/lib/.

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
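
For illustration only, the ppc64 range check that falls out of the new
segment values can be modelled by the userspace sketch below; the
TASK_SIZE_USER64 value, the sketch_access_ok() name and the sample
addresses are made up for the example and are not taken from the kernel.

/*
 * Userspace sketch of the new ppc64 __access_ok() with
 * KERNEL_DS = ~0UL and USER_DS = TASK_SIZE_USER64 - 1.
 * TASK_SIZE_USER64 below is a placeholder, not the real constant.
 */
#include <stdio.h>

#define TASK_SIZE_USER64 0x0000400000000000UL	/* placeholder */

typedef struct { unsigned long seg; } mm_segment_t;

static int sketch_access_ok(unsigned long addr, unsigned long size,
			    mm_segment_t segment)
{
	/* both the start address and the size must fit below the limit */
	return addr <= segment.seg && size <= segment.seg;
}

int main(void)
{
	mm_segment_t user_ds   = { TASK_SIZE_USER64 - 1 };
	mm_segment_t kernel_ds = { ~0UL };

	printf("%d\n", sketch_access_ok(0x10000000UL, 4096, user_ds));		/* 1 */
	printf("%d\n", sketch_access_ok(0xc000000000000000UL, 8, user_ds));	/* 0 */
	printf("%d\n", sketch_access_ok(0xc000000000000000UL, 8, kernel_ds));	/* 1 */
	return 0;
}

The check works because of the large gap between user and kernel
addresses, so addr + size never needs to be tested separately.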
arch/powerpc/lib/Makefile
arch/ppc64/Makefile
arch/ppc64/lib/Makefile [deleted file]
arch/ppc64/lib/string.S [deleted file]
include/asm-powerpc/uaccess.h

diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index e6b2be3bcec1469816a8046b49cb572b04b09913..dfb33915ad6169ebf9c4ac201f1cae4ab41ea18d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -9,7 +9,7 @@ endif
 obj-y                  += strcase.o
 obj-$(CONFIG_PPC32)    += div64.o copy_32.o checksum_32.o
 obj-$(CONFIG_PPC64)    += checksum_64.o copypage_64.o copyuser_64.o \
-                          memcpy_64.o usercopy_64.o mem_64.o
+                          memcpy_64.o usercopy_64.o mem_64.o string.o
 obj-$(CONFIG_PPC_ISERIES) += e2a.o
 obj-$(CONFIG_XMON)     += sstep.o
 
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index fdbd6f44adc07a1f01e57803b9b3d5717c37e488..a55a82d145d4c14a8a0f5c5d30a92e218b26d4fd 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -86,7 +86,6 @@ head-y := arch/ppc64/kernel/head.o
 head-y += arch/powerpc/kernel/fpu.o
 head-y += arch/powerpc/kernel/entry_64.o
 
-libs-y                         += arch/ppc64/lib/
 core-y                         += arch/ppc64/kernel/ arch/powerpc/kernel/
 core-y                         += arch/powerpc/mm/
 core-y                         += arch/powerpc/sysdev/
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile
deleted file mode 100644
index 42d5295..0000000
--- a/arch/ppc64/lib/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for ppc64-specific library files..
-#
-
-lib-y := string.o
diff --git a/arch/ppc64/lib/string.S b/arch/ppc64/lib/string.S
deleted file mode 100644
index e21a003..0000000
--- a/arch/ppc64/lib/string.S
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * String handling functions for PowerPC.
- *
- * Copyright (C) 1996 Paul Mackerras.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <asm/processor.h>
-#include <asm/errno.h>
-#include <asm/ppc_asm.h>
-
-_GLOBAL(strcpy)
-       addi    r5,r3,-1
-       addi    r4,r4,-1
-1:     lbzu    r0,1(r4)
-       cmpwi   0,r0,0
-       stbu    r0,1(r5)
-       bne     1b
-       blr
-
-_GLOBAL(strncpy)
-       cmpwi   0,r5,0
-       beqlr
-       mtctr   r5
-       addi    r6,r3,-1
-       addi    r4,r4,-1
-1:     lbzu    r0,1(r4)
-       cmpwi   0,r0,0
-       stbu    r0,1(r6)
-       bdnzf   2,1b            /* dec ctr, branch if ctr != 0 && !cr0.eq */
-       blr
-
-_GLOBAL(strcat)
-       addi    r5,r3,-1
-       addi    r4,r4,-1
-1:     lbzu    r0,1(r5)
-       cmpwi   0,r0,0
-       bne     1b
-       addi    r5,r5,-1
-1:     lbzu    r0,1(r4)
-       cmpwi   0,r0,0
-       stbu    r0,1(r5)
-       bne     1b
-       blr
-
-_GLOBAL(strcmp)
-       addi    r5,r3,-1
-       addi    r4,r4,-1
-1:     lbzu    r3,1(r5)
-       cmpwi   1,r3,0
-       lbzu    r0,1(r4)
-       subf.   r3,r0,r3
-       beqlr   1
-       beq     1b
-       blr
-
-_GLOBAL(strlen)
-       addi    r4,r3,-1
-1:     lbzu    r0,1(r4)
-       cmpwi   0,r0,0
-       bne     1b
-       subf    r3,r3,r4
-       blr
-
-_GLOBAL(memcmp)
-       cmpwi   0,r5,0
-       ble-    2f
-       mtctr   r5
-       addi    r6,r3,-1
-       addi    r4,r4,-1
-1:     lbzu    r3,1(r6)
-       lbzu    r0,1(r4)
-       subf.   r3,r0,r3
-       bdnzt   2,1b
-       blr
-2:     li      r3,0
-       blr
-
-_GLOBAL(memchr)
-       cmpwi   0,r5,0
-       ble-    2f
-       mtctr   r5
-       addi    r3,r3,-1
-1:     lbzu    r0,1(r3)
-       cmpw    0,r0,r4
-       bdnzf   2,1b
-       beqlr
-2:     li      r3,0
-       blr
-
-_GLOBAL(__clear_user)
-       addi    r6,r3,-4
-       li      r3,0
-       li      r5,0
-       cmplwi  0,r4,4
-       blt     7f
-       /* clear a single word */
-11:    stwu    r5,4(r6)
-       beqlr
-       /* clear word sized chunks */
-       andi.   r0,r6,3
-       add     r4,r0,r4
-       subf    r6,r0,r6
-       srwi    r0,r4,2
-       andi.   r4,r4,3
-       mtctr   r0
-       bdz     7f
-1:     stwu    r5,4(r6)
-       bdnz    1b
-       /* clear byte sized chunks */
-7:     cmpwi   0,r4,0
-       beqlr
-       mtctr   r4
-       addi    r6,r6,3
-8:     stbu    r5,1(r6)
-       bdnz    8b
-       blr
-90:    mr      r3,r4
-       blr
-91:    mfctr   r3
-       slwi    r3,r3,2
-       add     r3,r3,r4
-       blr
-92:    mfctr   r3
-       blr
-
-       .section __ex_table,"a"
-       .align  3
-       .llong  11b,90b
-       .llong  1b,91b
-       .llong  8b,92b
-       .text
-
-/* r3 = dst, r4 = src, r5 = count */
-_GLOBAL(__strncpy_from_user)
-       addi    r6,r3,-1
-       addi    r4,r4,-1
-       cmpwi   0,r5,0
-       beq     2f
-       mtctr   r5
-1:     lbzu    r0,1(r4)
-       cmpwi   0,r0,0
-       stbu    r0,1(r6)
-       bdnzf   2,1b            /* dec ctr, branch if ctr != 0 && !cr0.eq */
-       beq     3f
-2:     addi    r6,r6,1
-3:     subf    r3,r3,r6
-       blr
-99:    li      r3,-EFAULT
-       blr
-
-       .section __ex_table,"a"
-       .align  3
-       .llong  1b,99b
-       .text
-
-/* r3 = str, r4 = len (> 0) */
-_GLOBAL(__strnlen_user)
-       addi    r7,r3,-1
-       mtctr   r4              /* ctr = len */
-1:     lbzu    r0,1(r7)        /* get next byte */
-       cmpwi   0,r0,0
-       bdnzf   2,1b            /* loop if --ctr != 0 && byte != 0 */
-       addi    r7,r7,1
-       subf    r3,r3,r7        /* number of bytes we have looked at */
-       beqlr                   /* return if we found a 0 byte */
-       cmpw    0,r3,r4         /* did we look at all len bytes? */
-       blt     99f             /* if not, must have hit top */
-       addi    r3,r4,1         /* return len + 1 to indicate no null found */
-       blr
-99:    li      r3,0            /* bad address, return 0 */
-       blr
-
-       .section __ex_table,"a"
-       .align  3
-       .llong  1b,99b
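
For reference, the deleted ppc64 __strnlen_user() above behaved roughly
like this C sketch (the name strnlen_ppc64_sketch is made up; the
exception-table fault path, which returned 0 for a bad address, cannot
be expressed in plain C):

/*
 * Rough C equivalent of the removed assembly, documenting its return
 * convention only: bytes looked at including the NUL, or len + 1 when
 * no NUL was found within len bytes.
 */
static long strnlen_ppc64_sketch(const char *str, long len)
{
	long i;

	for (i = 0; i < len; i++)
		if (str[i] == '\0')
			return i + 1;	/* include the terminating NUL */
	return len + 1;			/* no NUL found within len bytes */
}
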
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 2ecc3e16e49e493d9cd553302af05b348a16db70..035338b0c5ee13ffe3df4e5f0d59aa86b5830d66 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
 
 #define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })
 
+#define KERNEL_DS      MAKE_MM_SEG(~0UL)
 #ifdef __powerpc64__
-#define KERNEL_DS      MAKE_MM_SEG(0UL)
-#define USER_DS                MAKE_MM_SEG(0xf000000000000000UL)
+/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
+#define USER_DS                MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
 #else
-#define KERNEL_DS      MAKE_MM_SEG(~0UL)
 #define USER_DS                MAKE_MM_SEG(TASK_SIZE - 1)
 #endif
 
 
 #ifdef __powerpc64__
 /*
- * Use the alpha trick for checking ranges:
- *
- * Is a address valid? This does a straightforward calculation rather
- * than tests.
- *
- * Address valid if:
- *  - "addr" doesn't have any high-bits set
- *  - AND "size" doesn't have any high-bits set
- *  - OR we are in kernel mode.
- *
- * We dont have to check for high bits in (addr+size) because the first
- * two checks force the maximum result to be below the start of the
- * kernel region.
+ * This check is sufficient because there is a large enough
+ * gap between user addresses and the kernel addresses
  */
 #define __access_ok(addr, size, segment)       \
-       (((segment).seg & (addr | size )) == 0)
+       (((addr) <= (segment).seg) && ((size) <= (segment).seg))
 
 #else
 
@@ -161,7 +150,10 @@ extern long __put_user_bad(void);
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
 
-#ifndef __powerpc64__
+#ifdef __powerpc64__
+#define __put_user_asm2(x, ptr, retval)                                \
+         __put_user_asm(x, ptr, retval, "std")
+#else /* __powerpc64__ */
 #define __put_user_asm2(x, addr, err)                          \
        __asm__ __volatile__(                                   \
                "1:     stw %1,0(%2)\n"                         \
@@ -178,9 +170,6 @@ extern long __put_user_bad(void);
                ".previous"                                     \
                : "=r" (err)                                    \
                : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
-#else /* __powerpc64__ */
-#define __put_user_asm2(x, ptr, retval)                                \
-         __put_user_asm(x, ptr, retval, "std")
 #endif /* __powerpc64__ */
 
 #define __put_user_size(x, ptr, size, retval)                  \
@@ -218,7 +207,7 @@ extern long __get_user_bad(void);
 
 #define __get_user_asm(x, addr, err, op)               \
        __asm__ __volatile__(                           \
-               "1:     "op" %1,0(%2)   # get_user\n"   \
+               "1:     "op" %1,0(%2)   # get_user\n"   \
                "2:\n"                                  \
                ".section .fixup,\"ax\"\n"              \
                "3:     li %0,%3\n"                     \
@@ -232,8 +221,11 @@ extern long __get_user_bad(void);
                : "=r" (err), "=r" (x)                  \
                : "b" (addr), "i" (-EFAULT), "0" (err))
 
-#ifndef __powerpc64__
-#define __get_user_asm2(x, addr, err)                  \
+#ifdef __powerpc64__
+#define __get_user_asm2(x, addr, err)                  \
+       __get_user_asm(x, addr, err, "ld")
+#else /* __powerpc64__ */
+#define __get_user_asm2(x, addr, err)                  \
        __asm__ __volatile__(                           \
                "1:     lwz %1,0(%2)\n"                 \
                "2:     lwz %1+1,4(%2)\n"               \
@@ -251,17 +243,14 @@ extern long __get_user_bad(void);
                ".previous"                             \
                : "=r" (err), "=&r" (x)                 \
                : "b" (addr), "i" (-EFAULT), "0" (err))
-#else
-#define __get_user_asm2(x, addr, err)                  \
-       __get_user_asm(x, addr, err, "ld")
 #endif /* __powerpc64__ */
 
 #define __get_user_size(x, ptr, size, retval)                  \
 do {                                                           \
        retval = 0;                                             \
        __chk_user_ptr(ptr);                                    \
-       if (size > sizeof(x))                                   \
-               (x) = __get_user_bad();                         \
+       if (size > sizeof(x))                                   \
+               (x) = __get_user_bad();                         \
        switch (size) {                                         \
        case 1: __get_user_asm(x, ptr, retval, "lbz"); break;   \
        case 2: __get_user_asm(x, ptr, retval, "lhz"); break;   \
@@ -300,7 +289,7 @@ do {                                                                \
        long __gu_err = -EFAULT;                                        \
        unsigned long  __gu_val = 0;                                    \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
-       might_sleep();                                                  \
+       might_sleep();                                                  \
        if (access_ok(VERIFY_READ, __gu_addr, (size)))                  \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
@@ -313,8 +302,9 @@ extern unsigned long __copy_tofrom_user(void __user *to,
                const void __user *from, unsigned long size);
 
 #ifndef __powerpc64__
-extern inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
+
+extern inline unsigned long copy_from_user(void *to,
+               const void __user *from, unsigned long n)
 {
        unsigned long over;
 
@@ -328,8 +318,8 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
        return n;
 }
 
-extern inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
+extern inline unsigned long copy_to_user(void __user *to,
+               const void *from, unsigned long n)
 {
        unsigned long over;
 
@@ -343,10 +333,23 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
        return n;
 }
 
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
 #else /* __powerpc64__ */
 
-static inline unsigned long
-__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+#define __copy_in_user(to, from, size) \
+       __copy_tofrom_user((to), (from), (size))
+
+extern unsigned long copy_from_user(void *to, const void __user *from,
+                                   unsigned long n);
+extern unsigned long copy_to_user(void __user *to, const void *from,
+                                 unsigned long n);
+extern unsigned long copy_in_user(void __user *to, const void __user *from,
+                                 unsigned long n);
+
+static inline unsigned long __copy_from_user_inatomic(void *to,
+               const void __user *from, unsigned long n)
 {
        if (__builtin_constant_p(n) && (n <= 8)) {
                unsigned long ret;
@@ -370,8 +373,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
        return __copy_tofrom_user((__force void __user *) to, from, n);
 }
 
-static inline unsigned long
-__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+static inline unsigned long __copy_to_user_inatomic(void __user *to,
+               const void *from, unsigned long n)
 {
        if (__builtin_constant_p(n) && (n <= 8)) {
                unsigned long ret;
@@ -397,8 +400,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
 
 #endif /* __powerpc64__ */
 
-static inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long size)
+static inline unsigned long __copy_from_user(void *to,
+               const void __user *from, unsigned long size)
 {
        might_sleep();
 #ifndef __powerpc64__
@@ -408,8 +411,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long size)
 #endif /* __powerpc64__ */
 }
 
-static inline unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long size)
+static inline unsigned long __copy_to_user(void __user *to,
+               const void *from, unsigned long size)
 {
        might_sleep();
 #ifndef __powerpc64__
@@ -419,21 +422,6 @@ __copy_to_user(void __user *to, const void *from, unsigned long size)
 #endif /* __powerpc64__ */
 }
 
-#ifndef __powerpc64__
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
-       __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
-                                   unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
-                                 unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
-                                 unsigned long n);
-#endif /* __powerpc64__ */
-
 extern unsigned long __clear_user(void __user *addr, unsigned long size);
 
 static inline unsigned long clear_user(void __user *addr, unsigned long size)
@@ -466,11 +454,7 @@ static inline long strncpy_from_user(char *dst, const char __user *src,
  *
  * Return 0 for error
  */
-#ifndef __powerpc64__
 extern int __strnlen_user(const char __user *str, long len, unsigned long top);
-#else /* __powerpc64__ */
-extern int __strnlen_user(const char __user *str, long len);
-#endif /* __powerpc64__ */
 
 /*
  * Returns the length of the string at str (including the null byte),
@@ -482,18 +466,11 @@ extern int __strnlen_user(const char __user *str, long len);
  */
 static inline int strnlen_user(const char __user *str, long len)
 {
-#ifndef __powerpc64__
        unsigned long top = current->thread.fs.seg;
 
        if ((unsigned long)str > top)
                return 0;
        return __strnlen_user(str, len, top);
-#else /* __powerpc64__ */
-       might_sleep();
-       if (likely(access_ok(VERIFY_READ, str, 1)))
-               return __strnlen_user(str, len);
-       return 0;
-#endif /* __powerpc64__ */
 }
 
 #define strlen_user(str)       strnlen_user((str), 0x7ffffffe)
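
The unified strnlen_user() wrapper above can be pictured with the
userspace sketch below; seg_top stands in for current->thread.fs.seg,
the scan() helper is a stand-in for the real __strnlen_user(), and its
behaviour when no NUL is found within len bytes is an assumption made
for the example.

/*
 * Userspace sketch of the wrapper shape: reject strings that start
 * beyond the segment top, otherwise scan at most len bytes and return
 * the length including the NUL byte, or 0 for error.
 */
static long scan(const char *str, long len, unsigned long top)
{
	long i;

	/* stop at the NUL, after len bytes, or at the segment top */
	for (i = 0; i < len && (unsigned long)(str + i) <= top; i++)
		if (str[i] == '\0')
			return i + 1;
	return 0;		/* assumption: treat "no NUL found" as an error */
}

static long strnlen_user_sketch(const char *str, long len, unsigned long seg_top)
{
	if ((unsigned long)str > seg_top)
		return 0;	/* string starts outside the segment */
	return scan(str, len, seg_top);
}

/* strlen_user() is just strnlen_user() with a very large length cap */
#define strlen_user_sketch(str, top)	strnlen_user_sketch((str), 0x7ffffffe, (top))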