git.karo-electronics.de Git - linux-beck.git/commitdiff
Merge branch 'uaccess' into fixes
author Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 11 Sep 2015 18:18:28 +0000 (19:18 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 11 Sep 2015 18:18:28 +0000 (19:18 +0100)
31 files changed:
arch/arm/Kconfig
arch/arm/include/asm/assembler.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/pgtable-2level-hwdef.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/armksyms.c
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/head.S
arch/arm/kernel/process.c
arch/arm/kernel/swp_emulate.c
arch/arm/kernel/traps.c
arch/arm/lib/clear_user.S
arch/arm/lib/copy_from_user.S
arch/arm/lib/copy_to_user.S
arch/arm/lib/csumpartialcopyuser.S
arch/arm/lib/uaccess_with_memcpy.c
arch/arm/mm/abort-ev4.S
arch/arm/mm/abort-ev5t.S
arch/arm/mm/abort-ev5tj.S
arch/arm/mm/abort-ev6.S
arch/arm/mm/abort-ev7.S
arch/arm/mm/abort-lv4t.S
arch/arm/mm/abort-macro.S
arch/arm/mm/mmu.c
arch/arm/mm/pgd.c
arch/arm/nwfpe/entry.S
arch/arm/xen/hypercall.S

index 1c5021002fe40b06a2e57daad459bf1b4181b822..a7a2e328edf9bc433be7b49ba7e2375983b8eb83 100644 (file)
@@ -1700,6 +1700,21 @@ config HIGHPTE
          consumed by page tables.  Setting this option will allow
          user-space 2nd level page tables to reside in high memory.
 
+config CPU_SW_DOMAIN_PAN
+       bool "Enable use of CPU domains to implement privileged no-access"
+       depends on MMU && !ARM_LPAE
+       default y
+       help
+         Increase kernel security by preventing normal kernel accesses
+         from reaching userspace addresses.  This can help stop
+         use-after-free bugs from becoming an exploitable privilege escalation
+         by ensuring that magic values (such as LIST_POISON) will always
+         fault when dereferenced.
+
+         CPUs with low-vector mappings use a best-effort implementation.
+         Their lower 1MB needs to remain accessible for the vectors, but
+         the remainder of userspace will become appropriately inaccessible.
+
 config HW_PERF_EVENTS
        bool "Enable hardware performance counter support for perf events"
        depends on PERF_EVENTS
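
The help text's LIST_POISON example works because list-poison values are small constants in the lower, user half of the ARM address space; once the user domain is switched to "no access", a stale dereference takes a domain fault instead of quietly reading memory an attacker may have mapped there. A minimal C sketch, assuming the poison constants used by include/linux/poison.h of this era (with POISON_POINTER_DELTA taken as 0):

/* Assumed poison values; see include/linux/poison.h. */
#define LIST_POISON1	((void *) 0x00100100)
#define LIST_POISON2	((void *) 0x00200200)

struct list_head { struct list_head *next, *prev; };

/* After deletion the links point into the user half of the address
 * space, so with CPU_SW_DOMAIN_PAN a use-after-free access such as
 * entry->next->prev faults rather than reading user-mapped memory.
 */
static void poison_links(struct list_head *entry)
{
	entry->next = (struct list_head *) LIST_POISON1;
	entry->prev = (struct list_head *) LIST_POISON2;
}
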
index 4abe57279c66f0ecf2c96c116ae7ce839e646b4c..9007c518d1d8a1fe76c9a960526ae1b34d2710f2 100644 (file)
@@ -445,6 +445,48 @@ THUMB(     orr     \reg , \reg , #PSR_T_BIT        )
 #endif
        .endm
 
+       .macro  uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Whenever we re-enter userspace, the domains should always be
+        * set appropriately.
+        */
+       mov     \tmp, #DACR_UACCESS_DISABLE
+       mcr     p15, 0, \tmp, c3, c0, 0         @ Set domain register
+       .if     \isb
+       instr_sync
+       .endif
+#endif
+       .endm
+
+       .macro  uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /*
+        * Whenever we re-enter userspace, the domains should always be
+        * set appropriately.
+        */
+       mov     \tmp, #DACR_UACCESS_ENABLE
+       mcr     p15, 0, \tmp, c3, c0, 0
+       .if     \isb
+       instr_sync
+       .endif
+#endif
+       .endm
+
+       .macro  uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       mrc     p15, 0, \tmp, c3, c0, 0
+       str     \tmp, [sp, #S_FRAME_SIZE]
+#endif
+       .endm
+
+       .macro  uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       ldr     r0, [sp, #S_FRAME_SIZE]
+       mcr     p15, 0, r0, c3, c0, 0
+#endif
+       .endm
+
        .irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
        .macro  ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
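
All four macros above reduce to the same CP15 operation: an MCR to register c3 (the Domain Access Control Register, DACR), optionally followed by an instruction barrier so the new permissions take effect before the next load or store. A hedged C rendering of the core operation (ARMv7, GCC inline assembly; the series' real C-side helpers, get_domain()/set_domain(), appear in asm/domain.h below):

/* Sketch of what uaccess_enable/uaccess_disable expand to.  The MCR
 * writes the DACR; the ISB mirrors the instr_sync in the macros above.
 */
static inline void dacr_write_isb(unsigned int val)
{
	asm volatile("mcr	p15, 0, %0, c3, c0, 0" : : "r" (val) : "memory");
	asm volatile("isb" : : : "memory");
}
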
index 6ddbe446425e11524d927b5cd8b479bcd2419238..fc8ba1663601e0743a05b7cda52703df9f9bc07a 100644 (file)
@@ -12,6 +12,7 @@
 
 #ifndef __ASSEMBLY__
 #include <asm/barrier.h>
+#include <asm/thread_info.h>
 #endif
 
 /*
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL  0
-#define DOMAIN_TABLE   0
 #define DOMAIN_USER    1
 #define DOMAIN_IO      2
 #else
 #define DOMAIN_KERNEL  2
-#define DOMAIN_TABLE   2
 #define DOMAIN_USER    1
 #define DOMAIN_IO      0
 #endif
+#define DOMAIN_VECTORS 3
 
 /*
  * Domain types
 #define DOMAIN_MANAGER 1
 #endif
 
-#define domain_val(dom,type)   ((type) << (2*(dom)))
+#define domain_mask(dom)       ((3) << (2 * (dom)))
+#define domain_val(dom,type)   ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+       (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+        domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+       (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+        domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+        domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+        domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+       domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+       domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+       domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE   \
+       (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE    \
+       (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+       unsigned int domain;
+
+       asm(
+       "mrc    p15, 0, %0, c3, c0      @ get domain"
+        : "=r" (domain)
+        : "m" (current_thread_info()->cpu_domain));
+
+       return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
        asm volatile(
        "mcr    p15, 0, %0, c3, c0      @ set domain"
-         : : "r" (val));
+         : : "r" (val) : "memory");
        isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)                                        \
        do {                                                    \
-       struct thread_info *thread = current_thread_info();     \
-       unsigned int domain = thread->cpu_domain;               \
-       domain &= ~domain_val(dom, DOMAIN_MANAGER);             \
-       thread->cpu_domain = domain | domain_val(dom, type);    \
-       set_domain(thread->cpu_domain);                         \
+               unsigned int domain = get_domain();             \
+               domain &= ~domain_mask(dom);                    \
+               domain = domain | domain_val(dom, type);        \
+               set_domain(domain);                             \
        } while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)  { }
 #endif
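
The new constants are easy to sanity-check numerically. Assuming the non-CONFIG_IO_36 domain numbering above and CONFIG_CPU_USE_DOMAINS disabled (so DOMAIN_MANAGER is defined as 1, the same as DOMAIN_CLIENT), the values reduce to two small constants that differ only in the 2-bit user field:

/* Compile-time check under the stated assumptions (C11). */
enum { DOMAIN_KERNEL = 0, DOMAIN_USER = 1, DOMAIN_IO = 2, DOMAIN_VECTORS = 3 };
enum { DOMAIN_NOACCESS = 0, DOMAIN_CLIENT = 1, DOMAIN_MANAGER = 1 };

#define domain_val(dom, type)	((type) << (2 * (dom)))

_Static_assert((domain_val(DOMAIN_USER, DOMAIN_NOACCESS) |
		domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
		domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
		domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) == 0x51,
	       "DACR_INIT and DACR_UACCESS_DISABLE with PAN");

_Static_assert((domain_val(DOMAIN_USER, DOMAIN_CLIENT) |
		domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |
		domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
		domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)) == 0x55,
	       "DACR_UACCESS_ENABLE");

Flipping between those two values is exactly what uaccess_disable/uaccess_enable and the asm/uaccess.h helpers do on every kernel entry, exit and user access.
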
 
index 5eed82809d82b7aa9c74670fd9c9624bbd930803..6795368ad0238068c55fa4c8b56e0d67d9b9a5cf 100644 (file)
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
+({                                                             \
+       unsigned int __ua_flags;                                \
        smp_mb();                                               \
        prefetchw(uaddr);                                       \
+       __ua_flags = uaccess_save_and_enable();                 \
        __asm__ __volatile__(                                   \
        "1:     ldrex   %1, [%3]\n"                             \
        "       " insn "\n"                                     \
        __futex_atomic_ex_table("%5")                           \
        : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
        : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)              \
-       : "cc", "memory")
+       : "cc", "memory");                                      \
+       uaccess_restore(__ua_flags);                            \
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
 {
+       unsigned int __ua_flags;
        int ret;
        u32 val;
 
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        smp_mb();
        /* Prefetching cannot fault */
        prefetchw(uaddr);
+       __ua_flags = uaccess_save_and_enable();
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
        "1:     ldrex   %1, [%4]\n"
        "       teq     %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "=&r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
+       uaccess_restore(__ua_flags);
        smp_mb();
 
        *uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
+({                                                             \
+       unsigned int __ua_flags = uaccess_save_and_enable();    \
        __asm__ __volatile__(                                   \
        "1:     " TUSER(ldr) "  %1, [%3]\n"                     \
        "       " insn "\n"                                     \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        __futex_atomic_ex_table("%5")                           \
        : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
        : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)              \
-       : "cc", "memory")
+       : "cc", "memory");                                      \
+       uaccess_restore(__ua_flags);                            \
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                              u32 oldval, u32 newval)
 {
+       unsigned int __ua_flags;
        int ret = 0;
        u32 val;
 
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        preempt_disable();
+       __ua_flags = uaccess_save_and_enable();
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
        "1:     " TUSER(ldr) "  %1, [%4]\n"
        "       teq     %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "+r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
        : "cc", "memory");
+       uaccess_restore(__ua_flags);
 
        *uval = val;
        preempt_enable();
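
Both futex variants now follow one discipline: save the DACR and enable user access immediately before the exception-table-protected instructions, and restore it immediately after, keeping the user-access window as narrow as possible. A hedged sketch of the shape (read_user_word() is hypothetical, not a kernel API; __get_user_asm_word is the real macro from asm/uaccess.h):

#include <linux/types.h>
#include <asm/uaccess.h>

/* Hypothetical helper showing the bracketing pattern used above. */
static int read_user_word(u32 *dst, const u32 __user *uaddr)
{
	unsigned int __ua_flags;
	u32 val;
	int err = 0;

	__ua_flags = uaccess_save_and_enable();	/* open the user window */
	__get_user_asm_word(val, (unsigned long)uaddr, err);
	uaccess_restore(__ua_flags);		/* close it again */

	if (!err)
		*dst = val;
	return err;
}
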
index 5e68278e953e2513f904cc253d200705c999a673..d0131ee6f6af920d5c3bc6479c2e515e1d706539 100644 (file)
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE           (_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4               (_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)          (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK                PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION         (_AT(pmdval_t, 1) << 9)         /* v5 */
 /*
  *   - section
index bd32eded3e5061b49048e3110902b7edc63e3638..ae02e68b61fcaa14799c26a3786cea11696c59e5 100644 (file)
@@ -25,7 +25,6 @@
 struct task_struct;
 
 #include <asm/types.h>
-#include <asm/domain.h>
 
 typedef unsigned long mm_segment_t;
 
@@ -74,9 +73,6 @@ struct thread_info {
        .flags          = 0,                                            \
        .preempt_count  = INIT_PREEMPT_COUNT,                           \
        .addr_limit     = KERNEL_DS,                                    \
-       .cpu_domain     = domain_val(DOMAIN_USER, DOMAIN_MANAGER) |     \
-                         domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |   \
-                         domain_val(DOMAIN_IO, DOMAIN_CLIENT),         \
 }
 
 #define init_thread_info       (init_thread_union.thread_info)
index 74b17d09ef7aa54cb98d22ccf5f068b4c39971be..01bae13b2cea005f7b3d4c828040f368bc04fdab 100644 (file)
@@ -49,6 +49,35 @@ struct exception_table_entry
 
 extern int fixup_exception(struct pt_regs *regs);
 
+/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (e.g., via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       unsigned int old_domain = get_domain();
+
+       /* Set the current domain access to permit user accesses */
+       set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+                  domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+       return old_domain;
+#else
+       return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+       /* Restore the user access mask */
+       set_domain(flags);
+#endif
+}
+
 /*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
                register typeof(x) __r2 asm("r2");                      \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
+               unsigned int __ua_flags = uaccess_save_and_enable();    \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        if (sizeof((x)) >= 8)                           \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
+               uaccess_restore(__ua_flags);                            \
                x = (typeof(*(p))) __r2;                                \
                __e;                                                    \
        })
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
                register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
                register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
+               unsigned int __ua_flags = uaccess_save_and_enable();    \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
                        __put_user_x(__r2, __p, __e, __l, 1);           \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
                        break;                                          \
                default: __e = __put_user_bad(); break;                 \
                }                                                       \
+               uaccess_restore(__ua_flags);                            \
                __e;                                                    \
        })
 
@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
 do {                                                                   \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
+       unsigned int __ua_flags;                                        \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
+       __ua_flags = uaccess_save_and_enable();                         \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val, __gu_addr, err);  break;  \
        case 2: __get_user_asm_half(__gu_val, __gu_addr, err);  break;  \
        case 4: __get_user_asm_word(__gu_val, __gu_addr, err);  break;  \
        default: (__gu_val) = __get_user_bad();                         \
        }                                                               \
+       uaccess_restore(__ua_flags);                                    \
        (x) = (__typeof__(*(ptr)))__gu_val;                             \
 } while (0)
 
@@ -381,9 +417,11 @@ do {                                                                       \
 #define __put_user_err(x, ptr, err)                                    \
 do {                                                                   \
        unsigned long __pu_addr = (unsigned long)(ptr);                 \
+       unsigned int __ua_flags;                                        \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
        might_fault();                                                  \
+       __ua_flags = uaccess_save_and_enable();                         \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);  break;  \
        case 2: __put_user_asm_half(__pu_val, __pu_addr, err);  break;  \
@@ -391,6 +429,7 @@ do {                                                                        \
        case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break;  \
        default: __put_user_bad();                                      \
        }                                                               \
+       uaccess_restore(__ua_flags);                                    \
 } while (0)
 
 #define __put_user_asm_byte(x, __pu_addr, err)                 \
@@ -474,11 +513,46 @@ do {                                                                      \
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+       unsigned int __ua_flags = uaccess_save_and_enable();
+       n = arm_copy_from_user(to, from, n);
+       uaccess_restore(__ua_flags);
+       return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+       unsigned int __ua_flags = uaccess_save_and_enable();
+       n = arm_copy_to_user(to, from, n);
+       uaccess_restore(__ua_flags);
+       return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+       unsigned int __ua_flags = uaccess_save_and_enable();
+       n = arm_clear_user(addr, n);
+       uaccess_restore(__ua_flags);
+       return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)  (memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)    (memcpy((void __force *)to, from, n), 0)
@@ -511,6 +585,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
        return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
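
The renames matter less than the shape: arm_copy_from_user() and friends remain the raw assembly routines, while the inline wrappers above open and close the user-access window around them, so existing callers compile unchanged. A hedged, driver-style usage sketch (my_ioctl() and struct my_args are illustrative only, not part of this series):

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/uaccess.h>

struct my_args {		/* hypothetical ioctl argument block */
	u32 cmd;
	u32 len;
};

static long my_ioctl(void __user *argp)
{
	struct my_args args;

	/* __copy_from_user() enables user access only for the duration
	 * of arm_copy_from_user(), then disables it again; the caller
	 * needs no PAN awareness at all.
	 */
	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	return 0;
}
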
index 5e5a51a99e68ec77b38b8101611bc853eb8496af..f89811fb9a55f3a490c3633ef99ef52745c58129 100644 (file)
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
index cb4fb1e69778603d41356f3ed7a98695f4cc0cdb..3e1c26eb32b43e13a5fa3e70b2d09f91a07ebd4e 100644 (file)
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #endif
 
-       .macro  svc_entry, stack_hole=0, trace=1
+       .macro  svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart               )
  UNWIND(.save {r0 - pc}                )
-       sub     sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+       sub     sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(        str     r0, [sp]        )       @ temporarily saved
  SPFIX(        mov     r0, sp          )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
        ldmia   r0, {r3 - r5}
        add     r7, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r6, #-1                 @  ""  ""      ""       ""
-       add     r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+       add     r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(        addeq   r2, r2, #4      )
        str     r3, [sp, #-4]!          @ save the "real" r0 copied
                                        @ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
        @
        stmia   r7, {r2 - r6}
 
+       uaccess_save r0
+       .if \uaccess
+       uaccess_disable r0
+       .endif
+
        .if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
 
        .align  5
 __dabt_svc:
-       svc_entry
+       svc_entry uaccess=0
        mov     r2, sp
        dabt_helper
  THUMB(        ldr     r5, [sp, #S_PSR]        )       @ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-       .macro  usr_entry, trace=1
+       .macro  usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )       @ don't unwind the user space
        sub     sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
  ARM(  stmdb   r0, {sp, lr}^                   )
  THUMB(        store_user_sp_lr r0, r1, S_SP - S_PC    )
 
+       .if \uaccess
+       uaccess_disable ip
+       .endif
+
        @ Enable the alignment trap while in kernel mode
  ATRAP(        teq     r8, r7)
  ATRAP( mcrne  p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
 
        .align  5
 __dabt_usr:
-       usr_entry
+       usr_entry uaccess=0
        kuser_cmpxchg_check
        mov     r2, sp
        dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
 
        .align  5
 __und_usr:
-       usr_entry
+       usr_entry uaccess=0
 
        mov     r2, r4
        mov     r3, r5
@@ -484,6 +493,8 @@ __und_usr:
 1:     ldrt    r0, [r4]
  ARM_BE8(rev   r0, r0)                         @ little endian instruction
 
+       uaccess_disable ip
+
        @ r0 = 32-bit ARM instruction which caused the exception
        @ r2 = PC value for the following instruction (:= regs->ARM_pc)
        @ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
 2:     ldrht   r5, [r4]
 ARM_BE8(rev16  r5, r5)                         @ little endian instruction
        cmp     r5, #0xe800                     @ 32bit instruction if xx != 0
-       blo     __und_usr_fault_16              @ 16bit undefined instruction
+       blo     __und_usr_fault_16_pan          @ 16bit undefined instruction
 3:     ldrht   r0, [r2]
 ARM_BE8(rev16  r0, r0)                         @ little endian instruction
+       uaccess_disable ip
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        str     r2, [sp, #S_PC]                 @ it's a 2x16bit instr, update
        orr     r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
 __und_usr_fault_32:
        mov     r1, #4
        b       1f
+__und_usr_fault_16_pan:
+       uaccess_disable ip
 __und_usr_fault_16:
        mov     r1, #2
 1:     mov     r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
        ldr     r4, [r2, #TI_TP_VALUE]
        ldr     r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+       mrc     p15, 0, r6, c3, c0, 0           @ Get domain register
+       str     r6, [r1, #TI_CPU_DOMAIN]        @ Save old domain register
        ldr     r6, [r2, #TI_CPU_DOMAIN]
 #endif
        switch_tls r1, r4, r5, r3, r7
index b48dd4f37f8067e781ee3e135ed7aff27940371f..61974dfba13244b0fb24c57ac4fb13ea81bd5058 100644 (file)
@@ -174,6 +174,8 @@ ENTRY(vector_swi)
  USER( ldr     scno, [lr, #-4]         )       @ get SWI instruction
 #endif
 
+       uaccess_disable tbl
+
        adr     tbl, sys_call_table             @ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
index 1a0045abead7562be1e27163e0aee3c6afbe9b40..0d22ad206d5230ba05a40b4101bb1dd2e4addd10 100644 (file)
        msr     cpsr_c, \rtemp                  @ switch back to the SVC mode
        .endm
 
-#ifndef CONFIG_THUMB2_KERNEL
+
        .macro  svc_exit, rpsr, irq = 0
        .if     \irq != 0
        @ IRQs already off
        blne    trace_hardirqs_off
 #endif
        .endif
+       uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode SVC restore
        msr     spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
        @ We must avoid clrex due to Cortex-A15 erratum #830321
        strex   r1, r2, [r0]                    @ clear the exclusive monitor
 #endif
        ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
+#else
+       @ Thumb mode SVC restore
+       ldr     lr, [sp, #S_SP]                 @ top of the stack
+       ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
+
+       @ We must avoid clrex due to Cortex-A15 erratum #830321
+       strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor
+
+       stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
+       ldmia   sp, {r0 - r12}
+       mov     sp, lr
+       ldr     lr, [sp], #4
+       rfeia   sp!
+#endif
        .endm
 
        @
        @ on the stack remains correct).
        @
        .macro  svc_exit_via_fiq
+       uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode restore
        mov     r0, sp
        ldmib   r0, {r1 - r14}  @ abort is deadly from here onward (it will
                                @ clobber state restored below)
        msr     spsr_cxsf, r9
        ldr     r0, [r0, #S_R0]
        ldmia   r8, {pc}^
+#else
+       @ Thumb mode restore
+       add     r0, sp, #S_R2
+       ldr     lr, [sp, #S_LR]
+       ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+                               @ clobber state restored below)
+       ldmia   r0, {r2 - r12}
+       mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+       msr     cpsr_c, r1
+       sub     r0, #S_R2
+       add     r8, r0, #S_PC
+       ldmia   r0, {r0 - r1}
+       rfeia   r8
+#endif
        .endm
 
+
        .macro  restore_user_regs, fast = 0, offset = 0
+       uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+       @ ARM mode restore
        mov     r2, sp
        ldr     r1, [r2, #\offset + S_PSR]      @ get calling cpsr
        ldr     lr, [r2, #\offset + S_PC]!      @ get pc
                                                @ after ldm {}^
        add     sp, sp, #\offset + S_FRAME_SIZE
        movs    pc, lr                          @ return & move spsr_svc into cpsr
-       .endm
-
-#else  /* CONFIG_THUMB2_KERNEL */
-       .macro  svc_exit, rpsr, irq = 0
-       .if     \irq != 0
-       @ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-       @ The parent context IRQs must have been enabled to get here in
-       @ the first place, so there's no point checking the PSR I bit.
-       bl      trace_hardirqs_on
-#endif
-       .else
-       @ IRQs off again before pulling preserved data off the stack
-       disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-       tst     \rpsr, #PSR_I_BIT
-       bleq    trace_hardirqs_on
-       tst     \rpsr, #PSR_I_BIT
-       blne    trace_hardirqs_off
-#endif
-       .endif
-       ldr     lr, [sp, #S_SP]                 @ top of the stack
-       ldrd    r0, r1, [sp, #S_LR]             @ calling lr and pc
-
-       @ We must avoid clrex due to Cortex-A15 erratum #830321
-       strex   r2, r1, [sp, #S_LR]             @ clear the exclusive monitor
-
-       stmdb   lr!, {r0, r1, \rpsr}            @ calling lr and rfe context
-       ldmia   sp, {r0 - r12}
-       mov     sp, lr
-       ldr     lr, [sp], #4
-       rfeia   sp!
-       .endm
-
-       @
-       @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-       @
-       @ For full details see non-Thumb implementation above.
-       @
-       .macro  svc_exit_via_fiq
-       add     r0, sp, #S_R2
-       ldr     lr, [sp, #S_LR]
-       ldr     sp, [sp, #S_SP] @ abort is deadly from here onward (it will
-                               @ clobber state restored below)
-       ldmia   r0, {r2 - r12}
-       mov     r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-       msr     cpsr_c, r1
-       sub     r0, #S_R2
-       add     r8, r0, #S_PC
-       ldmia   r0, {r0 - r1}
-       rfeia   r8
-       .endm
-
-#ifdef CONFIG_CPU_V7M
-       /*
-        * Note we don't need to do clrex here as clearing the local monitor is
-        * part of each exception entry and exit sequence.
-        */
-       .macro  restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+       @ V7M restore.
+       @ Note that we don't need to do clrex here as clearing the local
+       @ monitor is part of the exception entry and exit sequence.
        .if     \offset
        add     sp, #\offset
        .endif
        v7m_exception_slow_exit ret_r0 = \fast
-       .endm
-#else  /* ifdef CONFIG_CPU_V7M */
-       .macro  restore_user_regs, fast = 0, offset = 0
+#else
+       @ Thumb mode restore
        mov     r2, sp
        load_user_sp_lr r2, r3, \offset + S_SP  @ calling sp, lr
        ldr     r1, [sp, #\offset + S_PSR]      @ get calling cpsr
        .endif
        add     sp, sp, #S_FRAME_SIZE - S_SP
        movs    pc, lr                          @ return & move spsr_svc into cpsr
-       .endm
-#endif /* ifdef CONFIG_CPU_V7M / else */
 #endif /* !CONFIG_THUMB2_KERNEL */
+       .endm
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
index 29e2991465cb27b579f729deec65e2293a0a04b5..04286fd9e09ce7a27259c4d375a05a965e3be0ea 100644 (file)
@@ -464,10 +464,7 @@ __enable_mmu:
 #ifdef CONFIG_ARM_LPAE
        mcrr    p15, 0, r4, r5, c2              @ load TTBR0
 #else
-       mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-                     domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+       mov     r5, #DACR_INIT
        mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
        mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
 #endif
index f192a2a4171935720cfc702953703c78078579cc..e550a4541f482d03837e9c8929acb8022c00191c 100644 (file)
@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
        buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-       printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-               buf, interrupts_enabled(regs) ? "n" : "ff",
-               fast_interrupts_enabled(regs) ? "n" : "ff",
-               processor_modes[processor_mode(regs)],
-               isa_modes[isa_mode(regs)],
-               get_fs() == get_ds() ? "kernel" : "user");
+       {
+               unsigned int domain = get_domain();
+               const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               /*
+                * Get the domain register for the parent context. In user
+                * mode, we don't save the DACR, so lets use what it should
+                * mode, we don't save the DACR, so let's use what it should
+                */
+               if (user_mode(regs))
+                       domain = DACR_UACCESS_ENABLE;
+               else
+                       domain = *(unsigned int *)(regs + 1);
+#endif
+
+               if ((domain & domain_mask(DOMAIN_USER)) ==
+                   domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+                       segment = "none";
+               else if (get_fs() == get_ds())
+                       segment = "kernel";
+               else
+                       segment = "user";
+
+               printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+                       buf, interrupts_enabled(regs) ? "n" : "ff",
+                       fast_interrupts_enabled(regs) ? "n" : "ff",
+                       processor_modes[processor_mode(regs)],
+                       isa_modes[isa_mode(regs)], segment);
+       }
 #else
        printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +170,9 @@ void __show_regs(struct pt_regs *regs)
                buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
                {
-                       unsigned int transbase, dac;
+                       unsigned int transbase, dac = get_domain();
                        asm("mrc p15, 0, %0, c2, c0\n\t"
-                           "mrc p15, 0, %1, c3, c0\n"
-                           : "=r" (transbase), "=r" (dac));
+                           : "=r" (transbase));
                        snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
                                transbase, dac);
                }
@@ -210,6 +233,16 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+#ifdef CONFIG_CPU_USE_DOMAINS
+       /*
+        * Copy the initial value of the domain access control register
+        * from the current thread: thread->addr_limit will have been
+        * copied from the current thread via setup_thread_stack() in
+        * kernel/fork.c
+        */
+       thread->cpu_domain = get_domain();
+#endif
+
        if (likely(!(p->flags & PF_KTHREAD))) {
                *childregs = *current_pt_regs();
                childregs->ARM_r0 = 0;
index 1361756782c73b49c499f50bc4f423647e2edd3f..5b26e7efa9ea415967b63ede27ab1edf2bc8e888 100644 (file)
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
        while (1) {
                unsigned long temp;
+               unsigned int __ua_flags;
 
+               __ua_flags = uaccess_save_and_enable();
                if (type == TYPE_SWPB)
                        __user_swpb_asm(*data, address, res, temp);
                else
                        __user_swp_asm(*data, address, res, temp);
+               uaccess_restore(__ua_flags);
 
                if (likely(res != -EAGAIN) || signal_pending(current))
                        break;
index d358226236f2951e3b09cbc629799c963884ca49..969f9d9e665f4d49b2951ed65cfde9c279e1aa55 100644 (file)
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
        kuser_init(vectors_base);
 
        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-       modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
index 1710fd7db2d57d35ed342417336980f987db5985..970d6c0437743cda6a78620e1439eccb91398da2 100644 (file)
 
                .text
 
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
                stmfd   sp!, {r1, lr}
                mov     r2, #0
                cmp     r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(          strnebt r2, [r0])
                mov     r0, #0
                ldmfd   sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
                .pushsection .text.fixup,"ax"
index 7a235b9952be04e3ed8acd8892d5ca4d63ee27ff..1512bebfbf1b18ad317648891385a24e93d1f35f 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *     size_t __copy_from_user(void *to, const void *from, size_t n)
+ *     size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
 
        .text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
        .pushsection .fixup,"ax"
        .align 0
index 9648b0675a3efc81dd412fa5b3be820c1a3d6242..caf5019d8161e2f1914a797a4c6800844a27d570 100644 (file)
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *     size_t __copy_to_user(void *to, const void *from, size_t n)
+ *     size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
        .text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
        .pushsection .text.fixup,"ax"
index 1d0957e61f898ab6ab43759496e85c8b247218d1..1712f132b80d2402d94d72ea974a0c3326fa2f52 100644 (file)
 
                .text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+               .macro  save_regs
+               mrc     p15, 0, ip, c3, c0, 0
+               stmfd   sp!, {r1, r2, r4 - r8, ip, lr}
+               uaccess_enable ip
+               .endm
+
+               .macro  load_regs
+               ldmfd   sp!, {r1, r2, r4 - r8, ip, lr}
+               mcr     p15, 0, ip, c3, c0, 0
+               ret     lr
+               .endm
+#else
                .macro  save_regs
                stmfd   sp!, {r1, r2, r4 - r8, lr}
                .endm
@@ -24,6 +37,7 @@
                .macro  load_regs
                ldmfd   sp!, {r1, r2, r4 - r8, pc}
                .endm
+#endif
 
                .macro  load1b, reg1
                ldrusr  \reg1, r0, 1
index 4b39af2dfda9963345afe18c89131c1056a90b41..d72b90905132487257220939a255ac7ed1d3754d 100644 (file)
@@ -136,7 +136,7 @@ out:
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
        /*
         * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
        return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
        /* See rationale for this in __copy_to_user() above. */
        if (n < 64)
index 54473cd4aba951c793f25df8f3e9fb17be7f0160..b3b31e30cadd207f98991841b42f60f91d2a8097 100644 (file)
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        ldr     r3, [r4]                        @ read aborted ARM instruction
+       uaccess_disable ip                      @ disable userspace access
        bic     r1, r1, #1 << 11 | 1 << 10      @ clear bits 11 and 10 of FSR
        tst     r3, #1 << 20                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
index a0908d4653a34a2241d95c58af16dcdde4dafa9a..a6a381a6caa5a32f6b4ad018ab10e702b01920cc 100644 (file)
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
        do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
        ldreq   r3, [r4]                        @ read aborted ARM instruction
+       uaccess_disable ip                      @ disable user access
        bic     r1, r1, #1 << 11                @ clear bits 11 of FSR
-       do_ldrd_abort tmp=ip, insn=r3
+       teq_ldrd tmp=ip, insn=r3                @ insn was LDRD?
+       beq     do_DataAbort                    @ yes
        tst     r3, #1 << 20                    @ check write
        orreq   r1, r1, #1 << 11
        b       do_DataAbort
index 4006b7a612642b7fa4ec36b5a995ccc9bc1e3a40..00ab011bef5848cbcc750d8aa51770cea7ac9934 100644 (file)
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
        bne     do_DataAbort
        do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
        ldreq   r3, [r4]                        @ read aborted ARM instruction
-       do_ldrd_abort tmp=ip, insn=r3
+       uaccess_disable ip                      @ disable userspace access
+       teq_ldrd tmp=ip, insn=r3                @ insn was LDRD?
+       beq     do_DataAbort                    @ yes
        tst     r3, #1 << 20                    @ L = 0 -> write
        orreq   r1, r1, #1 << 11                @ yes.
        b       do_DataAbort
index 8c48c5c22a331aac8f547335d6990c598457ef0b..8801a15aa10595a9288edaeca03ed434d33e86b1 100644 (file)
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
        ldr     ip, =0x4107b36
        mrc     p15, 0, r3, c0, c0, 0           @ get processor id
        teq     ip, r3, lsr #4                  @ r0 ARM1136?
-       bne     do_DataAbort
+       bne     1f
        tst     r5, #PSR_J_BIT                  @ Java?
        tsteq   r5, #PSR_T_BIT                  @ Thumb?
-       bne     do_DataAbort
+       bne     1f
        bic     r1, r1, #1 << 11                @ clear bit 11 of FSR
        ldr     r3, [r4]                        @ read aborted ARM instruction
  ARM_BE8(rev   r3, r3)
 
-       do_ldrd_abort tmp=ip, insn=r3
+       teq_ldrd tmp=ip, insn=r3                @ insn was LDRD?
+       beq     1f                              @ yes
        tst     r3, #1 << 20                    @ L = 0 -> write
        orreq   r1, r1, #1 << 11                @ yes.
 #endif
+1:     uaccess_disable ip                      @ disable userspace access
        b       do_DataAbort
index 4812ad054214572ba6e7198247e2c190e469897d..e8d0e08c227fc5f36d864378bf5cf75dfbb00e10 100644 (file)
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
        mrc     p15, 0, r1, c5, c0, 0           @ get FSR
        mrc     p15, 0, r0, c6, c0, 0           @ get FAR
+       uaccess_disable ip                      @ disable userspace access
 
        /*
         * V6 code adjusts the returned DFSR.
index f3982580c273057b89a1c025cb52f0f54093014f..6d8e8e3365d17321f03b37fa67ab04a65b29f4ca 100644 (file)
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
        bne     .data_thumb_abort
        ldr     r8, [r4]                        @ read arm instruction
+       uaccess_disable ip                      @ disable userspace access
        tst     r8, #1 << 20                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 11                @ yes.
        and     r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_abort:
        ldrh    r8, [r4]                        @ read instruction
+       uaccess_disable ip                      @ disable userspace access
        tst     r8, #1 << 11                    @ L = 1 -> write?
        orreq   r1, r1, #1 << 8                 @ yes
        and     r7, r8, #15 << 12
index 2cbf68ef0e8321121e5ecabb55f50f95083beb1d..4509bee4e081ce78f95bcd99cd890468d97a5d8c 100644 (file)
@@ -13,6 +13,7 @@
        tst     \psr, #PSR_T_BIT
        beq     not_thumb
        ldrh    \tmp, [\pc]                     @ Read aborted Thumb instruction
+       uaccess_disable ip                      @ disable userspace access
        and     \tmp, \tmp, # 0xfe00            @ Mask opcode field
        cmp     \tmp, # 0x5600                  @ Is it ldrsb?
        orreq   \tmp, \tmp, #1 << 11            @ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
  *   [7:4] == 1101
  *    [20] == 0
  */
-       .macro  do_ldrd_abort, tmp, insn
-       tst     \insn, #0x0e100000              @ [27:25,20] == 0
-       bne     not_ldrd
-       and     \tmp, \insn, #0x000000f0        @ [7:4] == 1101
-       cmp     \tmp, #0x000000d0
-       beq     do_DataAbort
-not_ldrd:
+       .macro  teq_ldrd, tmp, insn
+       mov     \tmp, #0x0e100000
+       orr     \tmp, #0x000000f0
+       and     \tmp, \insn, \tmp
+       teq     \tmp, #0x000000d0
        .endm
-
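
The old do_ldrd_abort macro classified the instruction with two tests and a branch; teq_ldrd folds the LDRD check — bits [27:25] and [20] clear, bits [7:4] equal to 1101 — into a single mask-and-compare whose condition flags the caller consumes with beq. The same predicate in C:

#include <stdbool.h>
#include <stdint.h>

/* C equivalent of teq_ldrd: mask 0x0e1000f0 keeps bits [27:25], [20]
 * and [7:4]; the result equals 0xd0 only for the LDRD encoding.
 */
static bool insn_is_ldrd(uint32_t insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}
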
index 870838a46d524141a9cb1d65980c1ba7456465cb..1cb9c1c1c05f864884231986341bfc27f0862ffe 100644 (file)
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_RDONLY,
                .prot_l1   = PMD_TYPE_TABLE,
-               .domain    = DOMAIN_USER,
+               .domain    = DOMAIN_VECTORS,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_RDONLY,
                .prot_l1   = PMD_TYPE_TABLE,
-               .domain    = DOMAIN_USER,
+               .domain    = DOMAIN_VECTORS,
        },
        [MT_MEMORY_RWX] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
index a3681f11dd9f12ceb8260df36e09a11ec0c22066..e683db1b90a3f805d1de8f92a9c0ebe544960368 100644 (file)
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                if (!new_pte)
                        goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+               /*
+                * Modify the PTE pointer to have the correct domain.  This
+                * needs to be the vectors domain to avoid the low vectors
+                * being unmapped.
+                */
+               pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+               pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
                init_pud = pud_offset(init_pgd, 0);
                init_pmd = pmd_offset(init_pud, 0);
                init_pte = pte_offset_map(init_pmd, 0);
index 71df4354765927da922606db930c6665cbd0c7f9..39c20afad7ed9ed3b4b967a54d3a435e07eccf9e 100644 (file)
@@ -95,9 +95,10 @@ emulate:
        reteq   r4                      @ no, return failure
 
 next:
+       uaccess_enable r3
 .Lx1:  ldrt    r6, [r5], #4            @ get the next instruction and
                                        @ increment PC
-
+       uaccess_disable r3
        and     r2, r6, #0x0F000000     @ test for FP insns
        teq     r2, #0x0C000000
        teqne   r2, #0x0D000000
index f00e080759384afd300398be488c5740f55f1091..10fd99c568c62a9296b4ad7d2cc3584360b1b8a0 100644 (file)
@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
        mov r1, r2
        mov r2, r3
        ldr r3, [sp, #8]
+       /*
+        * Privcmd calls are issued by userspace. We need to allow the
+        * kernel to access userspace memory before issuing the hypercall.
+        */
+       uaccess_enable r4
+
+       /* r4 is loaded now because it was used as a scratch register above */
        ldr r4, [sp, #4]
        __HVC(XEN_IMM)
+
+       /*
+        * Disable userspace access from the kernel. It is fine to do this
+        * unconditionally, as no set_fs(KERNEL_DS)/set_fs(get_ds()) has been
+        * called beforehand.
+        */
+       uaccess_disable r4
+
        ldm sp!, {r4}
        ret lr
 ENDPROC(privcmd_call);
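
The privcmd change applies the same window discipline at a hypercall boundary: user access is enabled just before __HVC(XEN_IMM), because the hypervisor dereferences userspace buffers on the kernel's behalf, and is disabled unconditionally afterwards, which is safe since nothing on this path runs under set_fs(KERNEL_DS). A C rendering for illustration only, using the save/restore helpers (equivalent on this path); the real routine is the assembly above, and issue_hvc() is a stand-in, not a real function:

#include <asm/uaccess.h>

long issue_hvc(unsigned long call, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5);

static long privcmd_call_sketch(unsigned long call, unsigned long a1,
				unsigned long a2, unsigned long a3,
				unsigned long a4, unsigned long a5)
{
	unsigned int __ua_flags;
	long ret;

	__ua_flags = uaccess_save_and_enable();	/* hypercall touches user memory */
	ret = issue_hvc(call, a1, a2, a3, a4, a5);
	uaccess_restore(__ua_flags);		/* always close the window */

	return ret;
}
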