Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
author	Linus Torvalds <torvalds@linux-foundation.org>
	Wed, 25 Mar 2015 00:27:18 +0000 (17:27 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Wed, 25 Mar 2015 00:27:18 +0000 (17:27 -0700)
Pull two arm64 fixes from Catalin Marinas:

 - switch_mm() fix where init_mm.pgd ends up in the user TTBR0;
   swapper_pg_dir is not suitable for user mappings

 - this_cpu accessors fix for preemption safety

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: percpu: Make this_cpu accessors pre-empt safe
  arm64: Use the reserved TTBR0 if context switching to the init_mm
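
Both fixes are small. The percpu one applies a single pattern throughout: compute the per-CPU pointer and perform the access entirely inside a preempt_disable()/preempt_enable() pair, so the task cannot migrate to another CPU between selecting a CPU's data and touching it. A minimal userspace model of the race and of the fixed pattern (current_cpu, percpu_counter and the pin()/unpin() helpers below are hypothetical stand-ins, not kernel API):

#include <stdio.h>

#define NR_CPUS 4

/* Hypothetical stand-ins for the kernel primitives. */
static int current_cpu;                 /* CPU this task currently runs on */
static long percpu_counter[NR_CPUS];    /* one counter slot per CPU */

static void pin(void)   { /* models preempt_disable(): no migration */ }
static void unpin(void) { /* models preempt_enable() */ }

/*
 * Unsafe variant: the task may migrate between computing the per-CPU
 * pointer and writing through it, so it can scribble on the slot of a
 * CPU it is no longer running on.
 */
static long unsafe_add(long val)
{
	long *p = &percpu_counter[current_cpu]; /* stale after a migration */
	*p += val;
	return *p;
}

/*
 * Fixed variant, mirroring _pcp_protect(): the pointer is computed and
 * used entirely inside the preempt-disabled section.
 */
static long protected_add(long val)
{
	long ret;

	pin();
	ret = (percpu_counter[current_cpu] += val);
	unpin();
	return ret;
}

int main(void)
{
	current_cpu = 1;
	printf("cpu1: %ld\n", protected_add(5)); /* 5 */
	printf("cpu1: %ld\n", unsafe_add(1));    /* 6, but only if no migration */
	return 0;
}

With preemption left enabled between the pointer computation and the store, the unsafe variant can increment a counter belonging to the CPU the task has just left; the protected variant cannot.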

arch/arm64/include/asm/cmpxchg.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/percpu.h

diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index cb9593079f29763c34f7e68fa89737355ac03adb..d8c25b7b18fbf42ddc66ab888fc22c530d752d15 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -246,14 +246,30 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
        __ret; \
 })
 
-#define this_cpu_cmpxchg_1(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_2(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_4(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-#define this_cpu_cmpxchg_8(ptr, o, n) cmpxchg_local(raw_cpu_ptr(&(ptr)), o, n)
-
-#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
-       cmpxchg_double_local(raw_cpu_ptr(&(ptr1)), raw_cpu_ptr(&(ptr2)), \
-                               o1, o2, n1, n2)
+#define _protect_cmpxchg_local(pcp, o, n)                      \
+({                                                             \
+       typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
+       preempt_disable();                                      \
+       __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);       \
+       preempt_enable();                                       \
+       __ret;                                                  \
+})
+
+#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
+
+#define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2)          \
+({                                                                     \
+       int __ret;                                                      \
+       preempt_disable();                                              \
+       __ret = cmpxchg_double_local(   raw_cpu_ptr(&(ptr1)),           \
+                                       raw_cpu_ptr(&(ptr2)),           \
+                                       o1, o2, n1, n2);                \
+       preempt_enable();                                               \
+       __ret;                                                          \
+})
 
 #define cmpxchg64(ptr,o,n)             cmpxchg((ptr),(o),(n))
 #define cmpxchg64_local(ptr,o,n)       cmpxchg_local((ptr),(o),(n))
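
The hunk above applies the same wrapper idea to the this_cpu compare-and-exchange family. A compilable userspace model of _protect_cmpxchg_local(), with a C11 atomic standing in for the arm64 cmpxchg_local() and the same hypothetical pin()/unpin() helpers modelling preempt_disable()/preempt_enable():

#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static int current_cpu;                  /* hypothetical: CPU this task is on */
static _Atomic long percpu_val[NR_CPUS]; /* one value slot per CPU */

static void pin(void)   { /* models preempt_disable() */ }
static void unpin(void) { /* models preempt_enable() */ }

/*
 * Model of _protect_cmpxchg_local(): select the per-CPU slot and do the
 * compare-and-exchange while pinned, then return the prior value, as
 * cmpxchg_local() does.
 */
static long protect_cmpxchg_local(long old, long new)
{
	long expected = old;

	pin();
	atomic_compare_exchange_strong(&percpu_val[current_cpu],
				       &expected, new);
	unpin();
	return expected;
}

int main(void)
{
	atomic_store(&percpu_val[0], 42);
	printf("prior: %ld\n", protect_cmpxchg_local(42, 7)); /* 42: swap done */
	printf("now:   %ld\n", atomic_load(&percpu_val[0]));  /* 7 */
	return 0;
}
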
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a9eee33dfa62dc031ab8262c275eba79f8609bac..101a42bde728a8b9547bca989b696d473dcd7e42 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,6 +151,15 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
        unsigned int cpu = smp_processor_id();
 
+       /*
+        * init_mm.pgd does not contain any user mappings and it is always
+        * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
+        */
+       if (next == &init_mm) {
+               cpu_set_reserved_ttbr0();
+               return;
+       }
+
        if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
                check_and_switch_context(next, tsk);
 }
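
This hunk is the TTBR0 fix: on arm64, TTBR0_EL1 holds the user page-table base while TTBR1_EL1 holds the kernel's, and init_mm owns no user mappings, so switching to it only needs TTBR0 parked on the reserved empty table; installing swapper_pg_dir there is wrong because, per the commit message, it is not suitable for user mappings. A simplified, compilable sketch of that decision (struct mm, set_ttbr0() and reserved_empty_table are stand-ins for the real arm64 code, and the pgd value below is fake):

#include <stdio.h>

/* Simplified stand-ins for the arm64 structures and helpers. */
struct mm {
	const char *name;
	void *pgd;	/* root of this address space's user page tables */
};

static char reserved_empty_table[1];		/* models the zeroed reserved table */
static struct mm init_mm = { "init_mm", NULL };	/* no user mappings at all */

static void set_ttbr0(const char *why, void *table)
{
	printf("TTBR0 <- %p (%s)\n", table, why);
}

/* Sketch of the fixed switch_mm() decision. */
static void switch_mm(struct mm *next)
{
	if (next == &init_mm) {
		/*
		 * Nothing user-accessible to install: park TTBR0 on the
		 * reserved empty table rather than on the kernel's own
		 * page tables.
		 */
		set_ttbr0("reserved", reserved_empty_table);
		return;
	}
	set_ttbr0(next->name, next->pgd);	/* ordinary user mm switch */
}

int main(void)
{
	struct mm user_mm = { "user", (void *)0x1000 };	/* fake pgd */

	switch_mm(&user_mm);
	switch_mm(&init_mm);
	return 0;
}
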
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 09da25bc596fd0bdccdf03e94b37a4c81e0cc633..4fde8c1df97ffb46d9d2039a11cf574d05ed5a92 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -204,25 +204,47 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
        return ret;
 }
 
+#define _percpu_read(pcp)                                              \
+({                                                                     \
+       typeof(pcp) __retval;                                           \
+       preempt_disable();                                              \
+       __retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)),      \
+                                             sizeof(pcp));             \
+       preempt_enable();                                               \
+       __retval;                                                       \
+})
+
+#define _percpu_write(pcp, val)                                                \
+do {                                                                   \
+       preempt_disable();                                              \
+       __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val),       \
+                               sizeof(pcp));                           \
+       preempt_enable();                                               \
+} while(0)                                                             \
+
+#define _pcp_protect(operation, pcp, val)                      \
+({                                                             \
+       typeof(pcp) __retval;                                   \
+       preempt_disable();                                      \
+       __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),  \
+                                         (val), sizeof(pcp));  \
+       preempt_enable();                                       \
+       __retval;                                               \
+})
+
 #define _percpu_add(pcp, val) \
-       __percpu_add(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+       _pcp_protect(__percpu_add, pcp, val)
 
-#define _percpu_add_return(pcp, val) (typeof(pcp)) (_percpu_add(pcp, val))
+#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
 
 #define _percpu_and(pcp, val) \
-       __percpu_and(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
+       _pcp_protect(__percpu_and, pcp, val)
 
 #define _percpu_or(pcp, val) \
-       __percpu_or(raw_cpu_ptr(&(pcp)), val, sizeof(pcp))
-
-#define _percpu_read(pcp) (typeof(pcp))        \
-       (__percpu_read(raw_cpu_ptr(&(pcp)), sizeof(pcp)))
-
-#define _percpu_write(pcp, val) \
-       __percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp))
+       _pcp_protect(__percpu_or, pcp, val)
 
 #define _percpu_xchg(pcp, val) (typeof(pcp)) \
-       (__percpu_xchg(raw_cpu_ptr(&(pcp)), (unsigned long)(val), sizeof(pcp)))
+       _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
 
 #define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
 #define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)