From 1ad709a9213058bc50976e3d8d81f5b8c81a4331 Mon Sep 17 00:00:00 2001
From: Alex Shi
Date: Fri, 13 Apr 2012 08:51:45 +1000
Subject: [PATCH] x86: change percpu_read_stable() to this_cpu_read_stable()

There is no functional change. This is preparation for the percpu_xxx
series of function changes.

Signed-off-by: Alex Shi
Acked-by: Christoph Lameter
Acked-by: Tejun Heo
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Signed-off-by: Andrew Morton
---
 arch/x86/include/asm/current.h      | 2 +-
 arch/x86/include/asm/fpu-internal.h | 2 +-
 arch/x86/include/asm/percpu.h       | 6 +++---
 arch/x86/include/asm/thread_info.h  | 2 +-
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/current.h b/arch/x86/include/asm/current.h
index 4d447b732d82..9476c04ee635 100644
--- a/arch/x86/include/asm/current.h
+++ b/arch/x86/include/asm/current.h
@@ -11,7 +11,7 @@ DECLARE_PER_CPU(struct task_struct *, current_task);
 
 static __always_inline struct task_struct *get_current(void)
 {
-	return percpu_read_stable(current_task);
+	return this_cpu_read_stable(current_task);
 }
 
 #define current get_current()
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 4fa88154e4de..5caaf43b7bd4 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -344,7 +344,7 @@ typedef struct { int preload; } fpu_switch_t;
  */
 static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
 {
-	return new == percpu_read_stable(fpu_owner_task) &&
+	return new == this_cpu_read_stable(fpu_owner_task) &&
 		cpu == new->thread.fpu.last_cpu;
 }
 
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 276bbc07f007..8d256ad313e1 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -352,15 +352,15 @@ do { \
 
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
- * accessed while percpu_read_stable() allows the value to be cached.
- * percpu_read_stable() is more efficient and can be used if its value
+ * accessed while this_cpu_read_stable() allows the value to be cached.
+ * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across cpus. The current users include
  * get_current() and get_thread_info() both of which are actually
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
 #define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
-#define percpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
+#define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
 #define percpu_write(var, val)		percpu_to_op("mov", var, val)
 #define percpu_add(var, val)		percpu_add_op(var, val)
 #define percpu_sub(var, val)		percpu_add_op(var, -(val))
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 0710c11305d4..194490d4b736 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -224,7 +224,7 @@ DECLARE_PER_CPU(unsigned long, kernel_stack);
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-	ti = (void *)(percpu_read_stable(kernel_stack) +
+	ti = (void *)(this_cpu_read_stable(kernel_stack) +
 		      KERNEL_STACK_OFFSET - THREAD_SIZE);
 	return ti;
 }
-- 
2.39.5
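
For readers new to the distinction the percpu.h comment draws, here is a
minimal user-space C sketch (not kernel code; the names are purely
illustrative) of why a read marked "stable" may be cached by the compiler
while a percpu_read()-style access is reloaded on every use. In the kernel
the effect comes from the "m" versus "p" asm constraints passed to
percpu_from_op(); the sketch only mimics it with a volatile cast.

#include <stdio.h>

/* Stand-in for a per-cpu slot holding the current task pointer. */
static unsigned long current_task_slot = 0x1000;

/* Like percpu_read(): the volatile access forces a fresh load each time. */
static inline unsigned long read_every_time(void)
{
	return *(volatile unsigned long *)&current_task_slot;
}

/*
 * Like this_cpu_read_stable(): a plain load the compiler may hoist or
 * reuse, which is safe only because the value cannot change underneath
 * the task that reads it.
 */
static inline unsigned long read_stable(void)
{
	return current_task_slot;
}

int main(void)
{
	unsigned long a = 0, b = 0;
	int i;

	/*
	 * In a loop, the compiler must reload the slot for
	 * read_every_time() but may load it once for read_stable().
	 */
	for (i = 0; i < 4; i++) {
		a += read_every_time();
		b += read_stable();
	}
	printf("%lu %lu\n", a, b);
	return 0;
}

Compiled with optimization, the read_stable() calls in the loop can be
folded into a single load, which is exactly why such a read is only safe
for values like current_task that stay fixed for the lifetime of the
reading task.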