From: Alex Shi
Date: Thu, 12 Apr 2012 22:51:46 +0000 (+1000)
Subject: percpu: remove percpu_xxx() functions
X-Git-Tag: next-20120417~2^2~115
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=6b1ab291ed7567a8501a9149089ec0fc3495d8b1;p=karo-tx-linux.git

percpu: remove percpu_xxx() functions

There are no percpu_xxx callers remaining

Signed-off-by: Alex Shi
Acked-by: Christoph Lameter
Acked-by: Tejun Heo
Cc: Ingo Molnar
Cc: Thomas Gleixner
Cc: "H. Peter Anvin"
Signed-off-by: Andrew Morton
---

diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index d6805798d6fc..7cf3f496f121 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -229,7 +229,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
 		sp = task_pt_regs(current)->sp;
 	} else {
 		/* -128 for the x32 ABI redzone */
-		sp = percpu_read(old_rsp) - 128;
+		sp = __this_cpu_read(old_rsp) - 128;
 	}
 
 	return (void __user *)round_down(sp - len, 16);
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 8d256ad313e1..ed3e18ac299f 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -351,7 +351,7 @@ do { \
 })
 
 /*
- * percpu_read() makes gcc load the percpu variable every time it is
+ * this_cpu_read() makes gcc load the percpu variable every time it is
  * accessed while this_cpu_read_stable() allows the value to be cached.
  * this_cpu_read_stable() is more efficient and can be used if its value
  * is guaranteed to be valid across cpus. The current users include
@@ -359,15 +359,7 @@ do { \
  * per-thread variables implemented as per-cpu variables and thus
  * stable for the duration of the respective task.
  */
-#define percpu_read(var)		percpu_from_op("mov", var, "m" (var))
 #define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))
-#define percpu_write(var, val)		percpu_to_op("mov", var, val)
-#define percpu_add(var, val)		percpu_add_op(var, val)
-#define percpu_sub(var, val)		percpu_add_op(var, -(val))
-#define percpu_and(var, val)		percpu_to_op("and", var, val)
-#define percpu_or(var, val)		percpu_to_op("or", var, val)
-#define percpu_xor(var, val)		percpu_to_op("xor", var, val)
-#define percpu_inc(var)			percpu_unary_op("inc", var)
 
 #define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -512,7 +504,11 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
 {
 	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
 
-	return ((1UL << (nr % BITS_PER_LONG)) & percpu_read(*a)) != 0;
+#ifdef CONFIG_X86_64
+	return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_8(*a)) != 0;
+#else
+	return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_4(*a)) != 0;
+#endif
 }
 
 static inline int x86_this_cpu_variable_test_bit(int nr,
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 21638ae14e07..2b9f82c037c9 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -165,60 +165,6 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 #define alloc_percpu(type)		\
 	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
-/*
- * Optional methods for optimized non-lvalue per-cpu variable access.
- *
- * @var can be a percpu variable or a field of it and its size should
- * equal char, int or long. percpu_read() evaluates to a lvalue and
- * all others to void.
- *
- * These operations are guaranteed to be atomic.
- * The generic versions disable interrupts. Archs are
- * encouraged to implement single-instruction alternatives which don't
- * require protection.
- */
-#ifndef percpu_read
-# define percpu_read(var)				\
-  ({							\
-	typeof(var) *pr_ptr__ = &(var);			\
-	typeof(var) pr_ret__;				\
-	pr_ret__ = get_cpu_var(*pr_ptr__);		\
-	put_cpu_var(*pr_ptr__);				\
-	pr_ret__;					\
-  })
-#endif
-
-#define __percpu_generic_to_op(var, val, op)		\
-do {							\
-	typeof(var) *pgto_ptr__ = &(var);		\
-	get_cpu_var(*pgto_ptr__) op val;		\
-	put_cpu_var(*pgto_ptr__);			\
-} while (0)
-
-#ifndef percpu_write
-# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
-#endif
-
-#ifndef percpu_add
-# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
-#endif
-
-#ifndef percpu_sub
-# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
-#endif
-
-#ifndef percpu_and
-# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
-#endif
-
-#ifndef percpu_or
-# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
-#endif
-
-#ifndef percpu_xor
-# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
-#endif
-
 /*
  * Branching function to split up a function into a set of functions that
  * are called for different scalar sizes of the objects handled.
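
For reference, a minimal conversion sketch. It is not part of the patch; the
per-cpu variable and the function around it are made up for illustration.
Callers of the removed percpu_read()/percpu_write()/percpu_inc() helpers move
to the corresponding this_cpu_*() operations, which x86 implements as single
segment-prefixed instructions and for which the generic fallbacks provide
their own protection:

	#include <linux/percpu.h>

	/* hypothetical per-cpu counter, used only for this example */
	static DEFINE_PER_CPU(unsigned long, demo_count);

	static void demo_event(void)
	{
		unsigned long snapshot;

		this_cpu_inc(demo_count);		/* was: percpu_inc(demo_count) */
		snapshot = this_cpu_read(demo_count);	/* was: percpu_read(demo_count) */

		if (snapshot > 1000)
			this_cpu_write(demo_count, 0);	/* was: percpu_write(demo_count, 0) */
	}

Each this_cpu_*() call above is safe against preemption on its own; as with
the old helpers, a sequence of them is not atomic as a whole.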