git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 8 Jan 2011 01:02:58 +0000 (17:02 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 8 Jan 2011 01:02:58 +0000 (17:02 -0800)
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: Use per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts: in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c}
as per Tejun.
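
The common thread in these commits is that __get_cpu_var() first computes the address of this CPU's copy and then dereferences it, while the this_cpu operations compile to a single segment-prefixed instruction on x86. A minimal before/after sketch of the conversion pattern (illustrative, not taken from any single patch):

        /* before: address calculation, then access */
        x = __get_cpu_var(counter);
        __get_cpu_var(counter) = x + 1;

        /* after: one segment-prefixed instruction each, no address calculation */
        x = __this_cpu_read(counter);
        __this_cpu_write(counter, x + 1);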

62 files changed:
MAINTAINERS
arch/x86/Kconfig.cpu
arch/x86/include/asm/debugreg.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/processor.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_intel.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/hw_breakpoint.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/kprobes.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tsc.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/oprofile/nmi_int.c
arch/x86/oprofile/op_model_ppro.c
arch/x86/xen/enlighten.c
arch/x86/xen/multicalls.h
arch/x86/xen/spinlock.c
arch/x86/xen/time.c
drivers/acpi/processor_idle.c
drivers/char/random.c
drivers/connector/cn_proc.c
drivers/cpuidle/cpuidle.c
drivers/input/gameport/gameport.c
drivers/s390/cio/cio.c
drivers/staging/lirc/lirc_serial.c
drivers/staging/speakup/fakekey.c
drivers/xen/events.c
fs/buffer.c
include/asm-generic/irq_regs.h
include/linux/elevator.h
include/linux/highmem.h
include/linux/kernel_stat.h
include/linux/kprobes.h
include/linux/percpu.h
kernel/exit.c
kernel/fork.c
kernel/hrtimer.c
kernel/irq_work.c
kernel/kprobes.c
kernel/rcutree.c
kernel/softirq.c
kernel/taskstats.c
kernel/time/tick-common.c
kernel/time/tick-oneshot.c
kernel/watchdog.c
lib/percpu_counter.c
mm/percpu.c
mm/slab.c
mm/vmstat.c

index 78d317c545b9df0a75b97e4b6b00842f558b2feb..23d04363a1955503a817e9e53dae991e2ad43570 100644 (file)
@@ -4653,6 +4653,16 @@ S:       Maintained
 F:     crypto/pcrypt.c
 F:     include/crypto/pcrypt.h
 
+PER-CPU MEMORY ALLOCATOR
+M:     Tejun Heo <tj@kernel.org>
+M:     Christoph Lameter <cl@linux-foundation.org>
+L:     linux-kernel@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
+S:     Maintained
+F:     include/linux/percpu*.h
+F:     mm/percpu*.c
+F:     arch/*/include/asm/percpu.h
+
 PER-TASK DELAY ACCOUNTING
 M:     Balbir Singh <balbir@linux.vnet.ibm.com>
 S:     Maintained
index 2ac9069890cdf594610e19424c54626bb1f051aa..15588a0ef4663054dc33d623e373f8edce90432a 100644 (file)
@@ -310,6 +310,9 @@ config X86_INTERNODE_CACHE_SHIFT
 config X86_CMPXCHG
        def_bool X86_64 || (X86_32 && !M386)
 
+config CMPXCHG_LOCAL
+       def_bool X86_64 || (X86_32 && !M386)
+
 config X86_L1_CACHE_SHIFT
        int
        default "7" if MPENTIUM4 || MPSC
index b81002f23614bb9ef6d9b75502f8ab08e228bc0d..078ad0caefc6115612d8ee60f8ba7da122346516 100644 (file)
@@ -94,7 +94,7 @@ static inline void hw_breakpoint_disable(void)
 
 static inline int hw_breakpoint_active(void)
 {
-       return __get_cpu_var(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
+       return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK;
 }
 
 extern void aout_dump_debugregs(struct user *dump);
index f899e01a8ac965d0dfb90b8e8d0b1bda0d2fe812..8ee45167e8176d4a73814732428fad90f76c2966 100644 (file)
@@ -229,6 +229,125 @@ do {                                                                      \
        }                                               \
 })
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)                                 \
+({                                                                     \
+       typeof(var) paro_ret__ = val;                                   \
+       switch (sizeof(var)) {                                          \
+       case 1:                                                         \
+               asm("xaddb %0, "__percpu_arg(1)                         \
+                           : "+q" (paro_ret__), "+m" (var)             \
+                           : : "memory");                              \
+               break;                                                  \
+       case 2:                                                         \
+               asm("xaddw %0, "__percpu_arg(1)                         \
+                           : "+r" (paro_ret__), "+m" (var)             \
+                           : : "memory");                              \
+               break;                                                  \
+       case 4:                                                         \
+               asm("xaddl %0, "__percpu_arg(1)                         \
+                           : "+r" (paro_ret__), "+m" (var)             \
+                           : : "memory");                              \
+               break;                                                  \
+       case 8:                                                         \
+               asm("xaddq %0, "__percpu_arg(1)                         \
+                           : "+re" (paro_ret__), "+m" (var)            \
+                           : : "memory");                              \
+               break;                                                  \
+       default: __bad_percpu_size();                                   \
+       }                                                               \
+       paro_ret__ += val;                                              \
+       paro_ret__;                                                     \
+})
+
+/*
+ * xchg is implemented here using cmpxchg without a lock prefix.  A native
+ * xchg is expensive due to its implied lock prefix; the processor cannot
+ * prefetch cachelines if xchg is used.
+ */
+#define percpu_xchg_op(var, nval)                                      \
+({                                                                     \
+       typeof(var) pxo_ret__;                                          \
+       typeof(var) pxo_new__ = (nval);                                 \
+       switch (sizeof(var)) {                                          \
+       case 1:                                                         \
+               asm("\n1:mov "__percpu_arg(1)",%%al"                    \
+                   "\n\tcmpxchgb %2, "__percpu_arg(1)                  \
+                   "\n\tjnz 1b"                                        \
+                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "q" (pxo_new__)                           \
+                           : "memory");                                \
+               break;                                                  \
+       case 2:                                                         \
+               asm("\n1:mov "__percpu_arg(1)",%%ax"                    \
+                   "\n\tcmpxchgw %2, "__percpu_arg(1)                  \
+                   "\n\tjnz 1b"                                        \
+                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "r" (pxo_new__)                           \
+                           : "memory");                                \
+               break;                                                  \
+       case 4:                                                         \
+               asm("\n1:mov "__percpu_arg(1)",%%eax"                   \
+                   "\n\tcmpxchgl %2, "__percpu_arg(1)                  \
+                   "\n\tjnz 1b"                                        \
+                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "r" (pxo_new__)                           \
+                           : "memory");                                \
+               break;                                                  \
+       case 8:                                                         \
+               asm("\n1:mov "__percpu_arg(1)",%%rax"                   \
+                   "\n\tcmpxchgq %2, "__percpu_arg(1)                  \
+                   "\n\tjnz 1b"                                        \
+                           : "=a" (pxo_ret__), "+m" (var)              \
+                           : "r" (pxo_new__)                           \
+                           : "memory");                                \
+               break;                                                  \
+       default: __bad_percpu_size();                                   \
+       }                                                               \
+       pxo_ret__;                                                      \
+})
+
+/*
+ * cmpxchg has no such implied lock semantics; as a result it is much
+ * more efficient for cpu-local operations.
+ */
+#define percpu_cmpxchg_op(var, oval, nval)                             \
+({                                                                     \
+       typeof(var) pco_ret__;                                          \
+       typeof(var) pco_old__ = (oval);                                 \
+       typeof(var) pco_new__ = (nval);                                 \
+       switch (sizeof(var)) {                                          \
+       case 1:                                                         \
+               asm("cmpxchgb %2, "__percpu_arg(1)                      \
+                           : "=a" (pco_ret__), "+m" (var)              \
+                           : "q" (pco_new__), "0" (pco_old__)          \
+                           : "memory");                                \
+               break;                                                  \
+       case 2:                                                         \
+               asm("cmpxchgw %2, "__percpu_arg(1)                      \
+                           : "=a" (pco_ret__), "+m" (var)              \
+                           : "r" (pco_new__), "0" (pco_old__)          \
+                           : "memory");                                \
+               break;                                                  \
+       case 4:                                                         \
+               asm("cmpxchgl %2, "__percpu_arg(1)                      \
+                           : "=a" (pco_ret__), "+m" (var)              \
+                           : "r" (pco_new__), "0" (pco_old__)          \
+                           : "memory");                                \
+               break;                                                  \
+       case 8:                                                         \
+               asm("cmpxchgq %2, "__percpu_arg(1)                      \
+                           : "=a" (pco_ret__), "+m" (var)              \
+                           : "r" (pco_new__), "0" (pco_old__)          \
+                           : "memory");                                \
+               break;                                                  \
+       default: __bad_percpu_size();                                   \
+       }                                                               \
+       pco_ret__;                                                      \
+})
+
 /*
  * percpu_read() makes gcc load the percpu variable every time it is
  * accessed while percpu_read_stable() allows the value to be cached.
@@ -267,6 +386,12 @@ do {                                                                       \
 #define __this_cpu_xor_1(pcp, val)     percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_2(pcp, val)     percpu_to_op("xor", (pcp), val)
 #define __this_cpu_xor_4(pcp, val)     percpu_to_op("xor", (pcp), val)
+/*
+ * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
+ * faster than an xchg with forced lock semantics.
+ */
+#define __this_cpu_xchg_8(pcp, nval)   percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
 
 #define this_cpu_read_1(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_read_2(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
@@ -286,6 +411,11 @@ do {                                                                       \
 #define this_cpu_xor_1(pcp, val)       percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_2(pcp, val)       percpu_to_op("xor", (pcp), val)
 #define this_cpu_xor_4(pcp, val)       percpu_to_op("xor", (pcp), val)
+#define this_cpu_xchg_1(pcp, nval)     percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_2(pcp, nval)     percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_4(pcp, nval)     percpu_xchg_op(pcp, nval)
+#define this_cpu_xchg_8(pcp, nval)     percpu_xchg_op(pcp, nval)
+#define this_cpu_cmpxchg_8(pcp, oval, nval)    percpu_cmpxchg_op(pcp, oval, nval)
 
 #define irqsafe_cpu_add_1(pcp, val)    percpu_add_op((pcp), val)
 #define irqsafe_cpu_add_2(pcp, val)    percpu_add_op((pcp), val)
@@ -299,6 +429,31 @@ do {                                                                       \
 #define irqsafe_cpu_xor_1(pcp, val)    percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_2(pcp, val)    percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)    percpu_to_op("xor", (pcp), val)
+#define irqsafe_cpu_xchg_1(pcp, nval)  percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_2(pcp, nval)  percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_4(pcp, nval)  percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_xchg_8(pcp, nval)  percpu_xchg_op(pcp, nval)
+#define irqsafe_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_cmpxchg_1(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_2(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_cmpxchg_4(pcp, oval, nval)  percpu_cmpxchg_op(pcp, oval, nval)
+
+#define this_cpu_add_return_1(pcp, val)        percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)        percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)        percpu_add_return_op(pcp, val)
+#define this_cpu_cmpxchg_1(pcp, oval, nval)    percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_2(pcp, oval, nval)    percpu_cmpxchg_op(pcp, oval, nval)
+#define this_cpu_cmpxchg_4(pcp, oval, nval)    percpu_cmpxchg_op(pcp, oval, nval)
+
+#define irqsafe_cpu_cmpxchg_1(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_2(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define irqsafe_cpu_cmpxchg_4(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#endif /* !CONFIG_M386 */
 
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
@@ -311,6 +466,7 @@ do {                                                                        \
 #define __this_cpu_and_8(pcp, val)     percpu_to_op("and", (pcp), val)
 #define __this_cpu_or_8(pcp, val)      percpu_to_op("or", (pcp), val)
 #define __this_cpu_xor_8(pcp, val)     percpu_to_op("xor", (pcp), val)
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
 
 #define this_cpu_read_8(pcp)           percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)     percpu_to_op("mov", (pcp), val)
@@ -318,12 +474,12 @@ do {                                                                      \
 #define this_cpu_and_8(pcp, val)       percpu_to_op("and", (pcp), val)
 #define this_cpu_or_8(pcp, val)                percpu_to_op("or", (pcp), val)
 #define this_cpu_xor_8(pcp, val)       percpu_to_op("xor", (pcp), val)
+#define this_cpu_add_return_8(pcp, val)        percpu_add_return_op(pcp, val)
 
 #define irqsafe_cpu_add_8(pcp, val)    percpu_add_op((pcp), val)
 #define irqsafe_cpu_and_8(pcp, val)    percpu_to_op("and", (pcp), val)
 #define irqsafe_cpu_or_8(pcp, val)     percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)    percpu_to_op("xor", (pcp), val)
-
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
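
A minimal usage sketch of the operations added above (hypothetical per-cpu counter, not part of the patch):

        DEFINE_PER_CPU(int, hits);

        int n    = this_cpu_add_return(hits, 1);  /* xadd: returns the incremented value */
        int old  = this_cpu_cmpxchg(hits, n, 0);  /* cmpxchg without the LOCK prefix */
        int prev = this_cpu_xchg(hits, -1);       /* mov + cmpxchg loop, see percpu_xchg_op() */

The __this_cpu_* forms assume the caller has already disabled preemption; the this_cpu_* forms handle that themselves. None of these take the LOCK prefix, so they are cheap but only valid for data that other CPUs do not modify concurrently.
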
index cae9c3cb95cf160e4e00f8c0b1c29fac298bb48f..c6efecf85a6a4b750805e3528e7d6b6c80dc22b7 100644 (file)
@@ -141,10 +141,9 @@ extern __u32                       cpu_caps_set[NCAPINTS];
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)          per_cpu(cpu_info, cpu)
-#define current_cpu_data       __get_cpu_var(cpu_info)
 #else
+#define cpu_info               boot_cpu_data
 #define cpu_data(cpu)          boot_cpu_data
-#define current_cpu_data       boot_cpu_data
 #endif
 
 extern const struct seq_operations cpuinfo_op;
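
With current_cpu_data gone, code that needs the running CPU's cpuinfo_x86 switches to the this_cpu accessors, as the remaining hunks in this series do:

        struct cpuinfo_x86 *c = __this_cpu_ptr(&cpu_info);          /* pointer to this CPU's copy */
        if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD) /* single-field read */
                return;

cpu_data(cpu) remains the way to reach another CPU's copy.
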
index ce65d449b750f9dd3caa787d1df2e0211d5db3a8..79e6baa8aa0a77f323051b1f67a2037aad3b9817 100644 (file)
@@ -516,7 +516,7 @@ static void __cpuinit setup_APIC_timer(void)
 {
        struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
-       if (cpu_has(&current_cpu_data, X86_FEATURE_ARAT)) {
+       if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_ARAT)) {
                lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
                /* Make LAPIC timer preferrable over percpu HPET */
                lapic_clockevent.rating = 150;
index 52735a710c30db466d3a84c3e650e08725f24c42..697dc34b7b87611443ccdd06d39c8188f77ca94d 100644 (file)
@@ -2329,7 +2329,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                unsigned int irr;
                struct irq_desc *desc;
                struct irq_cfg *cfg;
-               irq = __get_cpu_var(vector_irq)[vector];
+               irq = __this_cpu_read(vector_irq[vector]);
 
                if (irq == -1)
                        continue;
@@ -2363,7 +2363,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                        apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
                        goto unlock;
                }
-               __get_cpu_var(vector_irq)[vector] = -1;
+               __this_cpu_write(vector_irq[vector], -1);
 unlock:
                raw_spin_unlock(&desc->lock);
        }
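
The argument to a this_cpu operation can be any lvalue rooted in a per-cpu variable (an array element as here, or a struct field as in later hunks), so no intermediate pointer is needed. A sketch with an illustrative per-cpu array:

        DEFINE_PER_CPU(int, my_vectors[16]);

        int irq = __this_cpu_read(my_vectors[vector]);
        __this_cpu_write(my_vectors[vector], -1);
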
index 2a3f2a7db243f8b846ef5d3032287037d0111a69..ecca5f41ad2c8f00c5a88f113451e1a5ed480ed7 100644 (file)
@@ -120,8 +120,8 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
                else if (!strcmp(oem_table_id, "UVX"))
                        uv_system_type = UV_X2APIC;
                else if (!strcmp(oem_table_id, "UVH")) {
-                       __get_cpu_var(x2apic_extra_bits) =
-                               pnodeid << uvh_apicid.s.pnode_shift;
+                       __this_cpu_write(x2apic_extra_bits,
+                               pnodeid << uvh_apicid.s.pnode_shift);
                        uv_system_type = UV_NON_UNIQUE_APIC;
                        uv_set_apicid_hibit();
                        return 1;
@@ -286,7 +286,7 @@ static unsigned int x2apic_get_apic_id(unsigned long x)
        unsigned int id;
 
        WARN_ON(preemptible() && num_online_cpus() > 1);
-       id = x | __get_cpu_var(x2apic_extra_bits);
+       id = x | __this_cpu_read(x2apic_extra_bits);
 
        return id;
 }
@@ -378,7 +378,7 @@ struct apic __refdata apic_x2apic_uv_x = {
 
 static __cpuinit void set_x2apic_extra_bits(int pnode)
 {
-       __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
+       __this_cpu_write(x2apic_extra_bits, (pnode << 6));
 }
 
 /*
index 9e093f8fe78c4713aec4f91a62394eb0efa1f6c0..7c7bedb83c5a463bad2a2cd59765d3d6b1066f8b 100644 (file)
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
 
 bool cpu_has_amd_erratum(const int *erratum)
 {
-       struct cpuinfo_x86 *cpu = &current_cpu_data;
+       struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
        int osvw_id = *erratum++;
        u32 range;
        u32 ms;
index 491977baf6c0c5f374d988842f890fca183c7770..35c7e65e59be4b27e9242843184dd63af0ee8ed8 100644 (file)
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
 
        *rc = -ENODEV;
 
-       if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
+       if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
                return;
 
        eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 static void query_values_on_cpu(void *_err)
 {
        int *err = _err;
-       struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+       struct powernow_k8_data *data = __this_cpu_read(powernow_data);
 
        *err = query_current_values_with_pending_wait(data);
 }
index 9ecf81f9b90fb0c73416d958b1aa216b17e1ecfa..7283e98deaae14ca8f712acf3759f11cf7e9fc2b 100644 (file)
@@ -265,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
                line_size = l2.line_size;
                lines_per_tag = l2.lines_per_tag;
                /* cpu_data has errata corrections for K7 applied */
-               size_in_kb = current_cpu_data.x86_cache_size;
+               size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
                break;
        case 3:
                if (!l3.val)
@@ -287,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
        eax->split.type = types[leaf];
        eax->split.level = levels[leaf];
        eax->split.num_threads_sharing = 0;
-       eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+       eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
 
 
        if (assoc == 0xffff)
index 7a35b72d7c039d633bd8a4c9179eb4e4a5b7a549..d916183b7f9cfa77e9a6ae45531c44c38d4e563b 100644 (file)
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
 
 static int msr_to_offset(u32 msr)
 {
-       unsigned bank = __get_cpu_var(injectm.bank);
+       unsigned bank = __this_cpu_read(injectm.bank);
 
        if (msr == rip_msr)
                return offsetof(struct mce, ip);
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr)
 {
        u64 v;
 
-       if (__get_cpu_var(injectm).finished) {
+       if (__this_cpu_read(injectm.finished)) {
                int offset = msr_to_offset(msr);
 
                if (offset < 0)
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 static void mce_wrmsrl(u32 msr, u64 v)
 {
-       if (__get_cpu_var(injectm).finished) {
+       if (__this_cpu_read(injectm.finished)) {
                int offset = msr_to_offset(msr);
 
                if (offset >= 0)
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
 
        WARN_ON(smp_processor_id() != data);
 
-       if (mce_available(&current_cpu_data)) {
+       if (mce_available(__this_cpu_ptr(&cpu_info))) {
                machine_check_poll(MCP_TIMESTAMP,
                                &__get_cpu_var(mce_poll_banks));
        }
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
 static int mce_resume(struct sys_device *dev)
 {
        __mcheck_cpu_init_generic();
-       __mcheck_cpu_init_vendor(&current_cpu_data);
+       __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
 
        return 0;
 }
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
 static void mce_cpu_restart(void *data)
 {
        del_timer_sync(&__get_cpu_var(mce_timer));
-       if (!mce_available(&current_cpu_data))
+       if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        __mcheck_cpu_init_generic();
        __mcheck_cpu_init_timer();
@@ -1790,7 +1790,7 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_ce(void *all)
 {
-       if (!mce_available(&current_cpu_data))
+       if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        if (all)
                del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
 
 static void mce_enable_ce(void *all)
 {
-       if (!mce_available(&current_cpu_data))
+       if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
        cmci_reenable();
        cmci_recheck();
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
        unsigned long action = *(unsigned long *)h;
        int i;
 
-       if (!mce_available(&current_cpu_data))
+       if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
 
        if (!(action & CPU_TASKS_FROZEN))
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
        unsigned long action = *(unsigned long *)h;
        int i;
 
-       if (!mce_available(&current_cpu_data))
+       if (!mce_available(__this_cpu_ptr(&cpu_info)))
                return;
 
        if (!(action & CPU_TASKS_FROZEN))
index 6fcd0936194ff195cd78b14cdce76b997dd45ec9..8694ef56459d8e26761e6344df513ebf3af9dce6 100644 (file)
@@ -130,7 +130,7 @@ void cmci_recheck(void)
        unsigned long flags;
        int banks;
 
-       if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+       if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
                return;
        local_irq_save(flags);
        machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
index 0a360d146596b6d01f8c833e655150faa3ae9605..04921017abe04fd748eae0acb73ce0d5866496e9 100644 (file)
@@ -997,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       if (cpuc->enabled)
+       if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1272,7 +1271,7 @@ perf_event_nmi_handler(struct notifier_block *self,
                break;
        case DIE_NMIUNKNOWN:
                this_nmi = percpu_read(irq_stat.__nmi_count);
-               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+               if (this_nmi != __this_cpu_read(pmu_nmi.marked))
                        /* let the kernel handle the unknown nmi */
                        return NOTIFY_DONE;
                /*
@@ -1296,8 +1295,8 @@ perf_event_nmi_handler(struct notifier_block *self,
        this_nmi = percpu_read(irq_stat.__nmi_count);
        if ((handled > 1) ||
                /* the next nmi could be a back-to-back nmi */
-           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-            (__get_cpu_var(pmu_nmi).handled > 1))) {
+           ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+            (__this_cpu_read(pmu_nmi.handled) > 1))) {
                /*
                 * We could have two subsequent back-to-back nmis: The
                 * first handles more than one counter, the 2nd
@@ -1308,8 +1307,8 @@ perf_event_nmi_handler(struct notifier_block *self,
                 * handling more than one counter. We will mark the
                 * next (3rd) and then drop it if unhandled.
                 */
-               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
-               __get_cpu_var(pmu_nmi).handled  = handled;
+               __this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+               __this_cpu_write(pmu_nmi.handled, handled);
        }
 
        return NOTIFY_STOP;
@@ -1484,11 +1483,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
        perf_pmu_disable(pmu);
-       cpuc->group_flag |= PERF_EVENT_TXN;
-       cpuc->n_txn = 0;
+       __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+       __this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1498,14 +1495,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-       cpuc->group_flag &= ~PERF_EVENT_TXN;
+       __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
        /*
         * Truncate the collected events.
         */
-       cpuc->n_added -= cpuc->n_txn;
-       cpuc->n_events -= cpuc->n_txn;
+       __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+       __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
        perf_pmu_enable(pmu);
 }
 
index 24e390e40f2e0b484d4b2b09084deb9d120d59b4..008835c1d79ca2c5fb83b2c2ca2ddfeab657cca1 100644 (file)
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
 
        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
-               if (!__get_cpu_var(cpu_hw_events).enabled)
+               if (!__this_cpu_read(cpu_hw_events.enabled))
                        return;
 
                intel_pmu_enable_bts(hwc->config);
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
 
 static void intel_pmu_reset(void)
 {
-       struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+       struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
        unsigned long flags;
        int idx;
 
index 298448656b6079d074232518cb16e50895b4a5b8..382eb2936d4d57f40db5cb360efb8c7421cdc0cf 100644 (file)
@@ -170,9 +170,9 @@ static void ftrace_mod_code(void)
 
 void ftrace_nmi_enter(void)
 {
-       __get_cpu_var(save_modifying_code) = modifying_code;
+       __this_cpu_write(save_modifying_code, modifying_code);
 
-       if (!__get_cpu_var(save_modifying_code))
+       if (!__this_cpu_read(save_modifying_code))
                return;
 
        if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
@@ -186,7 +186,7 @@ void ftrace_nmi_enter(void)
 
 void ftrace_nmi_exit(void)
 {
-       if (!__get_cpu_var(save_modifying_code))
+       if (!__this_cpu_read(save_modifying_code))
                return;
 
        /* Finish all executions before clearing nmi_running */
index 42c59425450727284615b2a920611e21047ce715..02f07634d265ea0a840892b5bafbd554197644b8 100644 (file)
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
                return -EBUSY;
 
        set_debugreg(info->address, i);
-       __get_cpu_var(cpu_debugreg[i]) = info->address;
+       __this_cpu_write(cpu_debugreg[i], info->address);
 
        dr7 = &__get_cpu_var(cpu_dr7);
        *dr7 |= encode_dr7(i, info->len, info->type);
@@ -397,12 +397,12 @@ void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
 
 void hw_breakpoint_restore(void)
 {
-       set_debugreg(__get_cpu_var(cpu_debugreg[0]), 0);
-       set_debugreg(__get_cpu_var(cpu_debugreg[1]), 1);
-       set_debugreg(__get_cpu_var(cpu_debugreg[2]), 2);
-       set_debugreg(__get_cpu_var(cpu_debugreg[3]), 3);
+       set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0);
+       set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1);
+       set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2);
+       set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3);
        set_debugreg(current->thread.debugreg6, 6);
-       set_debugreg(__get_cpu_var(cpu_dr7), 7);
+       set_debugreg(__this_cpu_read(cpu_dr7), 7);
 }
 EXPORT_SYMBOL_GPL(hw_breakpoint_restore);
 
index 83ec0175f986a5742c2ef85eff69d24100cd44b6..3a43caa3beb79a30e478d200c302d35187424cb4 100644 (file)
@@ -234,7 +234,7 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        exit_idle();
        irq_enter();
 
-       irq = __get_cpu_var(vector_irq)[vector];
+       irq = __this_cpu_read(vector_irq[vector]);
 
        if (!handle_irq(irq, regs)) {
                ack_APIC_irq();
@@ -350,12 +350,12 @@ void fixup_irqs(void)
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irr;
 
-               if (__get_cpu_var(vector_irq)[vector] < 0)
+               if (__this_cpu_read(vector_irq[vector]) < 0)
                        continue;
 
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr  & (1 << (vector % 32))) {
-                       irq = __get_cpu_var(vector_irq)[vector];
+                       irq = __this_cpu_read(vector_irq[vector]);
 
                        data = irq_get_irq_data(irq);
                        raw_spin_lock(&desc->lock);
index 96656f2077511bf280eec0010a087c96649fdaaa..48ff6dcffa02774d4921331011dbcf6fe9070bc3 100644 (file)
@@ -79,7 +79,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
        u32 *isp, arg1, arg2;
 
        curctx = (union irq_ctx *) current_thread_info();
-       irqctx = __get_cpu_var(hardirq_ctx);
+       irqctx = __this_cpu_read(hardirq_ctx);
 
        /*
         * this is where we switch to the IRQ stack. However, if we are
@@ -166,7 +166,7 @@ asmlinkage void do_softirq(void)
 
        if (local_softirq_pending()) {
                curctx = current_thread_info();
-               irqctx = __get_cpu_var(softirq_ctx);
+               irqctx = __this_cpu_read(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;
 
index 5940282bd2f94ed886226bc717c189e593adab50..d91c477b3f6234cf122a08cda38b8d9a571a27cf 100644 (file)
@@ -403,7 +403,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
        kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
@@ -412,7 +412,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
 {
-       __get_cpu_var(current_kprobe) = p;
+       __this_cpu_write(current_kprobe, p);
        kcb->kprobe_saved_flags = kcb->kprobe_old_flags
                = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
        if (is_IF_modifier(p->ainsn.insn))
@@ -586,7 +586,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                preempt_enable_no_resched();
                return 1;
        } else if (kprobe_running()) {
-               p = __get_cpu_var(current_kprobe);
+               p = __this_cpu_read(current_kprobe);
                if (p->break_handler && p->break_handler(p, regs)) {
                        setup_singlestep(p, regs, kcb, 0);
                        return 1;
@@ -759,11 +759,11 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
                orig_ret_address = (unsigned long)ri->ret_addr;
                if (ri->rp && ri->rp->handler) {
-                       __get_cpu_var(current_kprobe) = &ri->rp->kp;
+                       __this_cpu_write(current_kprobe, &ri->rp->kp);
                        get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
                        ri->ret_addr = correct_ret_addr;
                        ri->rp->handler(ri, regs);
-                       __get_cpu_var(current_kprobe) = NULL;
+                       __this_cpu_write(current_kprobe, NULL);
                }
 
                recycle_rp_inst(ri, &empty_rp);
@@ -1202,10 +1202,10 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
                regs->orig_ax = ~0UL;
 
-               __get_cpu_var(current_kprobe) = &op->kp;
+               __this_cpu_write(current_kprobe, &op->kp);
                kcb->kprobe_status = KPROBE_HIT_ACTIVE;
                opt_pre_handler(&op->kp, regs);
-               __get_cpu_var(current_kprobe) = NULL;
+               __this_cpu_write(current_kprobe, NULL);
        }
        preempt_enable_no_resched();
 }
index c852041bfc3d5b70e792dad0cfd42fdd467c3f38..09c08a1c706f0993a59475d8a7446c4cd2eecd93 100644 (file)
@@ -446,7 +446,7 @@ void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
        trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
        trace_cpu_idle((ax>>4)+1, smp_processor_id());
        if (!need_resched()) {
-               if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+               if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
@@ -462,7 +462,7 @@ static void mwait_idle(void)
        if (!need_resched()) {
                trace_power_start(POWER_CSTATE, 1, smp_processor_id());
                trace_cpu_idle(1, smp_processor_id());
-               if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
+               if (cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
index ee886fe10ef4eb8515ae1d2431c0ce8ad3e8dd89..c7149c96d0795f592d2c24252ef471bb310aa471 100644 (file)
@@ -427,7 +427,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 
        cpumask_set_cpu(cpu, c->llc_shared_map);
 
-       if (current_cpu_data.x86_max_cores == 1) {
+       if (__this_cpu_read(cpu_info.x86_max_cores) == 1) {
                cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
                c->booted_cores = 1;
                return;
@@ -1089,7 +1089,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
        preempt_disable();
        smp_cpu_index_default();
-       current_cpu_data = boot_cpu_data;
+       memcpy(__this_cpu_ptr(&cpu_info), &boot_cpu_data, sizeof(cpu_info));
        cpumask_copy(cpu_callin_mask, cpumask_of(0));
        mb();
        /*
@@ -1383,7 +1383,7 @@ void play_dead_common(void)
 
        mb();
        /* Ack it */
-       __get_cpu_var(cpu_state) = CPU_DEAD;
+       __this_cpu_write(cpu_state, CPU_DEAD);
 
        /*
         * With physical CPU hotplug, we should halt the cpu
@@ -1403,11 +1403,11 @@ static inline void mwait_play_dead(void)
        int i;
        void *mwait_ptr;
 
-       if (!cpu_has(&current_cpu_data, X86_FEATURE_MWAIT))
+       if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_MWAIT))
                return;
-       if (!cpu_has(&current_cpu_data, X86_FEATURE_CLFLSH))
+       if (!cpu_has(__this_cpu_ptr(&cpu_info), X86_FEATURE_CLFLSH))
                return;
-       if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
+       if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
                return;
 
        eax = CPUID_MWAIT_LEAF;
@@ -1458,7 +1458,7 @@ static inline void mwait_play_dead(void)
 
 static inline void hlt_play_dead(void)
 {
-       if (current_cpu_data.x86 >= 4)
+       if (__this_cpu_read(cpu_info.x86) >= 4)
                wbinvd();
 
        while (1) {
index 356a0d455cf997cb1bd586d3a13fd8a7c16d4d3c..03d2ea82f35ac7b8dab7950cd2324685304463fb 100644 (file)
@@ -659,7 +659,7 @@ void restore_sched_clock_state(void)
 
        local_irq_save(flags);
 
-       __get_cpu_var(cyc2ns_offset) = 0;
+       __this_cpu_write(cyc2ns_offset, 0);
        offset = cyc2ns_suspend - sched_clock();
 
        for_each_possible_cpu(cpu)
index b989e1f1e5d36b732d1d9c864f369b39a7459105..46a368cb651ea6abb031bc4b2543c92c3ccbb51d 100644 (file)
@@ -976,7 +976,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
        if (kvm_tsc_changes_freq())
                printk_once(KERN_WARNING
                 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-       ret = nsec * __get_cpu_var(cpu_tsc_khz);
+       ret = nsec * __this_cpu_read(cpu_tsc_khz);
        do_div(ret, USEC_PER_SEC);
        return ret;
 }
@@ -1061,7 +1061,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &tsc_timestamp);
        kernel_ns = get_kernel_ns();
-       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+       this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
 
        if (unlikely(this_tsc_khz == 0)) {
                local_irq_restore(flags);
@@ -4427,7 +4427,7 @@ EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
 static void tsc_bad(void *info)
 {
-       __get_cpu_var(cpu_tsc_khz) = 0;
+       __this_cpu_write(cpu_tsc_khz, 0);
 }
 
 static void tsc_khz_changed(void *data)
@@ -4441,7 +4441,7 @@ static void tsc_khz_changed(void *data)
                khz = cpufreq_quick_get(raw_smp_processor_id());
        if (!khz)
                khz = tsc_khz;
-       __get_cpu_var(cpu_tsc_khz) = khz;
+       __this_cpu_write(cpu_tsc_khz, khz);
 }
 
 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
index ff485d361182f814624e238e1ebc418c28c05e79..fc45ba887d051e504dd592be40ec2e78d70eea33 100644 (file)
@@ -121,7 +121,7 @@ inline void __const_udelay(unsigned long xloops)
        asm("mull %%edx"
                :"=d" (xloops), "=&a" (d0)
                :"1" (xloops), "0"
-               (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
+               (this_cpu_read(cpu_info.loops_per_jiffy) * (HZ/4)));
 
        __delay(++xloops);
 }
index 358c8b9c96a79c725766e1627544486eb312a0bc..f24a8533bcdf11087d9c08e143ac89436768703e 100644 (file)
@@ -143,7 +143,7 @@ static inline int has_mux(void)
 
 inline int op_x86_phys_to_virt(int phys)
 {
-       return __get_cpu_var(switch_index) + phys;
+       return __this_cpu_read(switch_index) + phys;
 }
 
 inline int op_x86_virt_to_phys(int virt)
index d769cda540823e12a0dbfbdba923ec481f07abfd..94b745045e450932434a7c1b737c06b1721def16 100644 (file)
@@ -95,8 +95,8 @@ static void ppro_setup_ctrs(struct op_x86_model_spec const *model,
                 * counter width:
                 */
                if (!(eax.split.version_id == 0 &&
-                       current_cpu_data.x86 == 6 &&
-                               current_cpu_data.x86_model == 15)) {
+                       __this_cpu_read(cpu_info.x86) == 6 &&
+                               __this_cpu_read(cpu_info.x86_model) == 15)) {
 
                        if (counter_width < eax.split.bit_width)
                                counter_width = eax.split.bit_width;
@@ -235,8 +235,8 @@ static void arch_perfmon_setup_counters(void)
        eax.full = cpuid_eax(0xa);
 
        /* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
-       if (eax.split.version_id == 0 && current_cpu_data.x86 == 6 &&
-               current_cpu_data.x86_model == 15) {
+       if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
+               __this_cpu_read(cpu_info.x86_model) == 15) {
                eax.split.version_id = 2;
                eax.split.num_counters = 2;
                eax.split.bit_width = 40;
index 44dcad43989dc983af51863fac28677e2cfdaaee..aa8c89ae54cfaf08318b3d9718187dbaa4c41c24 100644 (file)
@@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 
        preempt_disable();
 
-       start = __get_cpu_var(idt_desc).address;
-       end = start + __get_cpu_var(idt_desc).size + 1;
+       start = __this_cpu_read(idt_desc.address);
+       end = start + __this_cpu_read(idt_desc.size) + 1;
 
        xen_mc_flush();
 
index 9e565da5d1f730f50d41fca98f4fbb86c69b5f29..4ec8035e3216208a94c960624befa606aed79884 100644 (file)
@@ -22,7 +22,7 @@ static inline void xen_mc_batch(void)
        unsigned long flags;
        /* need to disable interrupts until this entry is complete */
        local_irq_save(flags);
-       __get_cpu_var(xen_mc_irq_flags) = flags;
+       __this_cpu_write(xen_mc_irq_flags, flags);
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
index 23e061b9327bc45b9ba64024559c87202f7602b0..cc9b1e182fcfad86bc67b56a8e172fb73d9e9ecf 100644 (file)
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
        struct xen_spinlock *prev;
 
-       prev = __get_cpu_var(lock_spinners);
-       __get_cpu_var(lock_spinners) = xl;
+       prev = __this_cpu_read(lock_spinners);
+       __this_cpu_write(lock_spinners, xl);
 
        wmb();                  /* set lock of interest before count */
 
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
        asm(LOCK_PREFIX " decw %0"
            : "+m" (xl->spinners) : : "memory");
        wmb();                  /* decrement count before restoring lock */
-       __get_cpu_var(lock_spinners) = prev;
+       __this_cpu_write(lock_spinners, prev);
 }
 
 static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
        struct xen_spinlock *xl = (struct xen_spinlock *)lock;
        struct xen_spinlock *prev;
-       int irq = __get_cpu_var(lock_kicker_irq);
+       int irq = __this_cpu_read(lock_kicker_irq);
        int ret;
        u64 start;
 
index 5da5e53fb94c20bf6c244dce734e515c92d10b34..067759e3d6a525b53198673d6029ec4d6328cae7 100644 (file)
@@ -135,24 +135,24 @@ static void do_stolen_accounting(void)
 
        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
-       stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
+       stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
 
        if (stolen < 0)
                stolen = 0;
 
        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-       __get_cpu_var(xen_residual_stolen) = stolen;
+       __this_cpu_write(xen_residual_stolen, stolen);
        account_steal_ticks(ticks);
 
        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
-       blocked += __get_cpu_var(xen_residual_blocked);
+       blocked += __this_cpu_read(xen_residual_blocked);
 
        if (blocked < 0)
                blocked = 0;
 
        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-       __get_cpu_var(xen_residual_blocked) = blocked;
+       __this_cpu_write(xen_residual_blocked, blocked);
        account_idle_ticks(ticks);
 }
 
index dcb38f8ddfda09142f5962cfcfe38b9aa9fc5570..a765b823aa9e91066c417dacca634abe9ae9c6e5 100644 (file)
@@ -746,7 +746,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
        struct acpi_processor *pr;
        struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
-       pr = __get_cpu_var(processors);
+       pr = __this_cpu_read(processors);
 
        if (unlikely(!pr))
                return 0;
@@ -787,7 +787,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
        s64 idle_time_ns;
        s64 idle_time;
 
-       pr = __get_cpu_var(processors);
+       pr = __this_cpu_read(processors);
 
        if (unlikely(!pr))
                return 0;
@@ -864,7 +864,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        s64 idle_time;
 
 
-       pr = __get_cpu_var(processors);
+       pr = __this_cpu_read(processors);
 
        if (unlikely(!pr))
                return 0;
index 5a1aa64f4e76c5b9178dbb9d3ef4fe9285ed8791..72a4fcb1774509a5e624cf4bc72805cb041e4d28 100644 (file)
@@ -626,7 +626,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
        preempt_disable();
        /* if over the trickle threshold, use only 1 in 4096 samples */
        if (input_pool.entropy_count > trickle_thresh &&
-           (__get_cpu_var(trickle_count)++ & 0xfff))
+           ((__this_cpu_inc_return(trickle_count) - 1) & 0xfff))
                goto out;
 
        sample.jiffies = jiffies;
index a7f046b0096ca26121a1016922fef359ee4d805c..2b46a7efa0ac78c5c4213609d7846422842ef17f 100644 (file)
@@ -43,9 +43,10 @@ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 };
 
 static inline void get_seq(__u32 *ts, int *cpu)
 {
-       *ts = get_cpu_var(proc_event_counts)++;
+       preempt_disable();
+       *ts = __this_cpu_inc_return(proc_event_counts) - 1;
        *cpu = smp_processor_id();
-       put_cpu_var(proc_event_counts);
+       preempt_enable();
 }
 
 void proc_fork_connector(struct task_struct *task)
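
__this_cpu_inc_return() returns the value after the increment, hence the "- 1" to recover the previous sequence number; preemption is disabled only so that the counter and smp_processor_id() refer to the same CPU. The resulting pattern, in outline:

        preempt_disable();
        *ts = __this_cpu_inc_return(proc_event_counts) - 1;  /* old value of this CPU's counter */
        *cpu = smp_processor_id();
        preempt_enable();
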
index 08d5f05378d9efb1df0fe055240e8e8ed9e7a90b..386888f10df02c0147b1b3664d8b08e7ebe88aa4 100644 (file)
@@ -49,7 +49,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
  */
 static void cpuidle_idle_call(void)
 {
-       struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
+       struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_state *target_state;
        int next_state;
 
index dbf741c95835f953c8d6d554f7f087286f3c9381..23cf8fc933ec037c40251d0c855e736e00141dc8 100644 (file)
@@ -121,7 +121,7 @@ static int gameport_measure_speed(struct gameport *gameport)
        }
 
        gameport_close(gameport);
-       return (cpu_data(raw_smp_processor_id()).loops_per_jiffy *
+       return (this_cpu_read(cpu_info.loops_per_jiffy) *
                (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
 
 #else
index f4e6cf3aceb86ac86233cd6cd00485312a945824..430f875006f22b8535d6c179268655e053528962 100644 (file)
@@ -619,7 +619,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
        s390_idle_check(regs, S390_lowcore.int_clock,
                        S390_lowcore.async_enter_timer);
        irq_enter();
-       __get_cpu_var(s390_idle).nohz_delay = 1;
+       __this_cpu_write(s390_idle.nohz_delay, 1);
        if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
                /* Serve timer interrupts first. */
                clock_comparator_work();
index 971844bbee284a141f36d6221a29fb793a6574a9..9bcf149c4260ad3a8020caccc98801fff40664de 100644 (file)
@@ -377,7 +377,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
        duty_cycle = new_duty_cycle;
        freq = new_freq;
 
-       loops_per_sec = current_cpu_data.loops_per_jiffy;
+       loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy);
        loops_per_sec *= HZ;
 
        /* How many clocks in a microsecond?, avoiding long long divide */
@@ -398,7 +398,7 @@ static int init_timing_params(unsigned int new_duty_cycle,
        dprintk("in init_timing_params, freq=%d, duty_cycle=%d, "
                "clk/jiffy=%ld, pulse=%ld, space=%ld, "
                "conv_us_to_clocks=%ld\n",
-               freq, duty_cycle, current_cpu_data.loops_per_jiffy,
+               freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy),
                pulse_width, space_width, conv_us_to_clocks);
        return 0;
 }
index 65b231178f0580d30a4233e17bf31eb73c7df6f7..1b34a87716418b8af6248d0540d9958f2b0cbd2e 100644 (file)
@@ -78,10 +78,10 @@ void speakup_fake_down_arrow(void)
        /* don't change CPU */
        preempt_disable();
 
-       __get_cpu_var(reporting_keystroke) = true;
+       __this_cpu_write(reporting_keystroke, true);
        input_report_key(virt_keyboard, KEY_DOWN, PRESSED);
        input_report_key(virt_keyboard, KEY_DOWN, RELEASED);
-       __get_cpu_var(reporting_keystroke) = false;
+       __this_cpu_write(reporting_keystroke, false);
 
        /* reenable preemption */
        preempt_enable();
@@ -95,10 +95,5 @@ void speakup_fake_down_arrow(void)
         */
 bool speakup_fake_key_pressed(void)
 {
-       bool is_pressed;
-
-       is_pressed = get_cpu_var(reporting_keystroke);
-       put_cpu_var(reporting_keystroke);
-
-       return is_pressed;
+       return this_cpu_read(reporting_keystroke);
 }
index 31af0ac31a98bffc310cfb79bcaa2d7e03393e81..65f8637d13cf1fa555825824d71c5a4c30e39828 100644 (file)
@@ -355,7 +355,7 @@ static void unmask_evtchn(int port)
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
-               struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+               struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
 
                sync_clear_bit(port, &s->evtchn_mask[0]);
 
@@ -1101,7 +1101,7 @@ static void __xen_evtchn_do_upcall(void)
 {
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
-       struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
+       struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        unsigned count;
 
        do {
@@ -1109,7 +1109,7 @@ static void __xen_evtchn_do_upcall(void)
 
                vcpu_info->evtchn_upcall_pending = 0;
 
-               if (__get_cpu_var(xed_nesting_count)++)
+               if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -1141,8 +1141,8 @@ static void __xen_evtchn_do_upcall(void)
 
                BUG_ON(!irqs_disabled());
 
-               count = __get_cpu_var(xed_nesting_count);
-               __get_cpu_var(xed_nesting_count) = 0;
+               count = __this_cpu_read(xed_nesting_count);
+               __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);
 
 out:
index 5930e382959bc504c58bbb428588a372742d4aa4..2219a76e2caf08415b2e207bc23466d4154d35e0 100644 (file)
@@ -1270,12 +1270,10 @@ static inline void check_irqs_on(void)
 static void bh_lru_install(struct buffer_head *bh)
 {
        struct buffer_head *evictee = NULL;
-       struct bh_lru *lru;
 
        check_irqs_on();
        bh_lru_lock();
-       lru = &__get_cpu_var(bh_lrus);
-       if (lru->bhs[0] != bh) {
+       if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
                struct buffer_head *bhs[BH_LRU_SIZE];
                int in;
                int out = 0;
@@ -1283,7 +1281,8 @@ static void bh_lru_install(struct buffer_head *bh)
                get_bh(bh);
                bhs[out++] = bh;
                for (in = 0; in < BH_LRU_SIZE; in++) {
-                       struct buffer_head *bh2 = lru->bhs[in];
+                       struct buffer_head *bh2 =
+                               __this_cpu_read(bh_lrus.bhs[in]);
 
                        if (bh2 == bh) {
                                __brelse(bh2);
@@ -1298,7 +1297,7 @@ static void bh_lru_install(struct buffer_head *bh)
                }
                while (out < BH_LRU_SIZE)
                        bhs[out++] = NULL;
-               memcpy(lru->bhs, bhs, sizeof(bhs));
+               memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
        }
        bh_lru_unlock();
 
@@ -1313,23 +1312,22 @@ static struct buffer_head *
 lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
 {
        struct buffer_head *ret = NULL;
-       struct bh_lru *lru;
        unsigned int i;
 
        check_irqs_on();
        bh_lru_lock();
-       lru = &__get_cpu_var(bh_lrus);
        for (i = 0; i < BH_LRU_SIZE; i++) {
-               struct buffer_head *bh = lru->bhs[i];
+               struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
 
                if (bh && bh->b_bdev == bdev &&
                                bh->b_blocknr == block && bh->b_size == size) {
                        if (i) {
                                while (i) {
-                                       lru->bhs[i] = lru->bhs[i - 1];
+                                       __this_cpu_write(bh_lrus.bhs[i],
+                                               __this_cpu_read(bh_lrus.bhs[i - 1]));
                                        i--;
                                }
-                               lru->bhs[0] = bh;
+                               __this_cpu_write(bh_lrus.bhs[0], bh);
                        }
                        get_bh(bh);
                        ret = bh;
@@ -3203,22 +3201,23 @@ static void recalc_bh_state(void)
        int i;
        int tot = 0;
 
-       if (__get_cpu_var(bh_accounting).ratelimit++ < 4096)
+       if (__this_cpu_inc_return(bh_accounting.ratelimit) - 1 < 4096)
                return;
-       __get_cpu_var(bh_accounting).ratelimit = 0;
+       __this_cpu_write(bh_accounting.ratelimit, 0);
        for_each_online_cpu(i)
                tot += per_cpu(bh_accounting, i).nr;
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
-       
+
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
-               get_cpu_var(bh_accounting).nr++;
+               preempt_disable();
+               __this_cpu_inc(bh_accounting.nr);
                recalc_bh_state();
-               put_cpu_var(bh_accounting);
+               preempt_enable();
        }
        return ret;
 }
@@ -3228,9 +3227,10 @@ void free_buffer_head(struct buffer_head *bh)
 {
        BUG_ON(!list_empty(&bh->b_assoc_buffers));
        kmem_cache_free(bh_cachep, bh);
-       get_cpu_var(bh_accounting).nr--;
+       preempt_disable();
+       __this_cpu_dec(bh_accounting.nr);
        recalc_bh_state();
-       put_cpu_var(bh_accounting);
+       preempt_enable();
 }
 EXPORT_SYMBOL(free_buffer_head);
 
@@ -3243,9 +3243,8 @@ static void buffer_exit_cpu(int cpu)
                brelse(b->bhs[i]);
                b->bhs[i] = NULL;
        }
-       get_cpu_var(bh_accounting).nr += per_cpu(bh_accounting, cpu).nr;
+       this_cpu_add(bh_accounting.nr, per_cpu(bh_accounting, cpu).nr);
        per_cpu(bh_accounting, cpu).nr = 0;
-       put_cpu_var(bh_accounting);
 }
 
 static int buffer_cpu_notify(struct notifier_block *self,
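
In alloc_buffer_head()/free_buffer_head() above, the get_cpu_var()/put_cpu_var() pair becomes an explicit preempt_disable()/preempt_enable() around __this_cpu_inc()/__this_cpu_dec(), so the counter update itself needs no per-cpu address calculation. A small sketch of that idiom with a hypothetical accounting structure:

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>

struct obj_acct {                       /* hypothetical stand-in for bh_accounting */
        int nr;
        int ratelimit;
};
static DEFINE_PER_CPU(struct obj_acct, obj_acct);

static void obj_acct_inc(void)
{
        preempt_disable();              /* __this_cpu_* assumes preemption is off */
        __this_cpu_inc(obj_acct.nr);
        preempt_enable();
}

static int obj_acct_total(void)         /* slow path: fold all CPUs' counts */
{
        int cpu, tot = 0;

        for_each_online_cpu(cpu)
                tot += per_cpu(obj_acct, cpu).nr;
        return tot;
}
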
index 5ae1d07d4a1275a8ac91996b23753270e5033cb3..6bf9355fa7eb5097c59454ac8d98e46e09c1f621 100644 (file)
@@ -22,15 +22,15 @@ DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
 
 static inline struct pt_regs *get_irq_regs(void)
 {
-       return __get_cpu_var(__irq_regs);
+       return __this_cpu_read(__irq_regs);
 }
 
 static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
 {
-       struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+       struct pt_regs *old_regs;
 
-       old_regs = *pp_regs;
-       *pp_regs = new_regs;
+       old_regs = __this_cpu_read(__irq_regs);
+       __this_cpu_write(__irq_regs, new_regs);
        return old_regs;
 }
 
index 4fd978e7eb83ef8d689d0d313b5441b0631ea275..4d857973d2c94317cf11041a4a7070794fc13a99 100644 (file)
@@ -195,15 +195,9 @@ enum {
 /*
  * io context count accounting
  */
-#define elv_ioc_count_mod(name, __val)                         \
-       do {                                                    \
-               preempt_disable();                              \
-               __get_cpu_var(name) += (__val);                 \
-               preempt_enable();                               \
-       } while (0)
-
-#define elv_ioc_count_inc(name)        elv_ioc_count_mod(name, 1)
-#define elv_ioc_count_dec(name)        elv_ioc_count_mod(name, -1)
+#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
+#define elv_ioc_count_inc(name)        this_cpu_inc(name)
+#define elv_ioc_count_dec(name)        this_cpu_dec(name)
 
 #define elv_ioc_count_read(name)                               \
 ({                                                             \
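
The elv_ioc_count_mod() rewrite above drops the open-coded preempt_disable()/preempt_enable() entirely: the this_cpu_add()/this_cpu_inc()/this_cpu_dec() forms are preemption safe on their own (on x86 they compile to a single %gs-prefixed instruction). Roughly, for a hypothetical per-cpu event counter:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, ioc_events);       /* hypothetical */

static inline void ioc_event_add(unsigned long n)
{
        /* safe with preemption enabled; no get_cpu()/put_cpu() needed */
        this_cpu_add(ioc_events, n);
}

static inline void ioc_event(void)
{
        this_cpu_inc(ioc_events);
}
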
index b676c585574e1723a50fbb7bddf0b96d43706dbe..3a93f73a8acc8001bb69d6fcf2f0cd871030aeaa 100644 (file)
@@ -81,7 +81,8 @@ DECLARE_PER_CPU(int, __kmap_atomic_idx);
 
 static inline int kmap_atomic_idx_push(void)
 {
-       int idx = __get_cpu_var(__kmap_atomic_idx)++;
+       int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+
 #ifdef CONFIG_DEBUG_HIGHMEM
        WARN_ON_ONCE(in_irq() && !irqs_disabled());
        BUG_ON(idx > KM_TYPE_NR);
@@ -91,16 +92,18 @@ static inline int kmap_atomic_idx_push(void)
 
 static inline int kmap_atomic_idx(void)
 {
-       return __get_cpu_var(__kmap_atomic_idx) - 1;
+       return __this_cpu_read(__kmap_atomic_idx) - 1;
 }
 
-static inline int kmap_atomic_idx_pop(void)
+static inline void kmap_atomic_idx_pop(void)
 {
-       int idx = --__get_cpu_var(__kmap_atomic_idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
+       int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+
        BUG_ON(idx < 0);
+#else
+       __this_cpu_dec(__kmap_atomic_idx);
 #endif
-       return idx;
 }
 
 #endif
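
kmap_atomic_idx_push()/_pop() above switch to __this_cpu_inc_return()/__this_cpu_dec_return(), and the pop variant stops returning a value because no caller used it. A sketch of the same per-cpu stack-index idiom, with hypothetical names and the usual assumption that callers run with preemption disabled:

#include <linux/percpu.h>
#include <linux/bug.h>

#define SLOT_MAX 16                             /* hypothetical depth */
static DEFINE_PER_CPU(int, slot_idx);

static inline int slot_push(void)
{
        int idx = __this_cpu_inc_return(slot_idx) - 1;  /* index before push */

        BUG_ON(idx >= SLOT_MAX);
        return idx;
}

static inline void slot_pop(void)
{
        BUG_ON(__this_cpu_dec_return(slot_idx) < 0);
}
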
index ad54c846911b91a169b903f7b1f6fee24d4320a2..44e83ba12b5b1076e4a1af7624420a9c2762ed2f 100644 (file)
@@ -47,7 +47,7 @@ extern unsigned long long nr_context_switches(void);
 
 #ifndef CONFIG_GENERIC_HARDIRQS
 #define kstat_irqs_this_cpu(irq) \
-       (kstat_this_cpu.irqs[irq])
+       (this_cpu_read(kstat.irqs[irq]))
 
 struct irq_desc;
 
index b78edb58ee66164e756b4789baf71ab86e8684c4..dd7c12e875bcc987abea835fc96b18cd3416a0eb 100644 (file)
@@ -305,12 +305,12 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 /* kprobe_running() will just return the current_kprobe on this CPU */
 static inline struct kprobe *kprobe_running(void)
 {
-       return (__get_cpu_var(current_kprobe));
+       return (__this_cpu_read(current_kprobe));
 }
 
 static inline void reset_current_kprobe(void)
 {
-       __get_cpu_var(current_kprobe) = NULL;
+       __this_cpu_write(current_kprobe, NULL);
 }
 
 static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
index 5095b834a6fb52f1f746257805f34cdabf186528..27c3c6fcfad321a4915cba6b3ab6162cb10c19a1 100644 (file)
@@ -240,6 +240,21 @@ extern void __bad_size_call_parameter(void);
        pscr_ret__;                                                     \
 })
 
+#define __pcpu_size_call_return2(stem, variable, ...)                  \
+({                                                                     \
+       typeof(variable) pscr2_ret__;                                   \
+       __verify_pcpu_ptr(&(variable));                                 \
+       switch(sizeof(variable)) {                                      \
+       case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;    \
+       case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;    \
+       case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;    \
+       case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;    \
+       default:                                                        \
+               __bad_size_call_parameter(); break;                     \
+       }                                                               \
+       pscr2_ret__;                                                    \
+})
+
 #define __pcpu_size_call(stem, variable, ...)                          \
 do {                                                                   \
        __verify_pcpu_ptr(&(variable));                                 \
@@ -402,6 +417,89 @@ do {                                                                       \
 # define this_cpu_xor(pcp, val)                __pcpu_size_call(this_cpu_or_, (pcp), (val))
 #endif
 
+#define _this_cpu_generic_add_return(pcp, val)                         \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       __this_cpu_add(pcp, val);                                       \
+       ret__ = __this_cpu_read(pcp);                                   \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_add_return
+# ifndef this_cpu_add_return_1
+#  define this_cpu_add_return_1(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_2
+#  define this_cpu_add_return_2(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_4
+#  define this_cpu_add_return_4(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef this_cpu_add_return_8
+#  define this_cpu_add_return_8(pcp, val)      _this_cpu_generic_add_return(pcp, val)
+# endif
+# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#endif
+
+#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(val))
+#define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
+
+#define _this_cpu_generic_xchg(pcp, nval)                              \
+({     typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       __this_cpu_write(pcp, nval);                                    \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_xchg
+# ifndef this_cpu_xchg_1
+#  define this_cpu_xchg_1(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_2
+#  define this_cpu_xchg_2(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_4
+#  define this_cpu_xchg_4(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef this_cpu_xchg_8
+#  define this_cpu_xchg_8(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
+# endif
+# define this_cpu_xchg(pcp, nval)      \
+       __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define _this_cpu_generic_cmpxchg(pcp, oval, nval)                     \
+({     typeof(pcp) ret__;                                              \
+       preempt_disable();                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       if (ret__ == (oval))                                            \
+               __this_cpu_write(pcp, nval);                            \
+       preempt_enable();                                               \
+       ret__;                                                          \
+})
+
+#ifndef this_cpu_cmpxchg
+# ifndef this_cpu_cmpxchg_1
+#  define this_cpu_cmpxchg_1(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_2
+#  define this_cpu_cmpxchg_2(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_4
+#  define this_cpu_cmpxchg_4(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef this_cpu_cmpxchg_8
+#  define this_cpu_cmpxchg_8(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define this_cpu_cmpxchg(pcp, oval, nval)     \
+       __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * Generic percpu operations that do not require preemption handling.
  * Either we do not care about races or the caller has the
@@ -529,11 +627,87 @@ do {                                                                      \
 # define __this_cpu_xor(pcp, val)      __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
 #endif
 
+#define __this_cpu_generic_add_return(pcp, val)                                \
+({                                                                     \
+       __this_cpu_add(pcp, val);                                       \
+       __this_cpu_read(pcp);                                           \
+})
+
+#ifndef __this_cpu_add_return
+# ifndef __this_cpu_add_return_1
+#  define __this_cpu_add_return_1(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_2
+#  define __this_cpu_add_return_2(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_4
+#  define __this_cpu_add_return_4(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# ifndef __this_cpu_add_return_8
+#  define __this_cpu_add_return_8(pcp, val)    __this_cpu_generic_add_return(pcp, val)
+# endif
+# define __this_cpu_add_return(pcp, val)       __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
+#endif
+
+#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
+
+#define __this_cpu_generic_xchg(pcp, nval)                             \
+({     typeof(pcp) ret__;                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       __this_cpu_write(pcp, nval);                                    \
+       ret__;                                                          \
+})
+
+#ifndef __this_cpu_xchg
+# ifndef __this_cpu_xchg_1
+#  define __this_cpu_xchg_1(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_2
+#  define __this_cpu_xchg_2(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_4
+#  define __this_cpu_xchg_4(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# ifndef __this_cpu_xchg_8
+#  define __this_cpu_xchg_8(pcp, nval) __this_cpu_generic_xchg(pcp, nval)
+# endif
+# define __this_cpu_xchg(pcp, nval)    \
+       __pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
+#endif
+
+#define __this_cpu_generic_cmpxchg(pcp, oval, nval)                    \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       ret__ = __this_cpu_read(pcp);                                   \
+       if (ret__ == (oval))                                            \
+               __this_cpu_write(pcp, nval);                            \
+       ret__;                                                          \
+})
+
+#ifndef __this_cpu_cmpxchg
+# ifndef __this_cpu_cmpxchg_1
+#  define __this_cpu_cmpxchg_1(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_2
+#  define __this_cpu_cmpxchg_2(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_4
+#  define __this_cpu_cmpxchg_4(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef __this_cpu_cmpxchg_8
+#  define __this_cpu_cmpxchg_8(pcp, oval, nval)        __this_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define __this_cpu_cmpxchg(pcp, oval, nval)   \
+       __pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
+#endif
+
 /*
  * IRQ safe versions of the per cpu RMW operations. Note that these operations
  * are *not* safe against modification of the same variable from another
  * processors (which one gets when using regular atomic operations). They are
  * guaranteed to be atomic vs. local interrupts and
  * preemption only.
  */
 #define irqsafe_cpu_generic_to_op(pcp, val, op)                                \
@@ -620,4 +794,33 @@ do {                                                                       \
 # define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
 #endif
 
+#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)                   \
+({                                                                     \
+       typeof(pcp) ret__;                                              \
+       unsigned long flags;                                            \
+       local_irq_save(flags);                                          \
+       ret__ = __this_cpu_read(pcp);                                   \
+       if (ret__ == (oval))                                            \
+               __this_cpu_write(pcp, nval);                            \
+       local_irq_restore(flags);                                       \
+       ret__;                                                          \
+})
+
+#ifndef irqsafe_cpu_cmpxchg
+# ifndef irqsafe_cpu_cmpxchg_1
+#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_2
+#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_4
+#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# ifndef irqsafe_cpu_cmpxchg_8
+#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)       irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
+# endif
+# define irqsafe_cpu_cmpxchg(pcp, oval, nval)          \
+       __pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
+#endif
+
 #endif /* __LINUX_PERCPU_H */
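
The new this_cpu_add_return(), this_cpu_xchg() and this_cpu_cmpxchg() families above follow the file's existing convention: an architecture may supply size-specific fast paths (this_cpu_cmpxchg_4() and friends), __pcpu_size_call_return2() dispatches on sizeof(), and the generic fallback wraps the unprotected __this_cpu_* form in preempt_disable()/preempt_enable() (or local_irq_save() for the irqsafe_ variant). From a caller's point of view the new operations look roughly like this, with hypothetical per-cpu variables:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(unsigned long, seq);
static DEFINE_PER_CPU(void *, cur_item);

static unsigned long next_seq(void)
{
        /* atomic vs. preemption; returns the post-increment value */
        return this_cpu_inc_return(seq);
}

static void *take_item(void)
{
        /* read-and-clear the local CPU's slot in one operation */
        return this_cpu_xchg(cur_item, NULL);
}

static bool claim_item(void *old, void *new)
{
        /* install new only if the local slot still holds old */
        return this_cpu_cmpxchg(cur_item, old, new) == old;
}
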
index 676149a4ac5ff497367a484e2b66c01e915ccefd..89c74861a3da94ea6b21720b40f59ce69a42b918 100644 (file)
@@ -69,7 +69,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
-               __get_cpu_var(process_counts)--;
+               __this_cpu_dec(process_counts);
        }
        list_del_rcu(&p->thread_group);
 }
index dc1a8bbcea7b41e760ac0d97b767e399c6477db7..d9b44f20b6b079c096c524f4f659cc4c63f614c1 100644 (file)
@@ -1285,7 +1285,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                        attach_pid(p, PIDTYPE_SID, task_session(current));
                        list_add_tail(&p->sibling, &p->real_parent->children);
                        list_add_tail_rcu(&p->tasks, &init_task.tasks);
-                       __get_cpu_var(process_counts)++;
+                       __this_cpu_inc(process_counts);
                }
                attach_pid(p, PIDTYPE_PID, pid);
                nr_threads++;
index f2429fc3438c4f1c2094e59fe54415dc30e4bb51..45da2b6920ab25a23e6cb885297ca76280e81e36 100644 (file)
@@ -497,7 +497,7 @@ static inline int hrtimer_is_hres_enabled(void)
  */
 static inline int hrtimer_hres_active(void)
 {
-       return __get_cpu_var(hrtimer_bases).hres_active;
+       return __this_cpu_read(hrtimer_bases.hres_active);
 }
 
 /*
index 90f881904bb1202ba1566f5db2e28eaf128551b9..c58fa7da8aef1683dac88d4d2608d1fdaa5cd4ab 100644 (file)
@@ -77,21 +77,21 @@ void __weak arch_irq_work_raise(void)
  */
 static void __irq_work_queue(struct irq_work *entry)
 {
-       struct irq_work **head, *next;
+       struct irq_work *next;
 
-       head = &get_cpu_var(irq_work_list);
+       preempt_disable();
 
        do {
-               next = *head;
+               next = __this_cpu_read(irq_work_list);
                /* Can assign non-atomic because we keep the flags set. */
                entry->next = next_flags(next, IRQ_WORK_FLAGS);
-       } while (cmpxchg(head, next, entry) != next);
+       } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);
 
        /* The list was empty, raise self-interrupt to start processing. */
        if (!irq_work_next(entry))
                arch_irq_work_raise();
 
-       put_cpu_var(irq_work_list);
+       preempt_enable();
 }
 
 /*
@@ -120,16 +120,16 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
  */
 void irq_work_run(void)
 {
-       struct irq_work *list, **head;
+       struct irq_work *list;
 
-       head = &__get_cpu_var(irq_work_list);
-       if (*head == NULL)
+       if (this_cpu_read(irq_work_list) == NULL)
                return;
 
        BUG_ON(!in_irq());
        BUG_ON(!irqs_disabled());
 
-       list = xchg(head, NULL);
+       list = this_cpu_xchg(irq_work_list, NULL);
+
        while (list != NULL) {
                struct irq_work *entry = list;
 
index 7663e5df0e6f731f1804201a5e6cdf9b6162dd05..77981813a1e75d6c3c830dac5084bc37e47c1080 100644 (file)
@@ -317,12 +317,12 @@ void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
-       __get_cpu_var(kprobe_instance) = kp;
+       __this_cpu_write(kprobe_instance, kp);
 }
 
 static inline void reset_kprobe_instance(void)
 {
-       __get_cpu_var(kprobe_instance) = NULL;
+       __this_cpu_write(kprobe_instance, NULL);
 }
 
 /*
@@ -965,7 +965,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
 {
-       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       struct kprobe *cur = __this_cpu_read(kprobe_instance);
 
        /*
         * if we faulted "during" the execution of a user specified
@@ -980,7 +980,7 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-       struct kprobe *cur = __get_cpu_var(kprobe_instance);
+       struct kprobe *cur = __this_cpu_read(kprobe_instance);
        int ret = 0;
 
        if (cur && cur->break_handler) {
index d0ddfea6579d027809cfb0bce885289bac0f957e..dd4aea806f8ef63ba882af3a47d5981b366d4dc7 100644 (file)
@@ -364,8 +364,8 @@ void rcu_irq_exit(void)
        WARN_ON_ONCE(rdtp->dynticks & 0x1);
 
        /* If the interrupt queued a callback, get out of dyntick mode. */
-       if (__get_cpu_var(rcu_sched_data).nxtlist ||
-           __get_cpu_var(rcu_bh_data).nxtlist)
+       if (__this_cpu_read(rcu_sched_data.nxtlist) ||
+           __this_cpu_read(rcu_bh_data.nxtlist))
                set_need_resched();
 }
 
index c10150cb456bc0316d81e3acd5ccdebb772df394..0823778f87fc697482251ba39351d8e2ac3a9709 100644 (file)
@@ -70,7 +70,7 @@ char *softirq_to_name[NR_SOFTIRQS] = {
 static void wakeup_softirqd(void)
 {
        /* Interrupts are disabled: no need to stop preemption */
-       struct task_struct *tsk = __get_cpu_var(ksoftirqd);
+       struct task_struct *tsk = __this_cpu_read(ksoftirqd);
 
        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
@@ -388,8 +388,8 @@ void __tasklet_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_vec).tail = t;
-       __get_cpu_var(tasklet_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_vec.tail) = t;
+       __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -402,8 +402,8 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
 
        local_irq_save(flags);
        t->next = NULL;
-       *__get_cpu_var(tasklet_hi_vec).tail = t;
-       __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+       *__this_cpu_read(tasklet_hi_vec.tail) = t;
+       __this_cpu_write(tasklet_hi_vec.tail,  &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
 }
@@ -414,8 +414,8 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
 {
        BUG_ON(!irqs_disabled());
 
-       t->next = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = t;
+       t->next = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
 }
 
@@ -426,9 +426,9 @@ static void tasklet_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_vec).head;
-       __get_cpu_var(tasklet_vec).head = NULL;
-       __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
+       list = __this_cpu_read(tasklet_vec.head);
+       __this_cpu_write(tasklet_vec.head, NULL);
+       __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -449,8 +449,8 @@ static void tasklet_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_vec).tail = t;
-               __get_cpu_var(tasklet_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_vec.tail) = t;
+               __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
@@ -461,9 +461,9 @@ static void tasklet_hi_action(struct softirq_action *a)
        struct tasklet_struct *list;
 
        local_irq_disable();
-       list = __get_cpu_var(tasklet_hi_vec).head;
-       __get_cpu_var(tasklet_hi_vec).head = NULL;
-       __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
+       list = __this_cpu_read(tasklet_hi_vec.head);
+       __this_cpu_write(tasklet_hi_vec.head, NULL);
+       __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
        local_irq_enable();
 
        while (list) {
@@ -484,8 +484,8 @@ static void tasklet_hi_action(struct softirq_action *a)
 
                local_irq_disable();
                t->next = NULL;
-               *__get_cpu_var(tasklet_hi_vec).tail = t;
-               __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
+               *__this_cpu_read(tasklet_hi_vec.tail) = t;
+               __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
@@ -802,16 +802,16 @@ static void takeover_tasklets(unsigned int cpu)
 
        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
-               *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
-               __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+               *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
+               this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
-               *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-               __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+               *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
+               __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
index 3308fd7f1b52f170e4da5dd7e26749aca652ef1b..69691eb4b715da804555e04c79aa4ba44ec2bdd3 100644 (file)
@@ -89,8 +89,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
                return -ENOMEM;
 
        if (!info) {
-               int seq = get_cpu_var(taskstats_seqnum)++;
-               put_cpu_var(taskstats_seqnum);
+               int seq = this_cpu_inc_return(taskstats_seqnum) - 1;
 
                reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
        } else
@@ -612,7 +611,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
                fill_tgid_exit(tsk);
        }
 
-       listeners = &__raw_get_cpu_var(listener_array);
+       listeners = __this_cpu_ptr(&listener_array);
        if (list_empty(&listeners->list))
                return;
 
index b6b898d2eeefc1b0627c613b3d23a9bd4b21876e..051bc80a0c435cf47a8dfb8a0f5d2d49d20e188c 100644 (file)
@@ -49,7 +49,7 @@ struct tick_device *tick_get_device(int cpu)
  */
 int tick_is_oneshot_available(void)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
        return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
 }
index aada0e52680ace6cc7d5e09a111a3879c1c6bda3..5cbc101f908b8483938c0153fc1ac023bcd1c784 100644 (file)
@@ -95,7 +95,7 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int tick_program_event(ktime_t expires, int force)
 {
-       struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+       struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
        return tick_dev_program_event(dev, expires, force);
 }
@@ -167,7 +167,7 @@ int tick_oneshot_mode_active(void)
        int ret;
 
        local_irq_save(flags);
-       ret = __get_cpu_var(tick_cpu_device).mode == TICKDEV_MODE_ONESHOT;
+       ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT;
        local_irq_restore(flags);
 
        return ret;
index 6e7b575ac33cf2dcba3f9dc749f7039e6805a3f0..d7ebdf4cea98aa3829ba1ec6a57a67359d6a0c98 100644 (file)
@@ -118,12 +118,12 @@ static void __touch_watchdog(void)
 {
        int this_cpu = smp_processor_id();
 
-       __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+       __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-       __raw_get_cpu_var(watchdog_touch_ts) = 0;
+       __this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -167,12 +167,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-       unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+       unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-       if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+       if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;
 
-       __get_cpu_var(hrtimer_interrupts_saved) = hrint;
+       __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
 }
 #endif
@@ -205,8 +205,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
-       if (__get_cpu_var(watchdog_nmi_touch) == true) {
-               __get_cpu_var(watchdog_nmi_touch) = false;
+       if (__this_cpu_read(watchdog_nmi_touch) == true) {
+               __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }
 
@@ -220,7 +220,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                int this_cpu = smp_processor_id();
 
                /* only print hardlockups once */
-               if (__get_cpu_var(hard_watchdog_warn) == true)
+               if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;
 
                if (hardlockup_panic)
@@ -228,16 +228,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi,
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-               __get_cpu_var(hard_watchdog_warn) = true;
+               __this_cpu_write(hard_watchdog_warn, true);
                return;
        }
 
-       __get_cpu_var(hard_watchdog_warn) = false;
+       __this_cpu_write(hard_watchdog_warn, false);
        return;
 }
 static void watchdog_interrupt_count(void)
 {
-       __get_cpu_var(hrtimer_interrupts)++;
+       __this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -246,7 +246,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-       unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+       unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
 
@@ -254,18 +254,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        watchdog_interrupt_count();
 
        /* kick the softlockup detector */
-       wake_up_process(__get_cpu_var(softlockup_watchdog));
+       wake_up_process(__this_cpu_read(softlockup_watchdog));
 
        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
        if (touch_ts == 0) {
-               if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+               if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
-                       __get_cpu_var(softlockup_touch_sync) = false;
+                       __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
@@ -281,7 +281,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
-               if (__get_cpu_var(soft_watchdog_warn) == true)
+               if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -296,9 +296,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
-               __get_cpu_var(soft_watchdog_warn) = true;
+               __this_cpu_write(soft_watchdog_warn, true);
        } else
-               __get_cpu_var(soft_watchdog_warn) = false;
+               __this_cpu_write(soft_watchdog_warn, false);
 
        return HRTIMER_RESTART;
 }
index 604678d7d06d9b101feafb31da72654d5f618500..28f2c33c6b537ac07f5d2692fe08b0d48a8dae91 100644 (file)
@@ -72,18 +72,16 @@ EXPORT_SYMBOL(percpu_counter_set);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
        s64 count;
-       s32 *pcount;
 
        preempt_disable();
-       pcount = this_cpu_ptr(fbc->counters);
-       count = *pcount + amount;
+       count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&fbc->lock);
                fbc->count += count;
-               *pcount = 0;
+               __this_cpu_write(*fbc->counters, 0);
                spin_unlock(&fbc->lock);
        } else {
-               *pcount = count;
+               __this_cpu_write(*fbc->counters, count);
        }
        preempt_enable();
 }
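
__percpu_counter_add() above no longer takes the address of its per-cpu slot: it reads and writes *fbc->counters through __this_cpu_read()/__this_cpu_write(), i.e. the accessors also work on dereferenced __percpu pointers, not only on statically defined per-cpu variables. A reduced sketch of that batched update, assuming a percpu_counter-like structure whose counters field came from alloc_percpu(s32):

#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct batch_counter {                  /* hypothetical percpu_counter lookalike */
        spinlock_t lock;
        s64 count;
        s32 __percpu *counters;         /* from alloc_percpu(s32) */
};

static void batch_add(struct batch_counter *bc, s64 amount, s32 batch)
{
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*bc->counters) + amount;
        if (count >= batch || count <= -batch) {
                spin_lock(&bc->lock);
                bc->count += count;             /* fold into the global count */
                __this_cpu_write(*bc->counters, 0);
                spin_unlock(&bc->lock);
        } else {
                __this_cpu_write(*bc->counters, count);
        }
        preempt_enable();
}
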
index 02ba91230b99269f45beea6387c165e042800347..3dd4984bdef849c4a172c5e2d5d6f2d230137f8b 100644 (file)
@@ -293,12 +293,8 @@ static void *pcpu_mem_alloc(size_t size)
 
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);
-       else {
-               void *ptr = vmalloc(size);
-               if (ptr)
-                       memset(ptr, 0, size);
-               return ptr;
-       }
+       else
+               return vzalloc(size);
 }
 
 /**
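
pcpu_mem_alloc() above also picks up vzalloc(), which returns zeroed, virtually contiguous memory in one call instead of vmalloc() followed by memset(). The small/large split then collapses to something like this hypothetical helper:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

static void *zalloc_any(size_t size)
{
        if (size <= PAGE_SIZE)
                return kzalloc(size, GFP_KERNEL);       /* small: slab */
        return vzalloc(size);                           /* large: zeroed vmalloc */
}
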
index 39e92c0e6273df713e08eea11cc047edbf8dc838..e9f92987954ab81384f4ae1d6766dad3848aa3f3 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -829,12 +829,12 @@ static void init_reap_node(int cpu)
 
 static void next_reap_node(void)
 {
-       int node = __get_cpu_var(slab_reap_node);
+       int node = __this_cpu_read(slab_reap_node);
 
        node = next_node(node, node_online_map);
        if (unlikely(node >= MAX_NUMNODES))
                node = first_node(node_online_map);
-       __get_cpu_var(slab_reap_node) = node;
+       __this_cpu_write(slab_reap_node, node);
 }
 
 #else
@@ -1012,7 +1012,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-       int node = __get_cpu_var(slab_reap_node);
+       int node = __this_cpu_read(slab_reap_node);
 
        if (l3->alien) {
                struct array_cache *ac = l3->alien[node];
index 33c33e7a0f9b24f1432213118c70a8c1334cc741..312d728976f1661c4fa335a1ead46356b3bf091b 100644 (file)
@@ -167,35 +167,23 @@ static void refresh_zone_stat_thresholds(void)
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                                int delta)
 {
-       struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-
-       s8 *p = pcp->vm_stat_diff + item;
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
        long x;
+       long t;
+
+       x = delta + __this_cpu_read(*p);
 
-       x = delta + *p;
+       t = __this_cpu_read(pcp->stat_threshold);
 
-       if (unlikely(x > pcp->stat_threshold || x < -pcp->stat_threshold)) {
+       if (unlikely(x > t || x < -t)) {
                zone_page_state_add(x, zone, item);
                x = 0;
        }
-       *p = x;
+       __this_cpu_write(*p, x);
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
-/*
- * For an unknown interrupt state
- */
-void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-                                       int delta)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __mod_zone_page_state(zone, item, delta);
-       local_irq_restore(flags);
-}
-EXPORT_SYMBOL(mod_zone_page_state);
-
 /*
  * Optimized increment and decrement functions.
  *
@@ -221,16 +209,17 @@ EXPORT_SYMBOL(mod_zone_page_state);
  */
 void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-       s8 *p = pcp->vm_stat_diff + item;
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
+       s8 v, t;
 
-       (*p)++;
+       v = __this_cpu_inc_return(*p);
+       t = __this_cpu_read(pcp->stat_threshold);
+       if (unlikely(v > t)) {
+               s8 overstep = t >> 1;
 
-       if (unlikely(*p > pcp->stat_threshold)) {
-               int overstep = pcp->stat_threshold / 2;
-
-               zone_page_state_add(*p + overstep, zone, item);
-               *p = -overstep;
+               zone_page_state_add(v + overstep, zone, item);
+               __this_cpu_write(*p, -overstep);
        }
 }
 
@@ -242,16 +231,17 @@ EXPORT_SYMBOL(__inc_zone_page_state);
 
 void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 {
-       struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
-       s8 *p = pcp->vm_stat_diff + item;
-
-       (*p)--;
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
+       s8 v, t;
 
-       if (unlikely(*p < - pcp->stat_threshold)) {
-               int overstep = pcp->stat_threshold / 2;
+       v = __this_cpu_dec_return(*p);
+       t = __this_cpu_read(pcp->stat_threshold);
+       if (unlikely(v < - t)) {
+               s8 overstep = t >> 1;
 
-               zone_page_state_add(*p - overstep, zone, item);
-               *p = overstep;
+               zone_page_state_add(v - overstep, zone, item);
+               __this_cpu_write(*p, overstep);
        }
 }
 
@@ -261,6 +251,92 @@ void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
 }
 EXPORT_SYMBOL(__dec_zone_page_state);
 
+#ifdef CONFIG_CMPXCHG_LOCAL
+/*
+ * If we have cmpxchg_local support then we do not need to incur the overhead
+ * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
+ *
+ * mod_state() modifies the zone counter state through atomic per cpu
+ * operations.
+ *
+ * Overstep mode specifies how overstep should be handled:
+ *     0       No overstepping
+ *     1       Overstepping half of threshold
+ *     -1      Overstepping minus half of threshold
+*/
+static inline void mod_state(struct zone *zone,
+       enum zone_stat_item item, int delta, int overstep_mode)
+{
+       struct per_cpu_pageset __percpu *pcp = zone->pageset;
+       s8 __percpu *p = pcp->vm_stat_diff + item;
+       long o, n, t, z;
+
+       do {
+               z = 0;  /* overflow to zone counters */
+
+               /*
+                * The fetching of the stat_threshold is racy. We may apply
+                * a counter threshold to the wrong cpu if we get
+                * rescheduled while executing here. However, the following
+                * will apply the threshold again and therefore bring the
+                * counter under the threshold.
+                */
+               t = this_cpu_read(pcp->stat_threshold);
+
+               o = this_cpu_read(*p);
+               n = delta + o;
+
+               if (n > t || n < -t) {
+                       int os = overstep_mode * (t >> 1) ;
+
+                       /* Overflow must be added to zone counters */
+                       z = n + os;
+                       n = -os;
+               }
+       } while (this_cpu_cmpxchg(*p, o, n) != o);
+
+       if (z)
+               zone_page_state_add(z, zone, item);
+}
+
+void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+                                       int delta)
+{
+       mod_state(zone, item, delta, 0);
+}
+EXPORT_SYMBOL(mod_zone_page_state);
+
+void inc_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+       mod_state(zone, item, 1, 1);
+}
+
+void inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+       mod_state(page_zone(page), item, 1, 1);
+}
+EXPORT_SYMBOL(inc_zone_page_state);
+
+void dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+       mod_state(page_zone(page), item, -1, -1);
+}
+EXPORT_SYMBOL(dec_zone_page_state);
+#else
+/*
+ * Use interrupt disable to serialize counter updates
+ */
+void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
+                                       int delta)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __mod_zone_page_state(zone, item, delta);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(mod_zone_page_state);
+
 void inc_zone_state(struct zone *zone, enum zone_stat_item item)
 {
        unsigned long flags;
@@ -291,6 +367,7 @@ void dec_zone_page_state(struct page *page, enum zone_stat_item item)
        local_irq_restore(flags);
 }
 EXPORT_SYMBOL(dec_zone_page_state);
+#endif
 
 /*
  * Update the zone counters for one cpu.
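
The new mod_state() above is the payoff of this_cpu_cmpxchg(): with CONFIG_CMPXCHG_LOCAL the counter fold runs with interrupts and preemption enabled, retrying if this CPU's differential changed underneath it and spilling into the global zone counter only when the threshold is crossed. The same retry shape, reduced to a single hypothetical per-cpu differential feeding a lock-protected global:

#include <linux/percpu.h>
#include <linux/spinlock.h>

#define DIFF_THRESHOLD  32              /* hypothetical batch size */

static DEFINE_PER_CPU(long, diff);
static DEFINE_SPINLOCK(global_lock);
static long global_count;               /* stands in for the zone's atomic counter */

static void mod_diff(long delta)
{
        long o, n, spill;

        do {
                spill = 0;
                o = this_cpu_read(diff);
                n = o + delta;
                if (n > DIFF_THRESHOLD || n < -DIFF_THRESHOLD) {
                        spill = n;      /* push the whole batch to the global */
                        n = 0;
                }
                /* retry if another context changed this CPU's diff meanwhile */
        } while (this_cpu_cmpxchg(diff, o, n) != o);

        if (spill) {
                spin_lock(&global_lock);
                global_count += spill;
                spin_unlock(&global_lock);
        }
}
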