Revert "sparc: replace __get_cpu_var uses"
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 6 Mar 2014 08:20:08 +0000 (19:20 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 6 Mar 2014 08:20:08 +0000 (19:20 +1100)
This reverts commit 5aa661aa13d4e8191238efaaa8054bf56084e7e4.

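For reference, the conversion being reverted changes only the form of the per-CPU accesses, not their meaning: __get_cpu_var(x) is an lvalue naming the current CPU's instance of a per-CPU variable, while __this_cpu_read()/__this_cpu_write() and this_cpu_ptr() spell the same read, write, and address-of operations out explicitly. A minimal sketch of the two styles side by side (hypothetical variable names, assuming a kernel build context with preemption already disabled around the accesses):

	#include <linux/percpu.h>

	/* Hypothetical per-CPU state, mirroring the patterns touched below. */
	struct demo_batch {
		unsigned long nr;
	};

	static DEFINE_PER_CPU(unsigned int, demo_count);
	static DEFINE_PER_CPU(struct demo_batch, demo_batch);

	static void demo_old_style(void)
	{
		struct demo_batch *b;

		/* __get_cpu_var() is an lvalue: read it, assign to it,
		 * or take its address to reach a per-CPU struct.
		 */
		if (__get_cpu_var(demo_count) == 0)
			__get_cpu_var(demo_count) = 1;

		b = &__get_cpu_var(demo_batch);
		b->nr++;
	}

	static void demo_new_style(void)
	{
		struct demo_batch *b;

		/* The equivalent accessors this commit reverts away from. */
		if (__this_cpu_read(demo_count) == 0)
			__this_cpu_write(demo_count, 1);

		b = this_cpu_ptr(&demo_batch);
		b->nr++;
	}
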
arch/sparc/include/asm/cpudata_32.h
arch/sparc/include/asm/cpudata_64.h
arch/sparc/kernel/kprobes.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/nmi.c
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/time_64.c
arch/sparc/mm/tlb.c

diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h
index fc191baa84746a00cfaf719ad0b48975a79b3e67..0300d94c25b35722f30e72d36d2c558ebf3d8bc3 100644
--- a/arch/sparc/include/asm/cpudata_32.h
+++ b/arch/sparc/include/asm/cpudata_32.h
@@ -26,6 +26,6 @@ typedef struct {
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
-#define local_cpu_data() __this_cpu_read(__cpu_data)
+#define local_cpu_data() __get_cpu_var(__cpu_data)
 
 #endif /* _SPARC_CPUDATA_H */
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index cc7c054019fd0e1315f583e9079ff06166c51ef6..050ef35b9dcf5224ead325d70a9274f9209ddc84 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -33,7 +33,7 @@ typedef struct {
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)                per_cpu(__cpu_data, (__cpu))
-#define local_cpu_data()       __this_cpu_read(__cpu_data)
+#define local_cpu_data()       __get_cpu_var(__cpu_data)
 
 extern const struct seq_operations cpuinfo_op;
 
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index c6a2825930a0e9809f21966188f6f5452558f943..1b0973503197508404967e8bb0ed58cc934b982e 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -83,7 +83,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 
 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-       __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+       __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
        kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
@@ -92,7 +92,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                                struct kprobe_ctlblk *kcb)
 {
-       __this_cpu_write(current_kprobe, p);
+       __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_orig_tnpc = regs->tnpc;
        kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
 }
@@ -155,7 +155,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
                                ret = 1;
                                goto no_kprobe;
                        }
-                       p = __this_cpu_read(current_kprobe);
+                       p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 05161b53e797f318524c7c6c0293261da87c995b..6edf955f987caabd6427eb3bd80b8c7603b02563 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -354,7 +354,7 @@ static void leon_ipi_resched(int cpu)
 
 void leonsmp_ipi_interrupt(void)
 {
-       struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
+       struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
 
        if (work->single) {
                work->single = 0;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index b896d83cd41fba901489910b7edc35e83b18fb75..6479256fd5a4b650a2f5c3ca3cf5c501033dc9fb 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -111,20 +111,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
                pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
 
        sum = local_cpu_data().irq0_irqs;
-       if (__this_cpu_read(nmi_touch)) {
-               __this_cpu_write(nmi_touch, 0);
+       if (__get_cpu_var(nmi_touch)) {
+               __get_cpu_var(nmi_touch) = 0;
                touched = 1;
        }
-       if (!touched && __this_cpu_read(last_irq_sum) == sum) {
+       if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                __this_cpu_inc(alert_counter);
                if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
                                regs, panic_on_timeout);
        } else {
-               __this_cpu_write(last_irq_sum, sum);
+               __get_cpu_var(last_irq_sum) = sum;
                __this_cpu_write(alert_counter, 0);
        }
-       if (__this_cpu_read(wd_enabled)) {
+       if (__get_cpu_var(wd_enabled)) {
                pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
                pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
        }
@@ -166,7 +166,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
 void stop_nmi_watchdog(void *unused)
 {
        pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
-       __this_cpu_write(wd_enabled, 0);
+       __get_cpu_var(wd_enabled) = 0;
        atomic_dec(&nmi_active);
 }
 
@@ -219,7 +219,7 @@ error:
 
 void start_nmi_watchdog(void *unused)
 {
-       __this_cpu_write(wd_enabled, 1);
+       __get_cpu_var(wd_enabled) = 1;
        atomic_inc(&nmi_active);
 
        pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
@@ -230,7 +230,7 @@ void start_nmi_watchdog(void *unused)
 
 static void nmi_adjust_hz_one(void *unused)
 {
-       if (!__this_cpu_read(wd_enabled))
+       if (!__get_cpu_var(wd_enabled))
                return;
 
        pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 49d33b178793c59ca04cd55a60ba5161f9f5e508..d07f6b29aed88510f1625d90655a4fbd10159b4c 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -48,7 +48,7 @@ static int iommu_batch_initialized;
 /* Interrupts must be disabled.  */
 static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
 {
-       struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
        p->dev          = dev;
        p->prot         = prot;
@@ -94,7 +94,7 @@ static long iommu_batch_flush(struct iommu_batch *p)
 
 static inline void iommu_batch_new_entry(unsigned long entry)
 {
-       struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
        if (p->entry + p->npages == entry)
                return;
@@ -106,7 +106,7 @@ static inline void iommu_batch_new_entry(unsigned long entry)
 /* Interrupts must be disabled.  */
 static inline long iommu_batch_add(u64 phys_page)
 {
-       struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
        BUG_ON(p->npages >= PGLIST_NENTS);
 
@@ -120,7 +120,7 @@ static inline long iommu_batch_add(u64 phys_page)
 /* Interrupts must be disabled.  */
 static inline long iommu_batch_end(void)
 {
-       struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
+       struct iommu_batch *p = &__get_cpu_var(iommu_batch);
 
        BUG_ON(p->npages >= PGLIST_NENTS);
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 9bf55dee57825062d3997f6d6df04e389c3e6681..b5c38faa4eadf423db3bc20df35d2c98cd5dabf3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1013,7 +1013,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
 
 static void sparc_pmu_enable(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
 
        if (cpuc->enabled)
@@ -1031,7 +1031,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
 
 static void sparc_pmu_disable(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
 
        if (!cpuc->enabled)
@@ -1065,7 +1065,7 @@ static int active_event_index(struct cpu_hw_events *cpuc,
 
 static void sparc_pmu_start(struct perf_event *event, int flags)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = active_event_index(cpuc, event);
 
        if (flags & PERF_EF_RELOAD) {
@@ -1080,7 +1080,7 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
 
 static void sparc_pmu_stop(struct perf_event *event, int flags)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = active_event_index(cpuc, event);
 
        if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -1096,7 +1096,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
 
 static void sparc_pmu_del(struct perf_event *event, int _flags)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long flags;
        int i;
 
@@ -1133,7 +1133,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
 
 static void sparc_pmu_read(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = active_event_index(cpuc, event);
        struct hw_perf_event *hwc = &event->hw;
 
@@ -1145,7 +1145,7 @@ static DEFINE_MUTEX(pmc_grab_mutex);
 
 static void perf_stop_nmi_watchdog(void *unused)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
 
        stop_nmi_watchdog(NULL);
@@ -1356,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count,
 
 static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n0, ret = -EAGAIN;
        unsigned long flags;
 
@@ -1498,7 +1498,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
  */
 static void sparc_pmu_start_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        perf_pmu_disable(pmu);
        cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1511,7 +1511,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
  */
 static void sparc_pmu_cancel_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
        perf_pmu_enable(pmu);
@@ -1524,13 +1524,13 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
  */
 static int sparc_pmu_commit_txn(struct pmu *pmu)
 {
-       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int n;
 
        if (!sparc_pmu)
                return -EINVAL;
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
+       cpuc = &__get_cpu_var(cpu_hw_events);
        n = cpuc->n_events;
        if (check_excludes(cpuc->event, 0, n))
                return -EINVAL;
@@ -1601,7 +1601,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
 
        regs = args->regs;
 
-       cpuc = this_cpu_ptr(&cpu_hw_events);
+       cpuc = &__get_cpu_var(cpu_hw_events);
 
        /* If the PMU has the TOE IRQ enable bits, we need to do a
         * dummy write to the %pcr to clear the overflow bits and thus
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 9d98e5002a09a483eb87b23bd68b33a6e6f83e05..d5c319553fd0874903515106920d283c066d8fbd 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -204,7 +204,7 @@ static void __init smp4d_ipi_init(void)
 
 void sun4d_ipi_interrupt(void)
 {
-       struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
+       struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
 
        if (work->single) {
                work->single = 0;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index f3a661d9725337523832eebf310844edc9bc6993..c3d82b5f54ca8501960d9145990304d5b15e4a3c 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -766,7 +766,7 @@ void setup_sparc64_timer(void)
                             : /* no outputs */
                             : "r" (pstate));
 
-       sevt = this_cpu_ptr(&sparc64_events);
+       sevt = &__get_cpu_var(sparc64_events);
 
        memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
        sevt->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 89ab798c1cd7a82de481240ad0b9914e7a6cb122..b12cb5e72812140688d771ed0788b6a2cf2cfc16 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,14 +52,14 @@ out:
 
 void arch_enter_lazy_mmu_mode(void)
 {
-       struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
 
        tb->active = 1;
 }
 
 void arch_leave_lazy_mmu_mode(void)
 {
-       struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+       struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
 
        if (tb->tlb_nr)
                flush_tlb_pending();