perf_counter: x86: fixup nmi_watchdog vs perf_counter boo-boo
author Peter Zijlstra <peterz@infradead.org>
Mon, 4 May 2009 16:47:44 +0000 (18:47 +0200)
committer Ingo Molnar <mingo@elte.hu>
Mon, 4 May 2009 17:30:29 +0000 (19:30 +0200)
Invert the atomic_inc_not_zero() test so that we will indeed detect the
first activation.
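
For reference, a minimal userspace sketch of the pattern this restores, with
C11 stdatomic and a pthread mutex standing in for the kernel primitives
(atomic_inc_not_zero() is reimplemented below and reserve_pmc_hardware() is a
stub): the helper succeeds only when the count is already nonzero, so the
first activation must fail the fast path and take the mutex-protected slow
path that reserves the hardware.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int active_counters;
static pthread_mutex_t pmc_reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the real hardware reservation. */
static bool reserve_pmc_hardware(void)
{
        return true;
}

/* Increment only if the count is nonzero; true on success. */
static bool atomic_inc_not_zero(atomic_int *v)
{
        int old = atomic_load(v);

        while (old != 0) {
                /* A failed CAS reloads 'old' with the current value. */
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return true;
        }
        return false;
}

static int counter_init(void)
{
        int err = 0;

        if (!atomic_inc_not_zero(&active_counters)) {
                /* Count was zero: possibly the first user. */
                pthread_mutex_lock(&pmc_reserve_mutex);
                if (atomic_load(&active_counters) == 0 &&
                    !reserve_pmc_hardware())
                        err = -1; /* -EBUSY in the kernel */
                else
                        atomic_fetch_add(&active_counters, 1);
                pthread_mutex_unlock(&pmc_reserve_mutex);
        }
        return err;
}

int main(void)
{
        /* First call reserves; second takes the fast path. */
        printf("init #1: %d, count %d\n", counter_init(),
               atomic_load(&active_counters));
        printf("init #2: %d, count %d\n", counter_init(),
               atomic_load(&active_counters));
        return 0;
}

With the branch un-inverted as in the buggy version, the first call skips
both the reservation and the increment, so the count stays at zero and the
NMI handler's !atomic_read(&active_counters) check below makes it ignore
every counter interrupt.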

Also rename the global num_counters, since it's easy to confuse with
x86_pmu.num_counters.

[ Impact: fix non-working perfcounters on AMD CPUs, cleanup ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241455664.7620.4938.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c

index d4c0cc9d326370e2992d77d378d891c77ca9d71e..196b58f044483d1f1ac60dee643b4a250bb8be5c 100644 (file)
@@ -171,7 +171,7 @@ again:
        return new_raw_count;
 }
 
-static atomic_t num_counters;
+static atomic_t active_counters;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
@@ -224,7 +224,7 @@ static void release_pmc_hardware(void)
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
 {
-       if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
+       if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                mutex_unlock(&pmc_reserve_mutex);
        }
@@ -248,12 +248,12 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
                return -ENODEV;
 
        err = 0;
-       if (atomic_inc_not_zero(&num_counters)) {
+       if (!atomic_inc_not_zero(&active_counters)) {
                mutex_lock(&pmc_reserve_mutex);
-               if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
+               if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
                        err = -EBUSY;
                else
-                       atomic_inc(&num_counters);
+                       atomic_inc(&active_counters);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
@@ -280,7 +280,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
        if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
                hwc->nmi = 1;
 
-       hwc->irq_period         = hw_event->irq_period;
+       hwc->irq_period = hw_event->irq_period;
        if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
                hwc->irq_period = x86_pmu.max_period;
 
@@ -871,7 +871,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
        struct pt_regs *regs;
        int ret;
 
-       if (!atomic_read(&num_counters))
+       if (!atomic_read(&active_counters))
                return NOTIFY_DONE;
 
        switch (cmd) {