perf_events, x86: Clean up hw_perf_*_all() implementation
author		Peter Zijlstra <a.p.zijlstra@chello.nl>
		Wed, 27 Jan 2010 22:07:47 +0000 (23:07 +0100)
committer	Ingo Molnar <mingo@elte.hu>
		Fri, 29 Jan 2010 08:01:47 +0000 (09:01 +0100)
Put the recursion avoidance code in the generic hook instead of
replicating it in each implementation.
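
In other words: before this patch, the p6, intel and amd backends each carried a private copy of the cpuc->enabled check and barrier(); afterwards the generic hw_perf_disable()/hw_perf_enable() hooks do it once before dispatching to x86_pmu.disable_all()/enable_all(). A minimal standalone C sketch of that shape (backend_disable_all() and the printf are illustrative stand-ins for the real MSR writes; cpuc, ->enabled and hw_perf_disable() mirror names from the diff below):

#include <stdio.h>

struct cpu_hw_events {
	int enabled;
};

static struct cpu_hw_events cpuc = { .enabled = 1 };

/* Backend: touches only "hardware"; the guard no longer lives here. */
static void backend_disable_all(void)
{
	printf("clearing the PMU global enable bit\n");
}

/* Generic hook: the single remaining copy of the recursion guard. */
static void hw_perf_disable(void)
{
	if (!cpuc.enabled)
		return;		/* already disabled: nested call is a no-op */

	cpuc.enabled = 0;
	__asm__ __volatile__("" ::: "memory");	/* barrier(): order the flag
						   store before the MSR writes */
	backend_disable_all();
}

int main(void)
{
	hw_perf_disable();	/* performs the actual disable */
	hw_perf_disable();	/* nested call: returns early, touches nothing */
	return 0;
}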

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <20100127221122.057507285@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 951213a514892daeb842e0ca7a512d22d04e2fc4..cf10839f20ea4814999b13812fa9c97db9cdb110 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1099,15 +1099,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 
 static void p6_pmu_disable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;
 
-       if (!cpuc->enabled)
-               return;
-
-       cpuc->enabled = 0;
-       barrier();
-
        /* p6 only has one enable register */
        rdmsrl(MSR_P6_EVNTSEL0, val);
        val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1118,12 +1111,6 @@ static void intel_pmu_disable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       if (!cpuc->enabled)
-               return;
-
-       cpuc->enabled = 0;
-       barrier();
-
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
@@ -1135,17 +1122,6 @@ static void amd_pmu_disable_all(void)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;
 
-       if (!cpuc->enabled)
-               return;
-
-       cpuc->enabled = 0;
-       /*
-        * ensure we write the disable before we start disabling the
-        * events proper, so that amd_pmu_enable_event() does the
-        * right thing.
-        */
-       barrier();
-
        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                u64 val;
 
@@ -1166,23 +1142,20 @@ void hw_perf_disable(void)
        if (!x86_pmu_initialized())
                return;
 
-       if (cpuc->enabled)
-               cpuc->n_added = 0;
+       if (!cpuc->enabled)
+               return;
+
+       cpuc->n_added = 0;
+       cpuc->enabled = 0;
+       barrier();
 
        x86_pmu.disable_all();
 }
 
 static void p6_pmu_enable_all(void)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long val;
 
-       if (cpuc->enabled)
-               return;
-
-       cpuc->enabled = 1;
-       barrier();
-
        /* p6 only has one enable register */
        rdmsrl(MSR_P6_EVNTSEL0, val);
        val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
@@ -1193,12 +1166,6 @@ static void intel_pmu_enable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-       if (cpuc->enabled)
-               return;
-
-       cpuc->enabled = 1;
-       barrier();
-
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
 
        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
@@ -1217,12 +1184,6 @@ static void amd_pmu_enable_all(void)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;
 
-       if (cpuc->enabled)
-               return;
-
-       cpuc->enabled = 1;
-       barrier();
-
        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                struct perf_event *event = cpuc->events[idx];
                u64 val;
@@ -1417,6 +1378,10 @@ void hw_perf_enable(void)
 
        if (!x86_pmu_initialized())
                return;
+
+       if (cpuc->enabled)
+               return;
+
        if (cpuc->n_added) {
                /*
                 * apply assignment obtained either from
@@ -1461,6 +1426,10 @@ void hw_perf_enable(void)
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }
+
+       cpuc->enabled = 1;
+       barrier();
+
        x86_pmu.enable_all();
 }
 
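
Note the ordering that results on both paths: hw_perf_disable() clears cpuc->enabled and issues barrier() before calling x86_pmu.disable_all(), which is exactly the guarantee the comment deleted from amd_pmu_disable_all() asked for (amd_pmu_enable_event() must observe the flag cleared before the per-counter MSRs change); hw_perf_enable() symmetrically does the event rescheduling first, then sets cpuc->enabled and barriers immediately before x86_pmu.enable_all().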