git.karo-electronics.de Git - mv-sheeva.git/commitdiff
perf, x86: Pass enable bit mask to __x86_pmu_enable_event()
author: Robert Richter <robert.richter@amd.com>
Tue, 13 Apr 2010 20:23:14 +0000 (22:23 +0200)
committer: Ingo Molnar <mingo@elte.hu>
Fri, 7 May 2010 09:31:00 +0000 (11:31 +0200)
To reuse this function for events with different enable bit masks,
this mask is part of the function's argument list now.

The function will be used later to control ibs events too.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1271190201-25705-6-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c

index c2c1e10f7b037d746feb10115132a9fa117f672d..4e218d7ac8526a735cfb9605909fd54e5b7262c9 100644 (file)
@@ -844,10 +844,10 @@ void hw_perf_enable(void)
        x86_pmu.enable_all(added);
 }
 
-static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
+                                         u64 enable_mask)
 {
-       wrmsrl(hwc->config_base + hwc->idx,
-                             hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
+       wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
 }
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
@@ -919,7 +919,8 @@ static void x86_pmu_enable_event(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        if (cpuc->enabled)
-               __x86_pmu_enable_event(&event->hw);
+               __x86_pmu_enable_event(&event->hw,
+                                      ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 /*
index a099df96f9168000720480c96f0aaf5e1b3a80b3..a4b56ac425cb1a30c4ba7927844af20b66c37d49 100644 (file)
@@ -513,7 +513,8 @@ static void intel_pmu_nhm_enable_all(int added)
                        if (!event)
                                continue;
 
-                       __x86_pmu_enable_event(&event->hw);
+                       __x86_pmu_enable_event(&event->hw,
+                                              ARCH_PERFMON_EVENTSEL_ENABLE);
                }
        }
        intel_pmu_enable_all(added);
@@ -617,7 +618,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
        if (unlikely(event->attr.precise))
                intel_pmu_pebs_enable(event);
 
-       __x86_pmu_enable_event(hwc);
+       __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 /*