perf/x86/intel: Clean up checkpoint-interrupt bits
author     Peter Zijlstra <peterz@infradead.org>
           Thu, 12 Sep 2013 10:53:44 +0000 (12:53 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Thu, 12 Sep 2013 17:13:37 +0000 (19:13 +0200)
Clean up the weird CP interrupt exception code by keeping a CP mask.

Andi suggested this implementation but weirdly didn't actually
implement it himself; do so now, because it removes the conditional in
the interrupt handler and avoids the assumption that it's only on cnt2.

Suggested-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-dvb4q0rydkfp00kqat4p5bah@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
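
In essence, the patch replaces the hard-coded check of counter 2 with a
per-CPU bitmask (intel_cp_status): a counter's bit is set when a
checkpointed event is enabled on it, cleared on disable, and the whole
mask is OR-ed into the overflow status word in the PMI handler. A
minimal userspace sketch of that pattern, with illustrative names
(cp_status, enable_event, handle_overflow) rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for cpuc->intel_cp_status: bit n set => counter n is checkpointed. */
static uint64_t cp_status;

static void enable_event(int idx, int checkpointed)
{
        if (checkpointed)
                cp_status |= 1ULL << idx;    /* remember this counter */
}

static void disable_event(int idx)
{
        cp_status &= ~(1ULL << idx);         /* forget it on disable */
}

/*
 * A checkpointed counter may have rolled back and cleared its overflow
 * bit, so force-probe every checkpointed counter by OR-ing the mask in:
 * no per-counter conditional, no hard-coded counter 2.
 */
static uint64_t handle_overflow(uint64_t hw_status)
{
        return hw_status | cp_status;
}

int main(void)
{
        enable_event(2, 1);                  /* checkpointed event on counter 2 */
        enable_event(5, 1);                  /* ...and another on counter 5 */
        printf("status = %#llx\n",
               (unsigned long long)handle_overflow(0x1)); /* prints 0x25 */
        disable_event(2);
        printf("status = %#llx\n",
               (unsigned long long)handle_overflow(0x1)); /* prints 0x21 */
        return 0;
}

The design point is that the mask costs a single OR in the handler no
matter how many counters carry checkpointed events, where the old code
paid a conditional and only covered counter 2.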
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c

diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index cc16faae0538431073b01dfa0bd5c96ab4383ae1..ce84edeeae271c4a428e930d8f04852279f3556a 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -163,6 +163,11 @@ struct cpu_hw_events {
        u64                             intel_ctrl_host_mask;
        struct perf_guest_switch_msr    guest_switch_msrs[X86_PMC_IDX_MAX];
 
+       /*
+        * Intel checkpoint mask
+        */
+       u64                             intel_cp_status;
+
        /*
         * manage shared (per-core, per-cpu) registers
         * used on Intel NHM/WSM/SNB
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index dd1d4f3e18e68d8ea3c5c843f7f9ce7d1614a107..ec70d0cce555251c095eac57bedeb78ad6ef079d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
        wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static inline bool event_is_checkpointed(struct perf_event *event)
+{
+       return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
+}
+
 static void intel_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
 
        cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
        cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
+       cpuc->intel_cp_status &= ~(1ull << hwc->idx);
 
        /*
         * must disable before any actual event
@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
        if (event->attr.exclude_guest)
                cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
 
+       if (unlikely(event_is_checkpointed(event)))
+               cpuc->intel_cp_status |= (1ull << hwc->idx);
+
        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc);
                return;
@@ -1282,11 +1291,6 @@ static void intel_pmu_enable_event(struct perf_event *event)
        __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
-static inline bool event_is_checkpointed(struct perf_event *event)
-{
-       return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
-}
-
 /*
  * Save and restart an expired event. Called by NMI contexts,
  * so it has to be careful about preempting normal event ops:
@@ -1389,11 +1393,11 @@ again:
        }
 
        /*
-        * To avoid spurious interrupts with perf stat always reset checkpointed
-        * counters.
+        * Checkpointed counters can lead to 'spurious' PMIs because the
+        * rollback caused by the PMI will have cleared the overflow status
+        * bit. Therefore always force probe these counters.
         */
-       if (cpuc->events[2] && event_is_checkpointed(cpuc->events[2]))
-               status |= (1ULL << 2);
+       status |= cpuc->intel_cp_status;
 
        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];
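
Once cp_status has been OR-ed in, the handler simply walks every set
bit of the combined status word; the kernel uses its for_each_set_bit()
helper for this. A rough standalone equivalent of that set-bit walk,
using the GCC/Clang __builtin_ctzll() builtin and an invented
process_counter() stand-in for the per-counter overflow work:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the per-counter overflow handling done inside the loop. */
static void process_counter(int bit)
{
        printf("counter %d overflowed\n", bit);
}

/*
 * Visit each set bit of the status word, lowest first, the way the
 * handler's for_each_set_bit() loop does after the checkpoint mask
 * has been merged in. __builtin_ctzll() is undefined for 0, which the
 * loop condition guards against.
 */
static void walk_status(uint64_t status)
{
        while (status) {
                int bit = __builtin_ctzll(status); /* index of lowest set bit */
                process_counter(bit);
                status &= status - 1;              /* clear that bit */
        }
}

int main(void)
{
        walk_status(0x25);   /* bits 0, 2, 5 -> three counters to probe */
        return 0;
}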