perf, arch: Cleanup perf-pmu init vs lockup-detector
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c1e8c7a5116493568e06ba353dd4150d6c027d9a..817d2b195e8e1e04507a49d955e1371f275555c9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -237,6 +237,7 @@ struct x86_pmu {
         * Intel DebugStore bits
         */
        int             bts, pebs;
+       int             bts_active, pebs_active;
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
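The new bts_active/pebs_active flags separate what the CPU advertises (bts, pebs) from whether the DS buffers were actually allocated, which is why a later hunk can stop treating reserve_ds_buffers() failure as fatal. A minimal sketch of how the allocator might set them, assuming a hypothetical per-cpu helper alloc_ds_buffer() (the real body lives in perf_event_intel_ds.c):

    /* Sketch only, not the real implementation. */
    static void reserve_ds_buffers(void)
    {
            int cpu, err = 0;

            if (!x86_pmu.bts && !x86_pmu.pebs)
                    return;

            for_each_possible_cpu(cpu)
                    err |= alloc_ds_buffer(cpu);    /* hypothetical helper */

            if (!err) {
                    x86_pmu.bts_active  = x86_pmu.bts;
                    x86_pmu.pebs_active = x86_pmu.pebs;
            }
    }
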
@@ -329,9 +330,6 @@ static bool reserve_pmc_hardware(void)
 {
        int i;
 
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               disable_lapic_nmi_watchdog();
-
        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
                        goto perfctr_fail;
@@ -354,9 +352,6 @@ perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu.perfctr + i);
 
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               enable_lapic_nmi_watchdog();
-
        return false;
 }
 
@@ -368,9 +363,6 @@ static void release_pmc_hardware(void)
                release_perfctr_nmi(x86_pmu.perfctr + i);
                release_evntsel_nmi(x86_pmu.eventsel + i);
        }
-
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               enable_lapic_nmi_watchdog();
 }
 
 #else
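These three hunks drop the enable/disable dance with the old LAPIC NMI watchdog: now that the lockup detector is itself a perf-events user, counter ownership is arbitrated solely through reserve_perfctr_nmi()/reserve_evntsel_nmi(). Conceptually those helpers are atomic claims on a shared bitmap, along the lines of this sketch (names are illustrative; the real code keys off MSR addresses):

    static DECLARE_BITMAP(perfctr_busy, X86_PMC_IDX_MAX);

    static bool try_reserve_counter(unsigned int idx)
    {
            /* true iff the bit was clear, i.e. the counter was free */
            return !test_and_set_bit(idx, perfctr_busy);
    }

    static void release_counter(unsigned int idx)
    {
            clear_bit(idx, perfctr_busy);
    }
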
@@ -380,7 +372,21 @@ static void release_pmc_hardware(void) {}
 
 #endif
 
-static int reserve_ds_buffers(void);
+static bool check_hw_exists(void)
+{
+       u64 val, val_new = 0;
+       int ret = 0;
+
+       val = 0xabcdUL;
+       ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+       ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+       if (ret || val != val_new)
+               return false;
+
+       return true;
+}
+
+static void reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
 static void hw_perf_event_destroy(struct perf_event *event)
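check_hw_exists() probes for a usable PMU by writing a known value to the first counter MSR and reading it back; a fault in either direction, or a value that does not stick (say, a hypervisor that ignores PMU MSRs), downgrades the box to software events. The checking/_safe accessors turn the #GP from a non-existent MSR into an error code via exception-table fixups; abridged from the msr.h of this era:

    #define checking_wrmsrl(msr, val) \
            wrmsr_safe((msr), (u32)(val), (u32)((val) >> 32))
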
@@ -436,7 +442,7 @@ static int x86_setup_perfctr(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        u64 config;
 
-       if (!hwc->sample_period) {
+       if (!is_sampling_event(event)) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
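is_sampling_event() is the generic helper for the test that was open-coded here, so the x86 code now follows the core definition of a sampling event:

    /* include/linux/perf_event.h (contemporary tree): */
    static inline bool is_sampling_event(struct perf_event *event)
    {
            return event->attr.sample_period != 0;
    }
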
@@ -477,7 +483,7 @@ static int x86_setup_perfctr(struct perf_event *event)
        if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
            (hwc->sample_period == 1)) {
                /* BTS is not supported by this architecture. */
-               if (!x86_pmu.bts)
+               if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;
 
                /* BTS is currently only allowed for user-mode. */
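BTS kicks in when user space asks for a branch-instructions event with a period of 1; gating on bts_active instead of bts means "supported but buffer allocation failed" now fails cleanly with -EOPNOTSUPP. A hypothetical perf_event_open(2) attribute that would take this path (note exclude_kernel, since BTS is restricted to user mode here):

    struct perf_event_attr attr = {
            .size           = sizeof(attr),
            .type           = PERF_TYPE_HARDWARE,
            .config         = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
            .sample_period  = 1,    /* sample every branch: BTS territory */
            .exclude_kernel = 1,    /* required by the check above */
            .exclude_hv     = 1,
    };
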
@@ -496,12 +502,13 @@ static int x86_pmu_hw_config(struct perf_event *event)
                int precise = 0;
 
                /* Support for constant skid */
-               if (x86_pmu.pebs)
+               if (x86_pmu.pebs_active) {
                        precise++;
 
-               /* Support for IP fixup */
-               if (x86_pmu.lbr_nr)
-                       precise++;
+                       /* Support for IP fixup */
+                       if (x86_pmu.lbr_nr)
+                               precise++;
+               }
 
                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
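Nesting the LBR test inside the PEBS test fixes the advertised precision: LBR-based IP fixup is meaningless without PEBS, so precise_ip == 2 is only offered when both are usable. A small user-space probe for the maximum level the kernel will accept, using plain perf_event_open(2) (max_precise_ip is a hypothetical helper, not a kernel interface):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Highest precise_ip the kernel accepts for a cycles event. */
    static int max_precise_ip(void)
    {
            struct perf_event_attr attr;
            int level, fd;

            for (level = 3; level >= 0; level--) {
                    memset(&attr, 0, sizeof(attr));
                    attr.size = sizeof(attr);
                    attr.type = PERF_TYPE_HARDWARE;
                    attr.config = PERF_COUNT_HW_CPU_CYCLES;
                    attr.sample_period = 100000;
                    attr.precise_ip = level;

                    fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
                    if (fd >= 0) {
                            close(fd);      /* this level is supported */
                            return level;
                    }
            }
            return -1;      /* no hardware PMU access at all */
    }
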
@@ -543,11 +550,8 @@ static int __x86_pmu_event_init(struct perf_event *event)
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
-                       else {
-                               err = reserve_ds_buffers();
-                               if (err)
-                                       release_pmc_hardware();
-                       }
+                       else
+                               reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
@@ -1349,7 +1353,7 @@ static void __init pmu_check_apic(void)
        pr_info("no hardware sampling interrupt available.\n");
 }
 
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
        struct event_constraint *c;
        int err;
@@ -1364,15 +1368,21 @@ void __init init_hw_perf_events(void)
                err = amd_pmu_init();
                break;
        default:
-               return;
+               return 0;
        }
        if (err != 0) {
                pr_cont("no PMU driver, software events only.\n");
-               return;
+               return 0;
        }
 
        pmu_check_apic();
 
+       /* sanity check that the hardware exists or is emulated */
+       if (!check_hw_exists()) {
+               pr_cont("Broken PMU hardware detected, software events only.\n");
+               return 0;
+       }
+
        pr_cont("%s PMU driver.\n", x86_pmu.name);
 
        if (x86_pmu.quirks)
@@ -1421,7 +1431,10 @@ void __init init_hw_perf_events(void)
 
        perf_pmu_register(&pmu);
        perf_cpu_notifier(x86_pmu_notifier);
+
+       return 0;
 }
+early_initcall(init_hw_perf_events);
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
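Turning init_hw_perf_events() into an early_initcall (hence the new int return type) is the core of the cleanup: early initcalls run from do_pre_smp_initcalls(), before the lockup detector creates its per-CPU watchdog event, so no arch-specific ordering hook is needed anymore. The watchdog is just another perf consumer; its event attribute, abridged from kernel/watchdog.c of this era, is roughly:

    /* The NMI lockup detector is an ordinary pinned cycles event. */
    static struct perf_event_attr wd_hw_attr = {
            .type           = PERF_TYPE_HARDWARE,
            .config         = PERF_COUNT_HW_CPU_CYCLES,
            .size           = sizeof(struct perf_event_attr),
            .pinned         = 1,
            .disabled       = 1,
    };
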
@@ -1667,7 +1680,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        perf_callchain_store(entry, regs->ip);
 
-       dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
+       dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
 }
 
 #ifdef CONFIG_COMPAT
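The dump_trace() call drops its bp argument to track the updated prototype, which now recovers the frame pointer internally; per my reading of the contemporary headers, the signature becomes:

    /* arch/x86/include/asm/stacktrace.h, post-change (abridged): */
    extern void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
                           unsigned long *stack,
                           const struct stacktrace_ops *ops, void *data);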