perf: Rework the PMU methods
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8e91cf34a9c886a85ba9c77db49c3f5eb5a93674..dd6fec710677effa72a0b36e1bfd8a8d5239c78f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -530,7 +530,7 @@ static int x86_pmu_hw_config(struct perf_event *event)
 /*
  * Setup the hardware configuration for a given attr_type
  */
-static int __hw_perf_event_init(struct perf_event *event)
+static int __x86_pmu_event_init(struct perf_event *event)
 {
        int err;
 
@@ -583,7 +583,7 @@ static void x86_pmu_disable_all(void)
        }
 }
 
-void hw_perf_disable(void)
+static void x86_pmu_disable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -618,7 +618,7 @@ static void x86_pmu_enable_all(int added)
        }
 }
 
-static const struct pmu pmu;
+static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
 {
@@ -800,10 +800,10 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
                hwc->last_tag == cpuc->tags[i];
 }
 
-static int x86_pmu_start(struct perf_event *event);
-static void x86_pmu_stop(struct perf_event *event);
+static void x86_pmu_start(struct perf_event *event, int flags);
+static void x86_pmu_stop(struct perf_event *event, int flags);
 
-void hw_perf_enable(void)
+static void x86_pmu_enable(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
@@ -839,7 +839,14 @@ void hw_perf_enable(void)
                            match_prev_assignment(hwc, cpuc, i))
                                continue;
 
-                       x86_pmu_stop(event);
+                       /*
+                        * Ensure we don't accidentally enable a stopped
+                        * counter simply because we rescheduled.
+                        */
+                       if (hwc->state & PERF_HES_STOPPED)
+                               hwc->state |= PERF_HES_ARCH;
+
+                       x86_pmu_stop(event, PERF_EF_UPDATE);
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
@@ -851,7 +858,10 @@ void hw_perf_enable(void)
                        else if (i < n_running)
                                continue;
 
-                       x86_pmu_start(event);
+                       if (hwc->state & PERF_HES_ARCH)
+                               continue;
+
+                       x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
@@ -952,15 +962,12 @@ static void x86_pmu_enable_event(struct perf_event *event)
 }
 
 /*
- * activate a single event
+ * Add a single event to the PMU.
  *
  * The event is added to the group of enabled events
  * but only if it can be scheduled with existing events.
- *
- * Called with PMU disabled. If successful and return value 1,
- * then guaranteed to call perf_enable() and hw_perf_enable()
  */
-static int x86_pmu_enable(struct perf_event *event)
+static int x86_pmu_add(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
@@ -969,57 +976,66 @@ static int x86_pmu_enable(struct perf_event *event)
 
        hwc = &event->hw;
 
+       perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
-       n = collect_events(cpuc, event, false);
-       if (n < 0)
-               return n;
+       ret = n = collect_events(cpuc, event, false);
+       if (ret < 0)
+               goto out;
+
+       hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+       if (!(flags & PERF_EF_START))
+               hwc->state |= PERF_HES_ARCH;
 
        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
-        * at commit time(->commit_txn) as a whole
+        * at commit time (->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
-               goto out;
+               goto done_collect;
 
        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
-               return ret;
+               goto out;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;
 
-       return 0;
+       ret = 0;
+out:
+       perf_pmu_enable(event->pmu);
+       return ret;
 }
 
-static int x86_pmu_start(struct perf_event *event)
+static void x86_pmu_start(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;
 
-       if (idx == -1)
-               return -EAGAIN;
+       if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+               return;
+
+       if (WARN_ON_ONCE(idx == -1))
+               return;
+
+       if (flags & PERF_EF_RELOAD) {
+               WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+               x86_perf_event_set_period(event);
+       }
+
+       event->hw.state = 0;
 
-       x86_perf_event_set_period(event);
        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
-
-       return 0;
-}
-
-static void x86_pmu_unthrottle(struct perf_event *event)
-{
-       int ret = x86_pmu_start(event);
-       WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
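
The hunk above spells out the new start/stop protocol: ->stop(PERF_EF_UPDATE) disables the counter and drains the residual delta exactly once (marking it PERF_HES_UPTODATE), while ->start(PERF_EF_RELOAD) reprograms the period before clearing the state bits. Purely as a hedged illustration of that contract (not part of the patch), a hypothetical driver's callback pair might look like the sketch below; my_hw_enable(), my_hw_disable() and my_hw_update_count() are invented stand-ins for the hardware accessors:

void my_hw_enable(struct perf_event *event);
void my_hw_disable(struct perf_event *event);
void my_hw_update_count(struct perf_event *event);

static void my_pmu_start(struct perf_event *event, int flags)
{
	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (flags & PERF_EF_RELOAD) {
		/* count is up to date; a real driver reprograms the period here */
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
	}

	event->hw.state = 0;			/* counting again */
	my_hw_enable(event);
}

static void my_pmu_stop(struct perf_event *event, int flags)
{
	if (!(event->hw.state & PERF_HES_STOPPED)) {
		my_hw_disable(event);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) &&
	    !(event->hw.state & PERF_HES_UPTODATE)) {
		my_hw_update_count(event);	/* drain the residual delta */
		event->hw.state |= PERF_HES_UPTODATE;
	}
}
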
@@ -1076,27 +1092,29 @@ void perf_event_print_debug(void)
        local_irq_restore(flags);
 }
 
-static void x86_pmu_stop(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-
-       if (!__test_and_clear_bit(idx, cpuc->active_mask))
-               return;
-
-       x86_pmu.disable(event);
 
-       /*
-        * Drain the remaining delta count out of a event
-        * that we are disabling:
-        */
-       x86_perf_event_update(event);
+       if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
+               x86_pmu.disable(event);
+               cpuc->events[hwc->idx] = NULL;
+               WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+               hwc->state |= PERF_HES_STOPPED;
+       }
 
-       cpuc->events[idx] = NULL;
+       if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+               /*
+                * Drain the remaining delta count out of an event
+                * that we are disabling:
+                */
+               x86_perf_event_update(event);
+               hwc->state |= PERF_HES_UPTODATE;
+       }
 }
 
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_del(struct perf_event *event, int flags)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
@@ -1109,7 +1127,7 @@ static void x86_pmu_disable(struct perf_event *event)
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;
 
-       x86_pmu_stop(event);
+       x86_pmu_stop(event, PERF_EF_UPDATE);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {
@@ -1154,14 +1172,14 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                /*
                 * event overflow
                 */
-               handled         = 1;
+               handled++;
                data.period     = event->hw.last_period;
 
                if (!x86_perf_event_set_period(event))
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu_stop(event);
+                       x86_pmu_stop(event, 0);
        }
 
        if (handled)
@@ -1200,12 +1218,20 @@ void perf_events_lapic_init(void)
        apic_write(APIC_LVTPC, APIC_DM_NMI);
 }
 
+struct pmu_nmi_state {
+       unsigned int    marked;
+       int             handled;
+};
+
+static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
+
 static int __kprobes
 perf_event_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
-       struct pt_regs *regs;
+       unsigned int this_nmi;
+       int handled;
 
        if (!atomic_read(&active_events))
                return NOTIFY_DONE;
@@ -1214,22 +1240,47 @@ perf_event_nmi_handler(struct notifier_block *self,
        case DIE_NMI:
        case DIE_NMI_IPI:
                break;
-
+       case DIE_NMIUNKNOWN:
+               this_nmi = percpu_read(irq_stat.__nmi_count);
+               if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+                       /* let the kernel handle the unknown nmi */
+                       return NOTIFY_DONE;
+               /*
+                * This one is a PMU back-to-back nmi. Two events
+                * trigger 'simultaneously' raising two back-to-back
+                * NMIs. If the first NMI handles both, the latter
+                * will be empty and daze the CPU. So, we drop it to
+                * avoid false-positive 'unknown nmi' messages.
+                */
+               return NOTIFY_STOP;
        default:
                return NOTIFY_DONE;
        }
 
-       regs = args->regs;
-
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-       /*
-        * Can't rely on the handled return value to say it was our NMI, two
-        * events could trigger 'simultaneously' raising two back-to-back NMIs.
-        *
-        * If the first NMI handles both, the latter will be empty and daze
-        * the CPU.
-        */
-       x86_pmu.handle_irq(regs);
+
+       handled = x86_pmu.handle_irq(args->regs);
+       if (!handled)
+               return NOTIFY_DONE;
+
+       this_nmi = percpu_read(irq_stat.__nmi_count);
+       if ((handled > 1) ||
+               /* the next nmi could be a back-to-back nmi */
+           ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
+            (__get_cpu_var(pmu_nmi).handled > 1))) {
+               /*
+                * We could have two subsequent back-to-back nmis: The
+                * first handles more than one counter, the 2nd
+                * handles only one counter and the 3rd handles no
+                * counter.
+                *
+                * This is the 2nd nmi because the previous was
+                * handling more than one counter. We will mark the
+                * next (3rd) and then drop it if unhandled.
+                */
+               __get_cpu_var(pmu_nmi).marked   = this_nmi + 1;
+               __get_cpu_var(pmu_nmi).handled  = handled;
+       }
 
        return NOTIFY_STOP;
 }
@@ -1381,6 +1432,7 @@ void __init init_hw_perf_events(void)
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
        pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
+       perf_pmu_register(&pmu);
        perf_cpu_notifier(x86_pmu_notifier);
 }
 
@@ -1394,10 +1446,11 @@ static inline void x86_pmu_read(struct perf_event *event)
  * Set the flag to make pmu::enable() not perform the
  * schedulability test, it will be performed at commit time
  */
-static void x86_pmu_start_txn(const struct pmu *pmu)
+static void x86_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       perf_pmu_disable(pmu);
        cpuc->group_flag |= PERF_EVENT_TXN;
        cpuc->n_txn = 0;
 }
@@ -1407,7 +1460,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
  * Clear the flag and pmu::enable() will perform the
  * schedulability test.
  */
-static void x86_pmu_cancel_txn(const struct pmu *pmu)
+static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1417,6 +1470,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
         */
        cpuc->n_added -= cpuc->n_txn;
        cpuc->n_events -= cpuc->n_txn;
+       perf_pmu_enable(pmu);
 }
 
 /*
@@ -1424,7 +1478,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
  * Perform the group schedulability test as a whole
  * Return 0 if success
  */
-static int x86_pmu_commit_txn(const struct pmu *pmu)
+static int x86_pmu_commit_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int assign[X86_PMC_IDX_MAX];
@@ -1446,22 +1500,10 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+       perf_pmu_enable(pmu);
        return 0;
 }
 
-static const struct pmu pmu = {
-       .enable         = x86_pmu_enable,
-       .disable        = x86_pmu_disable,
-       .start          = x86_pmu_start,
-       .stop           = x86_pmu_stop,
-       .read           = x86_pmu_read,
-       .unthrottle     = x86_pmu_unthrottle,
-       .start_txn      = x86_pmu_start_txn,
-       .cancel_txn     = x86_pmu_cancel_txn,
-       .commit_txn     = x86_pmu_commit_txn,
-};
-
 /*
  * validate that we can schedule this event
  */
@@ -1536,12 +1578,22 @@ out:
        return ret;
 }
 
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+int x86_pmu_event_init(struct perf_event *event)
 {
-       const struct pmu *tmp;
+       struct pmu *tmp;
        int err;
 
-       err = __hw_perf_event_init(event);
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
+       err = __x86_pmu_event_init(event);
        if (!err) {
                /*
                 * we temporarily connect event to its pmu
@@ -1561,19 +1613,32 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
        if (err) {
                if (event->destroy)
                        event->destroy(event);
-               return ERR_PTR(err);
        }
 
-       return &pmu;
+       return err;
 }
 
+static struct pmu pmu = {
+       .pmu_enable     = x86_pmu_enable,
+       .pmu_disable    = x86_pmu_disable,
+
+       .event_init     = x86_pmu_event_init,
+
+       .add            = x86_pmu_add,
+       .del            = x86_pmu_del,
+       .start          = x86_pmu_start,
+       .stop           = x86_pmu_stop,
+       .read           = x86_pmu_read,
+
+       .start_txn      = x86_pmu_start_txn,
+       .cancel_txn     = x86_pmu_cancel_txn,
+       .commit_txn     = x86_pmu_commit_txn,
+};
+
 /*
  * callchain support
  */
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry_nmi);
-
-
 static void
 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
 {
@@ -1610,7 +1675,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
-               return NULL;
+               return;
        }
 
        perf_callchain_store(entry, regs->ip);
@@ -1663,7 +1728,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* TODO: We don't support guest os callchain now */
-               return NULL;
+               return;
        }
 
        fp = (void __user *)regs->bp;