perf: Reduce perf_disable() usage
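
In short: this patch removes perf_disable()/perf_enable() bracketing from paths that are already serialized by ctx->lock, converts the software, tracepoint, cpu-clock and task-clock pmus to a common perf_pmu_register()/event_init() registration scheme, and moves callchain buffer management (allocation, RCU-protected release, recursion guards) from weak arch hooks into the core.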
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198140e4f1355c70c0b25e6efa9e5d8..9a98ce95356106d689c9fba843dbb5a67a67c4c0 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,7 +31,6 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
-#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -72,14 +71,6 @@ static atomic64_t perf_event_id;
  */
 static DEFINE_SPINLOCK(perf_resource_lock);
 
-/*
- * Architecture provided APIs - weak aliases:
- */
-extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
-{
-       return NULL;
-}
-
 void __weak hw_perf_disable(void)              { barrier(); }
 void __weak hw_perf_enable(void)               { barrier(); }
 
@@ -156,7 +147,7 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
        struct perf_event_context *ctx;
 
        rcu_read_lock();
- retry:
+retry:
        ctx = rcu_dereference(task->perf_event_ctxp);
        if (ctx) {
                /*
@@ -402,11 +393,31 @@ static void perf_group_detach(struct perf_event *event)
        }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+       return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 delta;
+       /*
+        * An event which could not be activated because of
+        * filter mismatch still needs to have its timings
+        * maintained, otherwise bogus information is returned
+        * via read() for time_enabled, time_running:
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE
+           && !event_filter_match(event)) {
+               delta = ctx->time - event->tstamp_stopped;
+               event->tstamp_running += delta;
+               event->tstamp_stopped = ctx->time;
+       }
+
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return;
 
@@ -432,9 +443,7 @@ group_sched_out(struct perf_event *group_event,
                struct perf_event_context *ctx)
 {
        struct perf_event *event;
-
-       if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-               return;
+       int state = group_event->state;
 
        event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +453,7 @@ group_sched_out(struct perf_event *group_event,
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
 
-       if (group_event->attr.exclusive)
+       if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
 }
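
Note the companion fix in group_sched_out() above: the early return for non-ACTIVE groups is gone, so event_sched_out() now runs even for INACTIVE groups and the new timestamp maintenance takes effect; the group's state is captured beforehand so cpuctx->exclusive is still cleared only when the group had actually been ACTIVE.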
 
@@ -469,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level.
-        */
-       perf_disable();
 
        event_sched_out(event, cpuctx, ctx);
 
@@ -489,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
                            perf_max_events - perf_reserved_percpu);
        }
 
-       perf_enable();
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -610,7 +613,7 @@ void perf_event_disable(struct perf_event *event)
                return;
        }
 
- retry:
+retry:
        task_oncpu_function_call(task, __perf_event_disable, event);
 
        raw_spin_lock_irq(&ctx->lock);
@@ -673,7 +676,7 @@ group_sched_in(struct perf_event *group_event,
               struct perf_event_context *ctx)
 {
        struct perf_event *event, *partial_group = NULL;
-       const struct pmu *pmu = group_event->pmu;
+       struct pmu *pmu = group_event->pmu;
        bool txn = false;
 
        if (group_event->state == PERF_EVENT_STATE_OFF)
@@ -794,12 +797,6 @@ static void __perf_install_in_context(void *info)
        ctx->is_active = 1;
        update_context_time(ctx);
 
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level. NOP for non NMI based events.
-        */
-       perf_disable();
-
        add_event_to_ctx(event, ctx);
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -840,9 +837,7 @@ static void __perf_install_in_context(void *info)
        if (!err && !ctx->task && cpuctx->max_pertask)
                cpuctx->max_pertask--;
 
- unlock:
-       perf_enable();
-
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -913,10 +908,12 @@ static void __perf_event_mark_enabled(struct perf_event *event,
 
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->tstamp_enabled = ctx->time - event->total_time_enabled;
-       list_for_each_entry(sub, &event->sibling_list, group_entry)
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+       list_for_each_entry(sub, &event->sibling_list, group_entry) {
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
                        sub->tstamp_enabled =
                                ctx->time - sub->total_time_enabled;
+               }
+       }
 }
 
 /*
@@ -961,12 +958,10 @@ static void __perf_event_enable(void *info)
        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
-               perf_disable();
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
-               perf_enable();
        }
 
        if (err) {
@@ -982,7 +977,7 @@ static void __perf_event_enable(void *info)
                }
        }
 
- unlock:
+unlock:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1023,7 +1018,7 @@ void perf_event_enable(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
 
- retry:
+retry:
        raw_spin_unlock_irq(&ctx->lock);
        task_oncpu_function_call(task, __perf_event_enable, event);
 
@@ -1043,7 +1038,7 @@ void perf_event_enable(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_OFF)
                __perf_event_mark_enabled(event, ctx);
 
- out:
+out:
        raw_spin_unlock_irq(&ctx->lock);
 }
 
@@ -1079,21 +1074,19 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                goto out;
        update_context_time(ctx);
 
-       perf_disable();
        if (!ctx->nr_active)
-               goto out_enable;
+               goto out;
 
-       if (event_type & EVENT_PINNED)
+       if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
+       }
 
-       if (event_type & EVENT_FLEXIBLE)
+       if (event_type & EVENT_FLEXIBLE) {
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
-
- out_enable:
-       perf_enable();
- out:
+       }
+out:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1332,9 +1325,10 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                if (event->cpu != -1 && event->cpu != smp_processor_id())
                        continue;
 
-               if (group_can_go_on(event, cpuctx, can_add_hw))
+               if (group_can_go_on(event, cpuctx, can_add_hw)) {
                        if (group_sched_in(event, cpuctx, ctx))
                                can_add_hw = 0;
+               }
        }
 }
 
@@ -1350,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
        ctx->timestamp = perf_clock();
 
-       perf_disable();
-
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
@@ -1363,8 +1355,7 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (event_type & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
 
-       perf_enable();
- out:
+out:
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1411,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
        if (cpuctx->task_ctx == ctx)
                return;
 
-       perf_disable();
-
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
@@ -1425,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
        ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
        cpuctx->task_ctx = ctx;
-
-       perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1541,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
        hwc->sample_period = sample_period;
 
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               perf_disable();
                perf_event_stop(event);
                local64_set(&hwc->period_left, 0);
                perf_event_start(event);
-               perf_enable();
        }
 }
 
@@ -1574,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
-                       perf_disable();
                        event->pmu->unthrottle(event);
-                       perf_enable();
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
-               perf_disable();
                event->pmu->read(event);
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
@@ -1590,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
                if (delta > 0)
                        perf_adjust_period(event, TICK_NSEC, delta);
-               perf_enable();
        }
        raw_spin_unlock(&ctx->lock);
 }
@@ -1633,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
        if (!rotate)
                return;
 
-       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1645,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-       perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,
@@ -1705,7 +1684,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
        raw_spin_unlock(&ctx->lock);
 
        perf_event_task_sched_in(task);
- out:
+out:
        local_irq_restore(flags);
 }
 
@@ -1763,6 +1742,216 @@ static u64 perf_event_read(struct perf_event *event)
        return perf_event_count(event);
 }
 
+/*
+ * Callchain support
+ */
+
+struct callchain_cpus_entries {
+       struct rcu_head                 rcu_head;
+       struct perf_callchain_entry     *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+       struct callchain_cpus_entries *entries;
+       int cpu;
+
+       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+
+       kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+       struct callchain_cpus_entries *entries;
+
+       entries = callchain_cpus_entries;
+       rcu_assign_pointer(callchain_cpus_entries, NULL);
+       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+       int cpu;
+       int size;
+       struct callchain_cpus_entries *entries;
+
+       /*
+        * We can't use the percpu allocation API for data that can be
+        * accessed from NMI. Use a temporary manual per cpu allocation
+        * until that gets sorted out.
+        */
+       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+               num_possible_cpus();
+
+       entries = kzalloc(size, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+       for_each_possible_cpu(cpu) {
+               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+                                                        cpu_to_node(cpu));
+               if (!entries->cpu_entries[cpu])
+                       goto fail;
+       }
+
+       rcu_assign_pointer(callchain_cpus_entries, entries);
+
+       return 0;
+
+fail:
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+
+       return -ENOMEM;
+}
+
+static int get_callchain_buffers(void)
+{
+       int err = 0;
+       int count;
+
+       mutex_lock(&callchain_mutex);
+
+       count = atomic_inc_return(&nr_callchain_events);
+       if (WARN_ON_ONCE(count < 1)) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+               goto exit;
+       }
+
+       err = alloc_callchain_buffers();
+       if (err)
+               release_callchain_buffers();
+exit:
+       mutex_unlock(&callchain_mutex);
+
+       return err;
+}
+
+static void put_callchain_buffers(void)
+{
+       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+               release_callchain_buffers();
+               mutex_unlock(&callchain_mutex);
+       }
+}
+
+static int get_recursion_context(int *recursion)
+{
+       int rctx;
+
+       if (in_nmi())
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+       else if (in_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+
+       if (recursion[rctx])
+               return -1;
+
+       recursion[rctx]++;
+       barrier();
+
+       return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+       barrier();
+       recursion[rctx]--;
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+       int cpu;
+       struct callchain_cpus_entries *entries;
+
+       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+       if (*rctx == -1)
+               return NULL;
+
+       entries = rcu_dereference(callchain_cpus_entries);
+       if (!entries)
+               return NULL;
+
+       cpu = smp_processor_id();
+
+       return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+       int rctx;
+       struct perf_callchain_entry *entry;
+
+
+       entry = get_callchain_entry(&rctx);
+       if (rctx == -1)
+               return NULL;
+
+       if (!entry)
+               goto exit_put;
+
+       entry->nr = 0;
+
+       if (!user_mode(regs)) {
+               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(entry, regs);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+
+       if (regs) {
+               perf_callchain_store(entry, PERF_CONTEXT_USER);
+               perf_callchain_user(entry, regs);
+       }
+
+exit_put:
+       put_callchain_entry(rctx);
+
+       return entry;
+}
+
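
The callchain buffers above are shared per-cpu storage, refcounted via nr_callchain_events and freed through call_rcu(), so any sampler dereferencing callchain_cpus_entries must stay inside an RCU read-side critical section; perf_event_output() below gains exactly such a pairing. A condensed sketch of that reader-side contract (output_sample_callchain() is a hypothetical consumer, not part of this patch):

    struct perf_callchain_entry *entry;

    /* The entry is only guaranteed to stay allocated inside the RCU
     * section: a concurrent release_callchain_buffers() frees it after
     * a grace period. */
    rcu_read_lock();
    entry = perf_callchain(regs);   /* dereferences callchain_cpus_entries */
    if (entry)
            output_sample_callchain(entry);
    rcu_read_unlock();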
 /*
  * Initialize the perf_event context in a task_struct:
  */
@@ -1834,7 +2023,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto errout;
 
- retry:
+retry:
        ctx = perf_lock_task_context(task, &flags);
        if (ctx) {
                unclone_ctx(ctx);
@@ -1862,7 +2051,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
        put_task_struct(task);
        return ctx;
 
- errout:
+errout:
        put_task_struct(task);
        return ERR_PTR(err);
 }
@@ -1895,6 +2084,8 @@ static void free_event(struct perf_event *event)
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
        }
 
        if (event->buffer) {
@@ -2937,16 +3128,6 @@ void perf_event_do_pending(void)
        __perf_pending_run();
 }
 
-/*
- * Callchain support -- arch specific
- */
-
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       return NULL;
-}
-
-
 /*
  * We assume there is only KVM supporting the callbacks.
  * Later on, we might change it to a list if there is
@@ -3053,7 +3234,7 @@ again:
        if (handle->wakeup != local_read(&buffer->wakeup))
                perf_output_wakeup(handle);
 
- out:
+out:
        preempt_enable();
 }
 
@@ -3441,14 +3622,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
        struct perf_output_handle handle;
        struct perf_event_header header;
 
+       /* protect the callchain buffers */
+       rcu_read_lock();
+
        perf_prepare_sample(&header, data, event, regs);
 
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
-               return;
+               goto exit;
 
        perf_output_sample(&handle, &header, data, event);
 
        perf_output_end(&handle);
+
+exit:
+       rcu_read_unlock();
 }
 
 /*
@@ -4204,32 +4391,16 @@ end:
 int perf_swevent_get_recursion_context(void)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       int rctx;
-
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-
-       if (cpuctx->recursion[rctx])
-               return -1;
 
-       cpuctx->recursion[rctx]++;
-       barrier();
-
-       return rctx;
+       return get_recursion_context(cpuctx->recursion);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 void inline perf_swevent_put_recursion_context(int rctx)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       barrier();
-       cpuctx->recursion[rctx]--;
+
+       put_recursion_context(cpuctx->recursion, rctx);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4291,224 +4462,48 @@ static int perf_swevent_int(struct perf_event *event)
        return 0;
 }
 
-static const struct pmu perf_ops_generic = {
-       .enable         = perf_swevent_enable,
-       .disable        = perf_swevent_disable,
-       .start          = perf_swevent_int,
-       .stop           = perf_swevent_void,
-       .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void, /* hwc->interrupts already reset */
-};
+/* Deref the hlist from the update side */
+static inline struct swevent_hlist *
+swevent_hlist_deref(struct perf_cpu_context *cpuctx)
+{
+       return rcu_dereference_protected(cpuctx->swevent_hlist,
+                                        lockdep_is_held(&cpuctx->hlist_mutex));
+}
 
-/*
- * hrtimer based swevent callback
- */
+static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
+{
+       struct swevent_hlist *hlist;
 
-static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
+       hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
+       kfree(hlist);
+}
+
+static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
 {
-       enum hrtimer_restart ret = HRTIMER_RESTART;
-       struct perf_sample_data data;
-       struct pt_regs *regs;
-       struct perf_event *event;
-       u64 period;
+       struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
 
-       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
-       event->pmu->read(event);
+       if (!hlist)
+               return;
 
-       perf_sample_data_init(&data, 0);
-       data.period = event->hw.last_period;
-       regs = get_irq_regs();
+       rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
+       call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
+}
 
-       if (regs && !perf_exclude_event(event, regs)) {
-               if (!(event->attr.exclude_idle && current->pid == 0))
-                       if (perf_event_overflow(event, 0, &data, regs))
-                               ret = HRTIMER_NORESTART;
-       }
+static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
+{
+       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
 
-       period = max_t(u64, 10000, event->hw.sample_period);
-       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+       mutex_lock(&cpuctx->hlist_mutex);
 
-       return ret;
+       if (!--cpuctx->hlist_refcount)
+               swevent_hlist_release(cpuctx);
+
+       mutex_unlock(&cpuctx->hlist_mutex);
 }
 
-static void perf_swevent_start_hrtimer(struct perf_event *event)
+static void swevent_hlist_put(struct perf_event *event)
 {
-       struct hw_perf_event *hwc = &event->hw;
-
-       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hwc->hrtimer.function = perf_swevent_hrtimer;
-       if (hwc->sample_period) {
-               u64 period;
-
-               if (hwc->remaining) {
-                       if (hwc->remaining < 0)
-                               period = 10000;
-                       else
-                               period = hwc->remaining;
-                       hwc->remaining = 0;
-               } else {
-                       period = max_t(u64, 10000, hwc->sample_period);
-               }
-               __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL, 0);
-       }
-}
-
-static void perf_swevent_cancel_hrtimer(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (hwc->sample_period) {
-               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
-               hwc->remaining = ktime_to_ns(remaining);
-
-               hrtimer_cancel(&hwc->hrtimer);
-       }
-}
-
-/*
- * Software event: cpu wall time clock
- */
-
-static void cpu_clock_perf_event_update(struct perf_event *event)
-{
-       int cpu = raw_smp_processor_id();
-       s64 prev;
-       u64 now;
-
-       now = cpu_clock(cpu);
-       prev = local64_xchg(&event->hw.prev_count, now);
-       local64_add(now - prev, &event->count);
-}
-
-static int cpu_clock_perf_event_enable(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-       int cpu = raw_smp_processor_id();
-
-       local64_set(&hwc->prev_count, cpu_clock(cpu));
-       perf_swevent_start_hrtimer(event);
-
-       return 0;
-}
-
-static void cpu_clock_perf_event_disable(struct perf_event *event)
-{
-       perf_swevent_cancel_hrtimer(event);
-       cpu_clock_perf_event_update(event);
-}
-
-static void cpu_clock_perf_event_read(struct perf_event *event)
-{
-       cpu_clock_perf_event_update(event);
-}
-
-static const struct pmu perf_ops_cpu_clock = {
-       .enable         = cpu_clock_perf_event_enable,
-       .disable        = cpu_clock_perf_event_disable,
-       .read           = cpu_clock_perf_event_read,
-};
-
-/*
- * Software event: task time clock
- */
-
-static void task_clock_perf_event_update(struct perf_event *event, u64 now)
-{
-       u64 prev;
-       s64 delta;
-
-       prev = local64_xchg(&event->hw.prev_count, now);
-       delta = now - prev;
-       local64_add(delta, &event->count);
-}
-
-static int task_clock_perf_event_enable(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-       u64 now;
-
-       now = event->ctx->time;
-
-       local64_set(&hwc->prev_count, now);
-
-       perf_swevent_start_hrtimer(event);
-
-       return 0;
-}
-
-static void task_clock_perf_event_disable(struct perf_event *event)
-{
-       perf_swevent_cancel_hrtimer(event);
-       task_clock_perf_event_update(event, event->ctx->time);
-
-}
-
-static void task_clock_perf_event_read(struct perf_event *event)
-{
-       u64 time;
-
-       if (!in_nmi()) {
-               update_context_time(event->ctx);
-               time = event->ctx->time;
-       } else {
-               u64 now = perf_clock();
-               u64 delta = now - event->ctx->timestamp;
-               time = event->ctx->time + delta;
-       }
-
-       task_clock_perf_event_update(event, time);
-}
-
-static const struct pmu perf_ops_task_clock = {
-       .enable         = task_clock_perf_event_enable,
-       .disable        = task_clock_perf_event_disable,
-       .read           = task_clock_perf_event_read,
-};
-
-/* Deref the hlist from the update side */
-static inline struct swevent_hlist *
-swevent_hlist_deref(struct perf_cpu_context *cpuctx)
-{
-       return rcu_dereference_protected(cpuctx->swevent_hlist,
-                                        lockdep_is_held(&cpuctx->hlist_mutex));
-}
-
-static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
-{
-       struct swevent_hlist *hlist;
-
-       hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
-       kfree(hlist);
-}
-
-static void swevent_hlist_release(struct perf_cpu_context *cpuctx)
-{
-       struct swevent_hlist *hlist = swevent_hlist_deref(cpuctx);
-
-       if (!hlist)
-               return;
-
-       rcu_assign_pointer(cpuctx->swevent_hlist, NULL);
-       call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
-}
-
-static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
-{
-       struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-
-       mutex_lock(&cpuctx->hlist_mutex);
-
-       if (!--cpuctx->hlist_refcount)
-               swevent_hlist_release(cpuctx);
-
-       mutex_unlock(&cpuctx->hlist_mutex);
-}
-
-static void swevent_hlist_put(struct perf_event *event)
-{
-       int cpu;
+       int cpu;
 
        if (event->cpu != -1) {
                swevent_hlist_put_cpu(event, event->cpu);
@@ -4537,7 +4532,7 @@ static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
                rcu_assign_pointer(cpuctx->swevent_hlist, hlist);
        }
        cpuctx->hlist_refcount++;
- exit:
+exit:
        mutex_unlock(&cpuctx->hlist_mutex);
 
        return err;
@@ -4562,7 +4557,7 @@ static int swevent_hlist_get(struct perf_event *event)
        put_online_cpus();
 
        return 0;
- fail:
+fail:
        for_each_possible_cpu(cpu) {
                if (cpu == failed_cpu)
                        break;
@@ -4573,17 +4568,63 @@ static int swevent_hlist_get(struct perf_event *event)
        return err;
 }
 
-#ifdef CONFIG_EVENT_TRACING
+atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
-static const struct pmu perf_ops_tracepoint = {
-       .enable         = perf_trace_enable,
-       .disable        = perf_trace_disable,
+static void sw_perf_event_destroy(struct perf_event *event)
+{
+       u64 event_id = event->attr.config;
+
+       WARN_ON(event->parent);
+
+       atomic_dec(&perf_swevent_enabled[event_id]);
+       swevent_hlist_put(event);
+}
+
+static int perf_swevent_init(struct perf_event *event)
+{
+       int event_id = event->attr.config;
+
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       switch (event_id) {
+       case PERF_COUNT_SW_CPU_CLOCK:
+       case PERF_COUNT_SW_TASK_CLOCK:
+               return -ENOENT;
+
+       default:
+               break;
+       }
+
+       if (event_id > PERF_COUNT_SW_MAX)
+               return -ENOENT;
+
+       if (!event->parent) {
+               int err;
+
+               err = swevent_hlist_get(event);
+               if (err)
+                       return err;
+
+               atomic_inc(&perf_swevent_enabled[event_id]);
+               event->destroy = sw_perf_event_destroy;
+       }
+
+       return 0;
+}
+
+static struct pmu perf_swevent = {
+       .event_init     = perf_swevent_init,
+       .enable         = perf_swevent_enable,
+       .disable        = perf_swevent_disable,
        .start          = perf_swevent_int,
        .stop           = perf_swevent_void,
        .read           = perf_swevent_read,
-       .unthrottle     = perf_swevent_void,
+       .unthrottle     = perf_swevent_void, /* hwc->interrupts already reset */
 };
 
+#ifdef CONFIG_EVENT_TRACING
+
 static int perf_tp_filter_match(struct perf_event *event,
                                struct perf_sample_data *data)
 {
@@ -4639,10 +4680,13 @@ static void tp_perf_event_destroy(struct perf_event *event)
        perf_trace_destroy(event);
 }
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static int perf_tp_event_init(struct perf_event *event)
 {
        int err;
 
+       if (event->attr.type != PERF_TYPE_TRACEPOINT)
+               return -ENOENT;
+
        /*
         * Raw tracepoint data is a severe data leak, only allow root to
         * have these.
@@ -4650,15 +4694,30 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
        if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
                        perf_paranoid_tracepoint_raw() &&
                        !capable(CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
+               return -EPERM;
 
        err = perf_trace_init(event);
        if (err)
-               return NULL;
+               return err;
 
        event->destroy = tp_perf_event_destroy;
 
-       return &perf_ops_tracepoint;
+       return 0;
+}
+
+static struct pmu perf_tracepoint = {
+       .event_init     = perf_tp_event_init,
+       .enable         = perf_trace_enable,
+       .disable        = perf_trace_disable,
+       .start          = perf_swevent_int,
+       .stop           = perf_swevent_void,
+       .read           = perf_swevent_read,
+       .unthrottle     = perf_swevent_void,
+};
+
+static inline void perf_tp_register(void)
+{
+       perf_pmu_register(&perf_tracepoint);
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4686,9 +4745,8 @@ static void perf_event_free_filter(struct perf_event *event)
 
 #else
 
-static const struct pmu *tp_perf_event_init(struct perf_event *event)
+static inline void perf_tp_register(void)
 {
-       return NULL;
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -4703,105 +4761,247 @@ static void perf_event_free_filter(struct perf_event *event)
 #endif /* CONFIG_EVENT_TRACING */
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-static void bp_perf_event_destroy(struct perf_event *event)
+void perf_bp_event(struct perf_event *bp, void *data)
 {
-       release_bp_slot(event);
+       struct perf_sample_data sample;
+       struct pt_regs *regs = data;
+
+       perf_sample_data_init(&sample, bp->attr.bp_addr);
+
+       if (!perf_exclude_event(bp, regs))
+               perf_swevent_add(bp, 1, 1, &sample, regs);
 }
+#endif
 
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+/*
+ * hrtimer based swevent callback
+ */
+
+static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 {
-       int err;
+       enum hrtimer_restart ret = HRTIMER_RESTART;
+       struct perf_sample_data data;
+       struct pt_regs *regs;
+       struct perf_event *event;
+       u64 period;
 
-       err = register_perf_hw_breakpoint(bp);
-       if (err)
-               return ERR_PTR(err);
+       event = container_of(hrtimer, struct perf_event, hw.hrtimer);
+       event->pmu->read(event);
 
-       bp->destroy = bp_perf_event_destroy;
+       perf_sample_data_init(&data, 0);
+       data.period = event->hw.last_period;
+       regs = get_irq_regs();
+
+       if (regs && !perf_exclude_event(event, regs)) {
+               if (!(event->attr.exclude_idle && current->pid == 0))
+                       if (perf_event_overflow(event, 0, &data, regs))
+                               ret = HRTIMER_NORESTART;
+       }
+
+       period = max_t(u64, 10000, event->hw.sample_period);
+       hrtimer_forward_now(hrtimer, ns_to_ktime(period));
 
-       return &perf_ops_bp;
+       return ret;
 }
 
-void perf_bp_event(struct perf_event *bp, void *data)
+static void perf_swevent_start_hrtimer(struct perf_event *event)
 {
-       struct perf_sample_data sample;
-       struct pt_regs *regs = data;
+       struct hw_perf_event *hwc = &event->hw;
 
-       perf_sample_data_init(&sample, bp->attr.bp_addr);
+       hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hwc->hrtimer.function = perf_swevent_hrtimer;
+       if (hwc->sample_period) {
+               u64 period;
 
-       if (!perf_exclude_event(bp, regs))
-               perf_swevent_add(bp, 1, 1, &sample, regs);
+               if (hwc->remaining) {
+                       if (hwc->remaining < 0)
+                               period = 10000;
+                       else
+                               period = hwc->remaining;
+                       hwc->remaining = 0;
+               } else {
+                       period = max_t(u64, 10000, hwc->sample_period);
+               }
+               __hrtimer_start_range_ns(&hwc->hrtimer,
+                               ns_to_ktime(period), 0,
+                               HRTIMER_MODE_REL, 0);
+       }
 }
-#else
-static const struct pmu *bp_perf_event_init(struct perf_event *bp)
+
+static void perf_swevent_cancel_hrtimer(struct perf_event *event)
 {
-       return NULL;
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->sample_period) {
+               ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
+               hwc->remaining = ktime_to_ns(remaining);
+
+               hrtimer_cancel(&hwc->hrtimer);
+       }
 }
 
-void perf_bp_event(struct perf_event *bp, void *regs)
+/*
+ * Software event: cpu wall time clock
+ */
+
+static void cpu_clock_event_update(struct perf_event *event)
 {
+       int cpu = raw_smp_processor_id();
+       s64 prev;
+       u64 now;
+
+       now = cpu_clock(cpu);
+       prev = local64_xchg(&event->hw.prev_count, now);
+       local64_add(now - prev, &event->count);
 }
-#endif
 
-atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
+static int cpu_clock_event_enable(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       int cpu = raw_smp_processor_id();
 
-static void sw_perf_event_destroy(struct perf_event *event)
+       local64_set(&hwc->prev_count, cpu_clock(cpu));
+       perf_swevent_start_hrtimer(event);
+
+       return 0;
+}
+
+static void cpu_clock_event_disable(struct perf_event *event)
 {
-       u64 event_id = event->attr.config;
+       perf_swevent_cancel_hrtimer(event);
+       cpu_clock_event_update(event);
+}
 
-       WARN_ON(event->parent);
+static void cpu_clock_event_read(struct perf_event *event)
+{
+       cpu_clock_event_update(event);
+}
 
-       atomic_dec(&perf_swevent_enabled[event_id]);
-       swevent_hlist_put(event);
+static int cpu_clock_event_init(struct perf_event *event)
+{
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
+               return -ENOENT;
+
+       return 0;
 }
 
-static const struct pmu *sw_perf_event_init(struct perf_event *event)
+static struct pmu perf_cpu_clock = {
+       .event_init     = cpu_clock_event_init,
+       .enable         = cpu_clock_event_enable,
+       .disable        = cpu_clock_event_disable,
+       .read           = cpu_clock_event_read,
+};
+
+/*
+ * Software event: task time clock
+ */
+
+static void task_clock_event_update(struct perf_event *event, u64 now)
 {
-       const struct pmu *pmu = NULL;
-       u64 event_id = event->attr.config;
+       u64 prev;
+       s64 delta;
 
-       /*
-        * Software events (currently) can't in general distinguish
-        * between user, kernel and hypervisor events.
-        * However, context switches and cpu migrations are considered
-        * to be kernel events, and page faults are never hypervisor
-        * events.
-        */
-       switch (event_id) {
-       case PERF_COUNT_SW_CPU_CLOCK:
-               pmu = &perf_ops_cpu_clock;
+       prev = local64_xchg(&event->hw.prev_count, now);
+       delta = now - prev;
+       local64_add(delta, &event->count);
+}
 
-               break;
-       case PERF_COUNT_SW_TASK_CLOCK:
-               /*
-                * If the user instantiates this as a per-cpu event,
-                * use the cpu_clock event instead.
-                */
-               if (event->ctx->task)
-                       pmu = &perf_ops_task_clock;
-               else
-                       pmu = &perf_ops_cpu_clock;
+static int task_clock_event_enable(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       u64 now;
 
-               break;
-       case PERF_COUNT_SW_PAGE_FAULTS:
-       case PERF_COUNT_SW_PAGE_FAULTS_MIN:
-       case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
-       case PERF_COUNT_SW_CONTEXT_SWITCHES:
-       case PERF_COUNT_SW_CPU_MIGRATIONS:
-       case PERF_COUNT_SW_ALIGNMENT_FAULTS:
-       case PERF_COUNT_SW_EMULATION_FAULTS:
-               if (!event->parent) {
-                       int err;
-
-                       err = swevent_hlist_get(event);
-                       if (err)
-                               return ERR_PTR(err);
+       now = event->ctx->time;
+
+       local64_set(&hwc->prev_count, now);
+
+       perf_swevent_start_hrtimer(event);
+
+       return 0;
+}
+
+static void task_clock_event_disable(struct perf_event *event)
+{
+       perf_swevent_cancel_hrtimer(event);
+       task_clock_event_update(event, event->ctx->time);
+
+}
+
+static void task_clock_event_read(struct perf_event *event)
+{
+       u64 time;
 
-                       atomic_inc(&perf_swevent_enabled[event_id]);
-                       event->destroy = sw_perf_event_destroy;
+       if (!in_nmi()) {
+               update_context_time(event->ctx);
+               time = event->ctx->time;
+       } else {
+               u64 now = perf_clock();
+               u64 delta = now - event->ctx->timestamp;
+               time = event->ctx->time + delta;
+       }
+
+       task_clock_event_update(event, time);
+}
+
+static int task_clock_event_init(struct perf_event *event)
+{
+       if (event->attr.type != PERF_TYPE_SOFTWARE)
+               return -ENOENT;
+
+       if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
+               return -ENOENT;
+
+       return 0;
+}
+
+static struct pmu perf_task_clock = {
+       .event_init     = task_clock_event_init,
+       .enable         = task_clock_event_enable,
+       .disable        = task_clock_event_disable,
+       .read           = task_clock_event_read,
+};
+
+static LIST_HEAD(pmus);
+static DEFINE_MUTEX(pmus_lock);
+static struct srcu_struct pmus_srcu;
+
+int perf_pmu_register(struct pmu *pmu)
+{
+       mutex_lock(&pmus_lock);
+       list_add_rcu(&pmu->entry, &pmus);
+       mutex_unlock(&pmus_lock);
+
+       return 0;
+}
+
+void perf_pmu_unregister(struct pmu *pmu)
+{
+       mutex_lock(&pmus_lock);
+       list_del_rcu(&pmu->entry);
+       mutex_unlock(&pmus_lock);
+
+       synchronize_srcu(&pmus_srcu);
+}
+
+struct pmu *perf_init_event(struct perf_event *event)
+{
+       struct pmu *pmu = NULL;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               int ret = pmu->event_init(event);
+               if (!ret)
+                       break;
+               if (ret != -ENOENT) {
+                       pmu = ERR_PTR(ret);
+                       break;
                }
-               pmu = &perf_ops_generic;
-               break;
        }
+       srcu_read_unlock(&pmus_srcu, idx);
 
        return pmu;
 }
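
perf_init_event() establishes the lookup contract for the new registration scheme: each registered pmu is offered the event in turn; event_init() returns -ENOENT to decline, 0 to claim the event, and any other error aborts the search. A minimal hypothetical pmu honoring that contract (EXAMPLE_CONFIG and the remaining callbacks are placeholders, not part of this patch):

    static int example_event_init(struct perf_event *event)
    {
            if (event->attr.type != PERF_TYPE_SOFTWARE)
                    return -ENOENT;         /* not ours, keep searching */
            if (event->attr.config != EXAMPLE_CONFIG)
                    return -ENOENT;
            return 0;                       /* claimed, search stops */
    }

    static struct pmu example_pmu = {
            .event_init     = example_event_init,
            /* .enable, .disable, .read, etc. as appropriate */
    };

    /* Registered once at init time, e.g. from an __init function: */
    perf_pmu_register(&example_pmu);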
@@ -4818,7 +5018,7 @@ perf_event_alloc(struct perf_event_attr *attr,
                   perf_overflow_handler_t overflow_handler,
                   gfp_t gfpflags)
 {
-       const struct pmu *pmu;
+       struct pmu *pmu;
        struct perf_event *event;
        struct hw_perf_event *hwc;
        long err;
@@ -4882,29 +5082,8 @@ perf_event_alloc(struct perf_event_attr *attr,
        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
                goto done;
 
-       switch (attr->type) {
-       case PERF_TYPE_RAW:
-       case PERF_TYPE_HARDWARE:
-       case PERF_TYPE_HW_CACHE:
-               pmu = hw_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_SOFTWARE:
-               pmu = sw_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_TRACEPOINT:
-               pmu = tp_perf_event_init(event);
-               break;
-
-       case PERF_TYPE_BREAKPOINT:
-               pmu = bp_perf_event_init(event);
-               break;
-
+       pmu = perf_init_event(event);
 
-       default:
-               break;
-       }
 done:
        err = 0;
        if (!pmu)
@@ -4929,6 +5108,13 @@ done:
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       err = get_callchain_buffers();
+                       if (err) {
+                               free_event(event);
+                               return ERR_PTR(err);
+                       }
+               }
        }
 
        return event;
@@ -5743,15 +5929,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
 
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
 
        case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
+       case CPU_DOWN_FAILED:
                perf_event_init_cpu(cpu);
                break;
 
+       case CPU_UP_CANCELED:
        case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
                perf_event_exit_cpu(cpu);
                break;
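
The notifier now masks off CPU_TASKS_FROZEN, which folds the suspend/resume variants into their plain counterparts; as far as I can tell from linux/cpu.h of that era, the _FROZEN action codes are simply the plain ones with CPU_TASKS_FROZEN or'ed in:

    /* Paraphrased from linux/cpu.h (assumption, verify against your tree):
     *
     *   #define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE   | CPU_TASKS_FROZEN)
     *   #define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
     *
     * so switch (action & ~CPU_TASKS_FROZEN) matches each case label for
     * both the normal and the frozen transition, while CPU_DOWN_FAILED /
     * CPU_UP_CANCELED undo the corresponding prepare hooks. */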
 
@@ -5762,22 +5948,15 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
-/*
- * This has to have a higher priority than migration_notifier in sched.c.
- */
-static struct notifier_block __cpuinitdata perf_cpu_nb = {
-       .notifier_call          = perf_cpu_notify,
-       .priority               = 20,
-};
-
 void __init perf_event_init(void)
 {
        perf_event_init_all_cpus();
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
-                       (void *)(long)smp_processor_id());
-       perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
-                       (void *)(long)smp_processor_id());
-       register_cpu_notifier(&perf_cpu_nb);
+       init_srcu_struct(&pmus_srcu);
+       perf_pmu_register(&perf_swevent);
+       perf_pmu_register(&perf_cpu_clock);
+       perf_pmu_register(&perf_task_clock);
+       perf_tp_register();
+       perf_cpu_notifier(perf_cpu_notify);
 }
 
 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,