perf: Reduce perf_disable() usage
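
Taken together, the hunks below do three things to the ARM perf_event backend: they move the perf_disable()/perf_enable() bracketing into armpmu_enable() itself, they replace the old hw_perf_event_init() entry point (which returned a struct pmu pointer or an ERR_PTR() value) with a struct pmu carrying an .event_init callback that is registered through perf_pmu_register(), and they drop the local callchain plumbing in favour of the generic perf_callchain_store() interface.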
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392ddf1cb55066fa5f99e83e77514bd89901..afc92c580d18a265f0af31a965c7c9e01c16b409 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event)
        int idx;
        int err = 0;
 
+       perf_disable();
+
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
@@ -303,15 +305,11 @@ armpmu_enable(struct perf_event *event)
        perf_event_update_userpage(event);
 
 out:
+       perf_enable();
        return err;
 }
 
-static struct pmu pmu = {
-       .enable     = armpmu_enable,
-       .disable    = armpmu_disable,
-       .unthrottle = armpmu_unthrottle,
-       .read       = armpmu_read,
-};
+static struct pmu pmu;
 
 static int
 validate_event(struct cpu_hw_events *cpuc,
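
The first two hunks wrap the counter-allocation path in perf_disable()/perf_enable(), so the PMU is quiescent while an event is being placed on a counter. A minimal sketch of the resulting pattern, where claim_counter() and program_counter() are hypothetical stand-ins for get_event_idx() and the event-programming code:

	/*
	 * Sketch only: the PMU is stopped for the whole claim-and-program
	 * sequence, so a half-configured counter can never raise an
	 * interrupt.  claim_counter() and program_counter() are
	 * illustrative stand-ins, not kernel functions.
	 */
	static int sketch_enable(struct perf_event *event)
	{
		int idx, err = 0;

		perf_disable();			/* stop the whole PMU */

		idx = claim_counter(event);	/* hypothetical */
		if (idx < 0) {
			err = idx;
			goto out;
		}
		program_counter(event, idx);	/* hypothetical */
	out:
		perf_enable();			/* restart the PMU */
		return err;
	}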
@@ -491,20 +489,29 @@ __hw_perf_event_init(struct perf_event *event)
        return err;
 }
 
-const struct pmu *
-hw_perf_event_init(struct perf_event *event)
+static int armpmu_event_init(struct perf_event *event)
 {
        int err = 0;
 
+       switch (event->attr.type) {
+       case PERF_TYPE_RAW:
+       case PERF_TYPE_HARDWARE:
+       case PERF_TYPE_HW_CACHE:
+               break;
+
+       default:
+               return -ENOENT;
+       }
+
        if (!armpmu)
-               return ERR_PTR(-ENODEV);
+               return -ENODEV;
 
        event->destroy = hw_perf_event_destroy;
 
        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > perf_max_events) {
                        atomic_dec(&active_events);
-                       return ERR_PTR(-ENOSPC);
+                       return -ENOSPC;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -518,15 +525,23 @@ hw_perf_event_init(struct perf_event *event)
        }
 
        if (err)
-               return ERR_PTR(err);
+               return err;
 
        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);
 
-       return err ? ERR_PTR(err) : &pmu;
+       return err;
 }
 
+static struct pmu pmu = {
+       .event_init = armpmu_event_init,
+       .enable     = armpmu_enable,
+       .disable    = armpmu_disable,
+       .unthrottle = armpmu_unthrottle,
+       .read       = armpmu_read,
+};
+
 void
 hw_perf_enable(void)
 {
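
Under the new interface, armpmu_event_init() answers -ENOENT for attribute types it does not recognise, which tells the core "not my event, keep looking" rather than "hard failure". A simplified sketch of how a core-side dispatcher can consume that convention (illustrative only, not the kernel's actual perf_init_event(); the 'entry' list member is an assumption):

	/* Offer the event to each registered pmu until one claims it. */
	static struct pmu *sketch_find_pmu(struct list_head *pmus,
					   struct perf_event *event)
	{
		struct pmu *p;
		int err;

		list_for_each_entry(p, pmus, entry) {
			err = p->event_init(event);
			if (!err)
				return p;		/* claimed */
			if (err != -ENOENT)
				return ERR_PTR(err);	/* real error: stop */
		}
		return ERR_PTR(-ENOENT);	/* nobody wanted it */
	}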
@@ -2994,6 +3009,8 @@ init_hw_perf_events(void)
                perf_max_events = -1;
        }
 
+       perf_pmu_register(&pmu);
+
        return 0;
 }
 arch_initcall(init_hw_perf_events);
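
perf_pmu_register(&pmu) is what actually exposes the ARM PMU to the core; at this stage of the series it takes only the struct pmu pointer. Its int return value is ignored above; a caller that wanted to be defensive could check it (sketch, not part of the patch):

	err = perf_pmu_register(&pmu);
	if (err)
		pr_err("ARM PMU: perf_pmu_register failed (%d)\n", err);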
@@ -3001,13 +3018,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3039,7 +3049,7 @@ user_backtrace(struct frame_tail *tail,
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;
 
-       callchain_store(entry, buftail.lr);
+       perf_callchain_store(entry, buftail.lr);
 
        /*
         * Frame pointers should strictly progress back up the stack
@@ -3051,16 +3061,11 @@ user_backtrace(struct frame_tail *tail,
        return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct frame_tail *tail;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
 
        tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3078,56 +3083,18 @@ callchain_trace(struct stackframe *fr,
                void *data)
 {
        struct perf_callchain_entry *entry = data;
-       callchain_store(entry, fr->pc);
+       perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (!current || !current->pid)
-               return;
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
-}
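
The last three hunks hand the callchain bookkeeping over to the core: perf_do_callchain(), the per-CPU pmc_irq_entry buffer, and the PERF_CONTEXT_USER/PERF_CONTEXT_KERNEL markers all disappear from the arch code, and perf_callchain_user()/perf_callchain_kernel() become non-static hooks invoked by the generic layer with the (entry, regs) argument order. The perf_callchain_store() helper the arch now calls is, conceptually, the removed callchain_store() hoisted into common code, along the lines of:

	/* Sketch of the generic helper (simplified): bounds-checked append. */
	static inline void perf_callchain_store(struct perf_callchain_entry *entry,
						u64 ip)
	{
		if (entry->nr < PERF_MAX_STACK_DEPTH)
			entry->ip[entry->nr++] = ip;
	}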