perf: Reduce perf_disable() usage
author     Peter Zijlstra <a.p.zijlstra@chello.nl>
           Fri, 11 Jun 2010 15:32:03 +0000 (17:32 +0200)
committer  Ingo Molnar <mingo@elte.hu>
           Thu, 9 Sep 2010 18:46:29 +0000 (20:46 +0200)
Since the current perf_disable() usage is only an optimization,
remove it for now. This eases the removal of the __weak
hw_perf_enable() interface.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/arm/kernel/perf_event.c
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/sh/kernel/perf_event.c
arch/sparc/kernel/perf_event.c
arch/x86/kernel/cpu/perf_event.c
include/linux/perf_event.h
kernel/perf_event.c
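
For orientation, a minimal sketch of the pattern this change applies (stub types and
functions, not code from the patch): the generic core stops wrapping pmu calls in
perf_disable()/perf_enable(), and each pmu ->enable() implementation brackets only its
own critical section, as the per-architecture hunks below do.

/*
 * Illustrative stub only -- models how a pmu ->enable() now takes the
 * disable itself instead of relying on the caller to have done it.
 */
#include <stdio.h>

static void perf_disable(void) { puts("hw counters stopped"); }
static void perf_enable(void)  { puts("hw counters restarted"); }

static int find_counter(void)  { return 0; }   /* pretend counter 0 is free */

static int pmu_enable(int event)
{
	int idx, err = 0;

	perf_disable();                 /* method brackets its own work */

	idx = find_counter();
	if (idx < 0) {
		err = -1;               /* no free counter: -EAGAIN in the kernel */
		goto out;
	}

	printf("event %d bound to counter %d\n", event, idx);
out:
	perf_enable();                  /* always re-enable before returning */
	return err;
}

int main(void)
{
	return pmu_enable(42) ? 1 : 0;
}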

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f62f9db35db3d85da24c78632573e91bd0d41888..afc92c580d18a265f0af31a965c7c9e01c16b409 100644
@@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event)
        int idx;
        int err = 0;
 
+       perf_disable();
+
        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
@@ -303,6 +305,7 @@ armpmu_enable(struct perf_event *event)
        perf_event_update_userpage(event);
 
 out:
+       perf_enable();
        return err;
 }
 
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 19131b2614b9c54fc10f5e86522b3da417a6e86c..c1408821dbc246b744cbc31507aa8bbb25f0b4e2 100644
@@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_disable();
        cpuhw->group_flag |= PERF_EVENT_TXN;
        cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_enable();
 }
 
 /*
@@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
                cpuhw->event[i]->hw.config = cpuhw->events[i];
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_enable();
        return 0;
 }
 
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index ea6a804e43fdc1dc2faf767172c496a74d06a2fb..9bc84a7fd901ba48a81dd52ec8f27a57a0343b13 100644
@@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count,
        return n;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static int fsl_emb_pmu_enable(struct perf_event *event)
 {
        struct cpu_hw_events *cpuhw;
@@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        u64 val;
        int i;
 
+       perf_disable();
        cpuhw = &get_cpu_var(cpu_hw_events);
 
        if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
        ret = 0;
  out:
        put_cpu_var(cpu_hw_events);
+       perf_enable();
        return ret;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static void fsl_emb_pmu_disable(struct perf_event *event)
 {
        struct cpu_hw_events *cpuhw;
        int i = event->hw.idx;
 
+       perf_disable();
        if (i < 0)
                goto out;
 
@@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
        cpuhw->n_events--;
 
  out:
+       perf_enable();
        put_cpu_var(cpu_hw_events);
 }
 
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 8cb206597e0cad28bbabf62f54cf6e2f93ef9f4d..d042989ceb45bc58031e6ecd758037f71846a74b 100644
@@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
+       int ret = -EAGAIN;
+
+       perf_disable();
 
        if (test_and_set_bit(idx, cpuc->used_mask)) {
                idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                if (idx == sh_pmu->num_events)
-                       return -EAGAIN;
+                       goto out;
 
                set_bit(idx, cpuc->used_mask);
                hwc->idx = idx;
@@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event)
        sh_pmu->enable(hwc, idx);
 
        perf_event_update_userpage(event);
-
-       return 0;
+       ret = 0;
+out:
+       perf_enable();
+       return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index bed4327f5a7a5a8b81c44f4f852df3659dc6d162..d0131deeeaf67e86eb7023e56dfccb21e3391855 100644
@@ -1113,6 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+       perf_disable();
        cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
@@ -1126,6 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
        struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
        cpuhw->group_flag &= ~PERF_EVENT_TXN;
+       perf_enable();
 }
 
 /*
@@ -1149,6 +1151,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
                return -EAGAIN;
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
+       perf_enable();
        return 0;
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 2c89264ee7919905ddfa7687770a11c0aad8644c..846070ce49c3db27346294a6ff10210b3586f999 100644
@@ -969,10 +969,11 @@ static int x86_pmu_enable(struct perf_event *event)
 
        hwc = &event->hw;
 
+       perf_disable();
        n0 = cpuc->n_events;
-       n = collect_events(cpuc, event, false);
-       if (n < 0)
-               return n;
+       ret = n = collect_events(cpuc, event, false);
+       if (ret < 0)
+               goto out;
 
        /*
         * If group events scheduling transaction was started,
@@ -980,23 +981,26 @@ static int x86_pmu_enable(struct perf_event *event)
         * at commit time(->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
-               goto out;
+               goto done_collect;
 
        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
-               return ret;
+               goto out;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;
 
-       return 0;
+       ret = 0;
+out:
+       perf_enable();
+       return ret;
 }
 
 static int x86_pmu_start(struct perf_event *event)
@@ -1432,6 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+       perf_disable();
        cpuc->group_flag |= PERF_EVENT_TXN;
        cpuc->n_txn = 0;
 }
@@ -1451,6 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
         */
        cpuc->n_added -= cpuc->n_txn;
        cpuc->n_events -= cpuc->n_txn;
+       perf_enable();
 }
 
 /*
@@ -1480,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
        cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+       perf_enable();
        return 0;
 }
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index ab72f56eb3722dd8c4ea2f028fd0c97e136c90ff..243286a8ded76c54ff3bbf3bf854b57e0d70937d 100644
@@ -564,26 +564,26 @@ struct pmu {
        struct list_head                entry;
 
        /*
-        * Should return -ENOENT when the @event doesn't match this pmu
+        * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);
 
-       int (*enable)                   (struct perf_event *event);
+       int  (*enable)                  (struct perf_event *event);
        void (*disable)                 (struct perf_event *event);
-       int (*start)                    (struct perf_event *event);
+       int  (*start)                   (struct perf_event *event);
        void (*stop)                    (struct perf_event *event);
        void (*read)                    (struct perf_event *event);
        void (*unthrottle)              (struct perf_event *event);
 
        /*
-        * Group events scheduling is treated as a transaction, add group
-        * events as a whole and perform one schedulability test. If the test
-        * fails, roll back the whole group
+        * Group events scheduling is treated as a transaction, add
+        * group events as a whole and perform one schedulability test.
+        * If the test fails, roll back the whole group
         */
 
        /*
-        * Start the transaction, after this ->enable() doesn't need
-        * to do schedulability tests.
+        * Start the transaction, after this ->enable() doesn't need to
+        * do schedulability tests.
         */
        void (*start_txn)       (struct pmu *pmu);
        /*
@@ -594,8 +594,8 @@ struct pmu {
         */
        int  (*commit_txn)      (struct pmu *pmu);
        /*
-        * Will cancel the transaction, assumes ->disable() is called for
-        * each successfull ->enable() during the transaction.
+        * Will cancel the transaction, assumes ->disable() is called
+        * for each successfull ->enable() during the transaction.
         */
        void (*cancel_txn)      (struct pmu *pmu);
 };
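
The comment block in the hunk above documents the transaction hooks; as a rough sketch
(simplified types and names, loosely following how group_sched_in() in kernel/perf_event.c
drives them), the expected calling sequence looks like this:

struct event;   /* opaque stand-in for struct perf_event */

struct pmu {
	void (*start_txn)(struct pmu *pmu);
	int  (*enable)(struct event *event);
	void (*disable)(struct event *event);
	int  (*commit_txn)(struct pmu *pmu);
	void (*cancel_txn)(struct pmu *pmu);
};

static int group_add(struct pmu *pmu, struct event **group, int n)
{
	int i;

	pmu->start_txn(pmu);            /* ->enable() may skip per-event schedulability tests */

	for (i = 0; i < n; i++) {
		if (pmu->enable(group[i]))
			goto rollback;  /* undo the members already added */
	}

	if (!pmu->commit_txn(pmu))      /* one schedulability test for the whole group */
		return 0;

rollback:
	while (--i >= 0)
		pmu->disable(group[i]); /* contract: ->disable() each successful ->enable() */
	pmu->cancel_txn(pmu);
	return -1;
}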
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 149ca18371b74d2df9f35e6d006d4e24fa9ce034..9a98ce95356106d689c9fba843dbb5a67a67c4c0 100644
@@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
                return;
 
        raw_spin_lock(&ctx->lock);
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level.
-        */
-       perf_disable();
 
        event_sched_out(event, cpuctx, ctx);
 
@@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
                            perf_max_events - perf_reserved_percpu);
        }
 
-       perf_enable();
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info)
        ctx->is_active = 1;
        update_context_time(ctx);
 
-       /*
-        * Protect the list operation against NMI by disabling the
-        * events on a global level. NOP for non NMI based events.
-        */
-       perf_disable();
-
        add_event_to_ctx(event, ctx);
 
        if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info)
                cpuctx->max_pertask--;
 
 unlock:
-       perf_enable();
-
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -972,12 +958,10 @@ static void __perf_event_enable(void *info)
        if (!group_can_go_on(event, cpuctx, 1)) {
                err = -EEXIST;
        } else {
-               perf_disable();
                if (event == leader)
                        err = group_sched_in(event, cpuctx, ctx);
                else
                        err = event_sched_in(event, cpuctx, ctx);
-               perf_enable();
        }
 
        if (err) {
@@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                goto out;
        update_context_time(ctx);
 
-       perf_disable();
        if (!ctx->nr_active)
-               goto out_enable;
+               goto out;
 
        if (event_type & EVENT_PINNED) {
                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
@@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                        group_sched_out(event, cpuctx, ctx);
        }
-
- out_enable:
-       perf_enable();
 out:
        raw_spin_unlock(&ctx->lock);
 }
@@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
        ctx->timestamp = perf_clock();
 
-       perf_disable();
-
        /*
         * First go through the list and put on any pinned groups
         * in order to give them the best chance of going on.
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx,
        if (event_type & EVENT_FLEXIBLE)
                ctx_flexible_sched_in(ctx, cpuctx);
 
-       perf_enable();
 out:
        raw_spin_unlock(&ctx->lock);
 }
@@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
        if (cpuctx->task_ctx == ctx)
                return;
 
-       perf_disable();
-
        /*
         * We want to keep the following priority order:
         * cpu pinned (that don't need to move), task pinned,
@@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
        ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
        cpuctx->task_ctx = ctx;
-
-       perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
        hwc->sample_period = sample_period;
 
        if (local64_read(&hwc->period_left) > 8*sample_period) {
-               perf_disable();
                perf_event_stop(event);
                local64_set(&hwc->period_left, 0);
                perf_event_start(event);
-               perf_enable();
        }
 }
 
@@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 */
                if (interrupts == MAX_INTERRUPTS) {
                        perf_log_throttle(event, 1);
-                       perf_disable();
                        event->pmu->unthrottle(event);
-                       perf_enable();
                }
 
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
-               perf_disable();
                event->pmu->read(event);
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
@@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
                if (delta > 0)
                        perf_adjust_period(event, TICK_NSEC, delta);
-               perf_enable();
        }
        raw_spin_unlock(&ctx->lock);
 }
@@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
        if (!rotate)
                return;
 
-       perf_disable();
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-       perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,