From afedadf23a2c90f3ba0d963282cbe6a6be129494 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 20 May 2009 12:21:22 +0200
Subject: [PATCH] perf_counter: Optimize sched in/out of counters

Avoid a function call for !group counters by directly calling the
counter function.

[ Impact: micro-optimize the code ]

Signed-off-by: Peter Zijlstra
Cc: Paul Mackerras
Cc: Corey Ashford
Cc: Peter Zijlstra
Cc: Arnaldo Carvalho de Melo
Cc: Marcelo Tosatti
Cc: John Kacur
LKML-Reference: <20090520102553.511933670@chello.nl>
Signed-off-by: Ingo Molnar
---
 kernel/perf_counter.c | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 473ed2cafbf..69d4de81596 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -826,8 +826,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 
 	perf_disable();
 	if (ctx->nr_active) {
-		list_for_each_entry(counter, &ctx->counter_list, list_entry)
-			group_sched_out(counter, cpuctx, ctx);
+		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+			if (counter != counter->group_leader)
+				counter_sched_out(counter, cpuctx, ctx);
+			else
+				group_sched_out(counter, cpuctx, ctx);
+		}
 	}
 	perf_enable();
  out:
@@ -903,8 +907,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		if (group_can_go_on(counter, cpuctx, 1))
-			group_sched_in(counter, cpuctx, ctx, cpu);
+		if (counter != counter->group_leader)
+			counter_sched_in(counter, cpuctx, ctx, cpu);
+		else {
+			if (group_can_go_on(counter, cpuctx, 1))
+				group_sched_in(counter, cpuctx, ctx, cpu);
+		}
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -932,9 +940,14 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
-			if (group_sched_in(counter, cpuctx, ctx, cpu))
+		if (counter != counter->group_leader) {
+			if (counter_sched_in(counter, cpuctx, ctx, cpu))
 				can_add_hw = 0;
+		} else {
+			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
+				if (group_sched_in(counter, cpuctx, ctx, cpu))
+					can_add_hw = 0;
+			}
 		}
 	}
 	perf_enable();
-- 
2.39.5
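
[Editor's note, not part of the patch: all three hunks add the same dispatch, so a minimal user-space sketch of it follows. struct counter, sched_in() and the printf bodies below are simplified stand-ins for illustration only; the real kernel counter_sched_in()/group_sched_in() take cpuctx, ctx and cpu arguments and return status codes.]

#include <stdio.h>

/* Simplified stand-in for struct perf_counter; only the field used by
 * the dispatch check is kept. */
struct counter {
	struct counter *group_leader;	/* points to itself for a group leader */
	const char *name;
};

/* Stand-ins for the kernel's counter_sched_in()/group_sched_in(). */
static void counter_sched_in(struct counter *c)
{
	printf("counter_sched_in: %s (direct call, no group walk)\n", c->name);
}

static void group_sched_in(struct counter *c)
{
	printf("group_sched_in:   %s (leader path)\n", c->name);
}

/* The pattern the patch introduces: only group leaders go through the
 * group-level scheduling function; plain counters are scheduled directly,
 * saving one function call per non-leader counter. */
static void sched_in(struct counter *c)
{
	if (c != c->group_leader)
		counter_sched_in(c);
	else
		group_sched_in(c);
}

int main(void)
{
	struct counter leader = { .group_leader = &leader, .name = "leader" };
	struct counter member = { .group_leader = &leader, .name = "member" };

	sched_in(&leader);	/* leader: group path */
	sched_in(&member);	/* non-leader: cheaper direct path */
	return 0;
}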