perf_counter: Remove perf_counter_context::nr_enabled
author    Peter Zijlstra <a.p.zijlstra@chello.nl>
          Sat, 23 May 2009 16:29:01 +0000 (18:29 +0200)
committer Ingo Molnar <mingo@elte.hu>
          Sun, 24 May 2009 06:24:30 +0000 (08:24 +0200)
Now that prctl() no longer disables other people's counters, remove
the PMU cache code that deals with that.
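
The nr_enabled count existed only to feed context_equiv(), which decides
on a context switch whether the outgoing and incoming tasks' counter
contexts are clones of one another and can simply be swapped rather than
fully rescheduled. A minimal sketch of that fast path, paraphrased from
perf_counter_task_sched_out() with the locking and RCU details elided
(illustrative, not verbatim):

    struct perf_counter_context *ctx = task->perf_counter_ctxp;
    struct perf_counter_context *next_ctx = next->perf_counter_ctxp;

    if (ctx && next_ctx && context_equiv(ctx, next_ctx)) {
            /*
             * Both contexts are clones of the same parent at the same
             * generation: swap the context pointers instead of doing a
             * full sched_out/sched_in of every counter.
             */
            task->perf_counter_ctxp = next_ctx;
            next->perf_counter_ctxp = ctx;
            ctx->task = next;
            next_ctx->task = task;
    }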

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090523163013.032998331@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/perf_counter.h
kernel/perf_counter.c

diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 4159ee5940f8a7147630052a5b031375795299c7..2ddf5e3c55186bed11ce6bd6997155a1cf9c0480 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -516,7 +516,6 @@ struct perf_counter_context {
        struct list_head        event_list;
        int                     nr_counters;
        int                     nr_active;
-       int                     nr_enabled;
        int                     is_active;
        atomic_t                refcount;
        struct task_struct      *task;
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4c86a636976470b29913d9c63f4c711fb5fb6cb4..cb4062559b47c66c8b696809e8945f4166d47ddc 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -134,8 +134,6 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 
        list_add_rcu(&counter->event_entry, &ctx->event_list);
        ctx->nr_counters++;
-       if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-               ctx->nr_enabled++;
 }
 
 /*
@@ -150,8 +148,6 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
        if (list_empty(&counter->list_entry))
                return;
        ctx->nr_counters--;
-       if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
-               ctx->nr_enabled--;
 
        list_del_init(&counter->list_entry);
        list_del_rcu(&counter->event_entry);
@@ -406,7 +402,6 @@ static void __perf_counter_disable(void *info)
                else
                        counter_sched_out(counter, cpuctx, ctx);
                counter->state = PERF_COUNTER_STATE_OFF;
-               ctx->nr_enabled--;
        }
 
        spin_unlock_irqrestore(&ctx->lock, flags);
@@ -448,7 +443,6 @@ static void perf_counter_disable(struct perf_counter *counter)
        if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
                update_counter_times(counter);
                counter->state = PERF_COUNTER_STATE_OFF;
-               ctx->nr_enabled--;
        }
 
        spin_unlock_irq(&ctx->lock);
@@ -759,7 +753,6 @@ static void __perf_counter_enable(void *info)
                goto unlock;
        counter->state = PERF_COUNTER_STATE_INACTIVE;
        counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
-       ctx->nr_enabled++;
 
        /*
         * If the counter is in a group and isn't the group leader,
@@ -850,7 +843,6 @@ static void perf_counter_enable(struct perf_counter *counter)
                counter->state = PERF_COUNTER_STATE_INACTIVE;
                counter->tstamp_enabled =
                        ctx->time - counter->total_time_enabled;
-               ctx->nr_enabled++;
        }
  out:
        spin_unlock_irq(&ctx->lock);
@@ -910,8 +902,7 @@ static int context_equiv(struct perf_counter_context *ctx1,
                         struct perf_counter_context *ctx2)
 {
        return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
-               && ctx1->parent_gen == ctx2->parent_gen
-               && ctx1->nr_enabled == ctx2->nr_enabled;
+               && ctx1->parent_gen == ctx2->parent_gen;
 }
 
 /*
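
With nr_enabled gone, context_equiv() treats two contexts as
interchangeable exactly when they are clones of the same parent taken at
the same generation. Per-context enable state no longer factors into the
comparison: since prctl() now only touches the calling task's own
counters, a clone's counters cannot be enabled or disabled behind its
back, so the parent and generation checks are sufficient.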