perf counters: clean up state transitions
author    Ingo Molnar <mingo@elte.hu>
          Thu, 11 Dec 2008 14:17:03 +0000 (15:17 +0100)
committer Ingo Molnar <mingo@elte.hu>
          Thu, 11 Dec 2008 14:45:56 +0000 (15:45 +0100)
Impact: cleanup

Introduce a proper enum for the 3 states of a counter:

   PERF_COUNTER_STATE_OFF      = -1
   PERF_COUNTER_STATE_INACTIVE =  0
   PERF_COUNTER_STATE_ACTIVE   =  1

and rename counter->active to counter->state and propagate the
changes everywhere.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/kernel/cpu/perf_counter.c
include/linux/perf_counter.h
kernel/perf_counter.c
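
At a typical call site, the rename shows up roughly like this (an illustrative sketch only; the exact changes are in the hunks below):

    /* before: counter->active held magic values (-1 off, 0 inactive, 1 active) */
    if (counter->active)
            counter->hw_ops->hw_perf_counter_disable(counter);

    /* after: counter->state compares against the explicit enum */
    if (counter->state == PERF_COUNTER_STATE_ACTIVE)
            counter->hw_ops->hw_perf_counter_disable(counter);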

diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 3e1dbebe22b9cfe545423c27d1d0e1aa3d0cd0b0..4854cca7fffd4b788a9e261dcffc09b7d41358cf 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -332,7 +332,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
         * Then store sibling timestamps (if any):
         */
        list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-               if (!counter->active) {
+               if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
                        /*
                         * When counter was not in the overflow mask, we have to
                         * read it from hardware. We read it as well, when it
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 97d86c293ee8ffc4ee3bdee98d2e15d496f66d5d..8cb095fa442c38ad7d5122ec9d0e5fe2488e1489 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -127,6 +127,15 @@ struct hw_perf_counter_ops {
        void (*hw_perf_counter_read)    (struct perf_counter *counter);
 };
 
+/**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+       PERF_COUNTER_STATE_OFF          = -1,
+       PERF_COUNTER_STATE_INACTIVE     =  0,
+       PERF_COUNTER_STATE_ACTIVE       =  1,
+};
+
 /**
  * struct perf_counter - performance counter kernel representation:
  */
@@ -136,7 +145,7 @@ struct perf_counter {
        struct perf_counter             *group_leader;
        const struct hw_perf_counter_ops *hw_ops;
 
-       int                             active;
+       enum perf_counter_active_state  state;
 #if BITS_PER_LONG == 64
        atomic64_t                      count;
 #else
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4e679b91d8bb35d1ca9cf1f33d5adc076d3a51dd..559130b8774ded9c1b0a64bb6846b1e41adde6a5 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -167,9 +167,9 @@ static void __perf_counter_remove_from_context(void *info)
 
        spin_lock(&ctx->lock);
 
-       if (counter->active) {
+       if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                counter->hw_ops->hw_perf_counter_disable(counter);
-               counter->active = 0;
+               counter->state = PERF_COUNTER_STATE_INACTIVE;
                ctx->nr_active--;
                cpuctx->active_oncpu--;
                counter->task = NULL;
@@ -281,7 +281,7 @@ static void __perf_install_in_context(void *info)
 
        if (cpuctx->active_oncpu < perf_max_counters) {
                counter->hw_ops->hw_perf_counter_enable(counter);
-               counter->active = 1;
+               counter->state = PERF_COUNTER_STATE_ACTIVE;
                counter->oncpu = cpu;
                ctx->nr_active++;
                cpuctx->active_oncpu++;
@@ -328,7 +328,6 @@ retry:
 
        spin_lock_irq(&ctx->lock);
        /*
-        * If the context is active and the counter has not been added
         * we need to retry the smp call.
         */
        if (ctx->nr_active && list_empty(&counter->list_entry)) {
@@ -353,12 +352,12 @@ counter_sched_out(struct perf_counter *counter,
                  struct perf_cpu_context *cpuctx,
                  struct perf_counter_context *ctx)
 {
-       if (!counter->active)
+       if (counter->state != PERF_COUNTER_STATE_ACTIVE)
                return;
 
        counter->hw_ops->hw_perf_counter_disable(counter);
-       counter->active =  0;
-       counter->oncpu  = -1;
+       counter->state = PERF_COUNTER_STATE_INACTIVE;
+       counter->oncpu = -1;
 
        cpuctx->active_oncpu--;
        ctx->nr_active--;
@@ -415,11 +414,11 @@ counter_sched_in(struct perf_counter *counter,
                 struct perf_counter_context *ctx,
                 int cpu)
 {
-       if (counter->active == -1)
+       if (counter->state == PERF_COUNTER_STATE_OFF)
                return;
 
        counter->hw_ops->hw_perf_counter_enable(counter);
-       counter->active = 1;
+       counter->state = PERF_COUNTER_STATE_ACTIVE;
        counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
 
        cpuctx->active_oncpu++;
@@ -506,8 +505,8 @@ int perf_counter_task_disable(void)
        perf_flags = hw_perf_save_disable();
 
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               WARN_ON_ONCE(counter->active == 1);
-               counter->active = -1;
+               WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
+               counter->state = PERF_COUNTER_STATE_OFF;
        }
        hw_perf_restore(perf_flags);
 
@@ -540,9 +539,9 @@ int perf_counter_task_enable(void)
        perf_flags = hw_perf_save_disable();
 
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-               if (counter->active != -1)
+               if (counter->state != PERF_COUNTER_STATE_OFF)
                        continue;
-               counter->active = 0;
+               counter->state = PERF_COUNTER_STATE_INACTIVE;
        }
        hw_perf_restore(perf_flags);
 
@@ -620,7 +619,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
         * If counter is enabled and currently active on a CPU, update the
         * value in the counter structure:
         */
-       if (counter->active) {
+       if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
                smp_call_function_single(counter->oncpu,
                                         __hw_perf_counter_read, counter, 1);
        }
@@ -673,7 +672,7 @@ static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
 
 retry:
        spin_lock_irq(&ctx->lock);
-       if (!counter->active) {
+       if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
                counter->irqdata = counter->usrdata;
                counter->usrdata = oldirqdata;
                spin_unlock_irq(&ctx->lock);