cpufreq: governor: Rename skip_work to work_count
author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
           Mon, 8 Feb 2016 22:41:10 +0000 (23:41 +0100)
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>
           Tue, 9 Feb 2016 20:34:55 +0000 (21:34 +0100)
The skip_work field in struct policy_dbs_info is technically a counter,
so rename it to work_count to reflect that.

No functional changes.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_governor.h

diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 9c1dfcee0d571a428fe92ca2078711721ec778a3..a3001d8f34bbf8dc1ac727adb40bf0822f8d879e 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -196,16 +196,16 @@ static inline void gov_clear_update_util(struct cpufreq_policy *policy)
 static void gov_cancel_work(struct policy_dbs_info *policy_dbs)
 {
        /* Tell dbs_update_util_handler() to skip queuing up work items. */
-       atomic_inc(&policy_dbs->skip_work);
+       atomic_inc(&policy_dbs->work_count);
        /*
         * If dbs_update_util_handler() is already running, it may not notice
-        * the incremented skip_work, so wait for it to complete to prevent its
+        * the incremented work_count, so wait for it to complete to prevent its
         * work item from being queued up after the cancel_work_sync() below.
         */
        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
-       atomic_set(&policy_dbs->skip_work, 0);
+       atomic_set(&policy_dbs->work_count, 0);
 }
 
 static void dbs_work_handler(struct work_struct *work)
@@ -234,7 +234,7 @@ static void dbs_work_handler(struct work_struct *work)
         * up using a stale sample delay value.
         */
        smp_mb__before_atomic();
-       atomic_dec(&policy_dbs->skip_work);
+       atomic_dec(&policy_dbs->work_count);
 }
 
 static void dbs_irq_work(struct irq_work *irq_work)
@@ -266,7 +266,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
         * - The governor is being stopped.
         * - It is too early (too little time from the previous sample).
         */
-       if (atomic_inc_return(&policy_dbs->skip_work) == 1) {
+       if (atomic_inc_return(&policy_dbs->work_count) == 1) {
                u64 delta_ns;
 
                delta_ns = time - policy_dbs->last_sample_time;
@@ -276,7 +276,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                        return;
                }
        }
-       atomic_dec(&policy_dbs->skip_work);
+       atomic_dec(&policy_dbs->work_count);
 }
 
 static void set_sampling_rate(struct dbs_data *dbs_data,
@@ -304,7 +304,7 @@ static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *poli
                return NULL;
 
        mutex_init(&policy_dbs->timer_mutex);
-       atomic_set(&policy_dbs->skip_work, 0);
+       atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);
 
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 95e6834d36a831c8a43701aa84c023e3b79a2c1b..37537220e48cc7421399db34b5b09e66f13cdf7b 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -149,7 +149,7 @@ struct policy_dbs_info {
 
        u64 last_sample_time;
        s64 sample_delay_ns;
-       atomic_t skip_work;
+       atomic_t work_count;
        struct irq_work irq_work;
        struct work_struct work;
        /* dbs_data may be shared between multiple policy objects */
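
For readers unfamiliar with the code touched above, the counter pattern the
patch documents can be seen in isolation in the following userspace sketch.
It uses plain C11 atomics rather than the kernel's atomic_t/irq_work APIs,
and struct fake_policy_dbs_info plus the helper functions are invented here
purely for illustration; they only mirror the roles of the fields and
functions in the diff, they are not the kernel implementation.

/*
 * Minimal userspace sketch of the work_count guard pattern: only the caller
 * that takes the count from 0 to 1 may queue work, and the stop path raises
 * the count first so no new work can be queued while it cancels outstanding
 * work.  All names below are illustrative stand-ins for the kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_policy_dbs_info {
	atomic_int work_count;
	bool work_in_progress;	/* stands in for the queued irq_work/work item */
};

/* Update-path hook: mirrors the role of dbs_update_util_handler(). */
static void update_handler(struct fake_policy_dbs_info *p)
{
	/* atomic_fetch_add() returns the old value; 0 means we went 0 -> 1. */
	if (atomic_fetch_add(&p->work_count, 1) == 0) {
		p->work_in_progress = true;	/* "queue" the work item */
		printf("work queued\n");
		return;		/* the work handler drops the count later */
	}
	/* Work is already pending or the governor is stopping: back off. */
	atomic_fetch_sub(&p->work_count, 1);
}

/* Work handler: mirrors dbs_work_handler() dropping the count when done. */
static void work_handler(struct fake_policy_dbs_info *p)
{
	p->work_in_progress = false;
	atomic_fetch_sub(&p->work_count, 1);
}

/* Stop path: mirrors gov_cancel_work() blocking new work, then resetting. */
static void cancel_work(struct fake_policy_dbs_info *p)
{
	atomic_fetch_add(&p->work_count, 1);	/* block further queuing */
	/* ...cancel/flush any outstanding work here... */
	p->work_in_progress = false;
	atomic_store(&p->work_count, 0);
}

int main(void)
{
	struct fake_policy_dbs_info p = { .work_in_progress = false };

	atomic_init(&p.work_count, 0);
	update_handler(&p);	/* queues work (count 0 -> 1) */
	update_handler(&p);	/* no-op: count was already non-zero */
	work_handler(&p);	/* completes the queued work, count back to 0 */
	cancel_work(&p);	/* stop path */
	return 0;
}

The sketch also makes the motivation for the rename visible: the field is
never a boolean "skip" flag but a count of in-flight reasons not to queue
new work, which is exactly what the commit message states.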