cpufreq: schedutil: use now as reference when aggregating shared policy requests
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index f5ffe241812e6e2d1055103e883e3da94e4dfd37..622eed1b7658301a94a645c4426598b0096a8ab9 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -61,6 +61,11 @@ struct sugov_cpu {
        unsigned long util;
        unsigned long max;
        unsigned int flags;
+
+       /* The field below is for single-CPU policies only. */
+#ifdef CONFIG_NO_HZ_COMMON
+       unsigned long saved_idle_calls;
+#endif
 };
 
 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
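
For reference, struct sugov_cpu after this change looks roughly as follows (abridged from the 4.12-era file). The new field sits behind CONFIG_NO_HZ_COMMON because it caches the NOHZ idle-calls counter, which does not exist on kernels built without NO_HZ:

	struct sugov_cpu {
		struct update_util_data update_util;
		struct sugov_policy *sg_policy;

		unsigned long iowait_boost;
		unsigned long iowait_boost_max;
		u64 last_update;

		/* The fields below are only needed when sharing a policy. */
		unsigned long util;
		unsigned long max;
		unsigned int flags;

		/* The field below is for single-CPU policies only. */
	#ifdef CONFIG_NO_HZ_COMMON
		unsigned long saved_idle_calls;
	#endif
	};
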
@@ -93,22 +98,20 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 {
        struct cpufreq_policy *policy = sg_policy->policy;
 
+       if (sg_policy->next_freq == next_freq)
+               return;
+
+       sg_policy->next_freq = next_freq;
+       sg_policy->last_freq_update_time = time;
+
        if (policy->fast_switch_enabled) {
-               if (sg_policy->next_freq == next_freq) {
-                       trace_cpu_frequency(policy->cur, smp_processor_id());
-                       return;
-               }
-               sg_policy->next_freq = next_freq;
-               sg_policy->last_freq_update_time = time;
                next_freq = cpufreq_driver_fast_switch(policy, next_freq);
                if (next_freq == CPUFREQ_ENTRY_INVALID)
                        return;
 
                policy->cur = next_freq;
                trace_cpu_frequency(next_freq, smp_processor_id());
-       } else if (sg_policy->next_freq != next_freq) {
-               sg_policy->next_freq = next_freq;
-               sg_policy->last_freq_update_time = time;
+       } else {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
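
The rework hoists the early bail-out and the next_freq/last_freq_update_time bookkeeping out of the two branches, with the side effect this part of the series aims for: cpu_frequency is now traced only when the frequency actually changes (the old fast-switch branch emitted a trace point for policy->cur even when the request was a no-op). For context, the irq_work queued on the slow path ends up in a dedicated kthread elsewhere in this file, roughly (lightly abridged):

	static void sugov_work(struct kthread_work *work)
	{
		struct sugov_policy *sg_policy =
			container_of(work, struct sugov_policy, work);

		mutex_lock(&sg_policy->work_lock);
		/* Do the actual frequency change in process context. */
		__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
					CPUFREQ_RELATION_L);
		mutex_unlock(&sg_policy->work_lock);

		sg_policy->work_in_progress = false;
	}
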
@@ -192,6 +195,19 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
        sg_cpu->iowait_boost >>= 1;
 }
 
+#ifdef CONFIG_NO_HZ_COMMON
+static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
+{
+       unsigned long idle_calls = tick_nohz_get_idle_calls();
+       bool ret = idle_calls == sg_cpu->saved_idle_calls;
+
+       sg_cpu->saved_idle_calls = idle_calls;
+       return ret;
+}
+#else
+static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
+#endif /* CONFIG_NO_HZ_COMMON */
+
 static void sugov_update_single(struct update_util_data *hook, u64 time,
                                unsigned int flags)
 {
@@ -200,6 +216,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util, max;
        unsigned int next_f;
+       bool busy;
 
        sugov_set_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;
@@ -207,21 +224,28 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
        if (!sugov_should_update_freq(sg_policy, time))
                return;
 
+       busy = sugov_cpu_is_busy(sg_cpu);
+
        if (flags & SCHED_CPUFREQ_RT_DL) {
                next_f = policy->cpuinfo.max_freq;
        } else {
                sugov_get_util(&util, &max);
                sugov_iowait_boost(sg_cpu, &util, &max);
                next_f = get_next_freq(sg_policy, util, max);
+               /*
+                * Do not reduce the frequency if the CPU has not been idle
+                * recently, as the reduction is likely to be premature then.
+                */
+               if (busy && next_f < sg_policy->next_freq)
+                       next_f = sg_policy->next_freq;
        }
        sugov_update_commit(sg_policy, time, next_f);
 }
 
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
 {
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
-       u64 last_freq_update_time = sg_policy->last_freq_update_time;
        unsigned long util = 0, max = 1;
        unsigned int j;
 
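To see what the clamp buys: in the frequency-invariant case, get_next_freq() computes next_freq = 1.25 * max_freq * util / max. With max_freq = 2 GHz and a previous request of 2 GHz (util/max = 0.8), a transient dip to util/max = 0.5 would yield next_f = 1.25 * 2 GHz * 0.5 = 1.25 GHz. If sugov_cpu_is_busy() says the CPU has not slept since the last update, that dip is almost certainly an artifact of a briefly dequeued task rather than real slack, so the previous 2 GHz request is kept and the premature ramp-down is skipped.
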
@@ -237,7 +261,7 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
                 * enough, don't take the CPU into account as it probably is
                 * idle now (and clear iowait_boost for it).
                 */
-               delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+               delta_ns = time - j_sg_cpu->last_update;
                if (delta_ns > TICK_NSEC) {
                        j_sg_cpu->iowait_boost = 0;
                        continue;
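
This hunk is the change the commit subject refers to. delta_ns is an s64, and last_freq_update_time only advances when a new frequency is actually committed (see the reworked sugov_update_commit() above), so it can lag well behind the present. With the old reference, any sibling CPU whose last update was newer than that stale timestamp produced a negative delta_ns and counted as fresh, even if its last update happened, say, 50 ms ago and the CPU has long since gone idle. Measuring against the current update time makes the TICK_NSEC window mean what it says: anything older than one tick relative to now is treated as idle and has its iowait boost cleared.
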
@@ -281,7 +305,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
                if (flags & SCHED_CPUFREQ_RT_DL)
                        next_f = sg_policy->policy->cpuinfo.max_freq;
                else
-                       next_f = sugov_next_freq_shared(sg_cpu);
+                       next_f = sugov_next_freq_shared(sg_cpu, time);
 
                sugov_update_commit(sg_policy, time, next_f);
        }
@@ -466,7 +490,6 @@ static int sugov_init(struct cpufreq_policy *policy)
 {
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
-       unsigned int lat;
        int ret = 0;
 
        /* State should be equivalent to EXIT */
@@ -505,10 +528,16 @@ static int sugov_init(struct cpufreq_policy *policy)
                goto stop_kthread;
        }
 
-       tunables->rate_limit_us = LATENCY_MULTIPLIER;
-       lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
-       if (lat)
-               tunables->rate_limit_us *= lat;
+       if (policy->transition_delay_us) {
+               tunables->rate_limit_us = policy->transition_delay_us;
+       } else {
+               unsigned int lat;
+
+               tunables->rate_limit_us = LATENCY_MULTIPLIER;
+               lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
+               if (lat)
+                       tunables->rate_limit_us *= lat;
+       }
 
        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;
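
The fallback arithmetic is unchanged; as a worked example, with the usual LATENCY_MULTIPLIER of 1000: a driver reporting transition_latency = 20,000 ns gives lat = 20, hence rate_limit_us = 1000 * 20 = 20,000 us, i.e. at most one frequency update every 20 ms. A driver that can switch faster may now set policy->transition_delay_us directly (say, 500) and get a 0.5 ms rate limit instead of the multiplier-based estimate.
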
@@ -577,20 +606,14 @@ static int sugov_start(struct cpufreq_policy *policy)
        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
 
+               memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->sg_policy = sg_policy;
-               if (policy_is_shared(policy)) {
-                       sg_cpu->util = 0;
-                       sg_cpu->max = 0;
-                       sg_cpu->flags = SCHED_CPUFREQ_RT;
-                       sg_cpu->last_update = 0;
-                       sg_cpu->iowait_boost = 0;
-                       sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
-                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                                    sugov_update_shared);
-               } else {
-                       cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
-                                                    sugov_update_single);
-               }
+               sg_cpu->flags = SCHED_CPUFREQ_RT;
+               sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
+               cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
+                                            policy_is_shared(policy) ?
+                                                       sugov_update_shared :
+                                                       sugov_update_single);
        }
        return 0;
 }
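
Besides being shorter, the memset() closes an initialization gap: the old code reset util, max, last_update and iowait_boost only for shared policies, so restarting the governor on a single-CPU policy could leave stale state behind (including, with this series, saved_idle_calls). The registration call both branches funneled into has this signature (as declared in the scheduler's cpufreq glue in this kernel version), which is why a single call with a ternary callback argument now suffices:

	void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
				void (*func)(struct update_util_data *data,
					     u64 time, unsigned int flags));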