/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);
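
/*
 * Note: dbs_data_mutex serializes every governor state transition driven by
 * cpufreq_governor_dbs() below (INIT/EXIT/START/STOP/LIMITS), so the helpers
 * implementing those transitions rely on it instead of taking extra locks of
 * their own.  It is exported so that governors built on this code can take
 * it around their own updates of shared governor data as well.
 */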

/* Common sysfs tunables */

/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate.  For example, if the
 * original sampling_rate was 1 second and the requested new sampling rate is
 * 10 ms because the user needs an immediate reaction from the ondemand
 * governor, but is not sure whether higher frequencies will be required or
 * not, then the governor may change the sampling rate too late, i.e. up to
 * 1 second later.  Thus, if we are reducing the sampling rate, we need to
 * make the new value effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;

	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter.  If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
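
/*
 * Usage sketch (assuming the ondemand governor with system-wide tunables;
 * with per-policy tunables the attribute lives under the policy's own sysfs
 * directory instead):
 *
 *	# echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * The write reaches store_sampling_rate() through governor_store() below,
 * and zeroing the sample delay makes the new 10 ms period take effect on the
 * next utilization update rather than after the old period has expired.
 */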

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
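
/*
 * gov_update_cpu_data() is intended for tunable updates that change how idle
 * time is accounted (an io_is_busy or ignore_nice_load store in a governor,
 * for example): refreshing the per-CPU baselines here keeps the next
 * dbs_update() pass from computing load against stale prev_* values.
 */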

static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EBUSY;

	mutex_lock(&dbs_data->mutex);

	if (dbs_data->usage_count && gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes first go
 * through the show/store callbacks below, which then invoke the
 * attribute-specific callback with the right dbs_data as context.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};
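
/*
 * A governor typically declares its tunables with the gov_attr_rw() /
 * gov_attr_ro() helpers from cpufreq_governor.h, e.g. (sketch):
 *
 *	gov_attr_rw(sampling_rate);
 *
 * which builds a struct governor_attr around show_sampling_rate() and
 * store_sampling_rate(); governor_show()/governor_store() above then
 * dispatch sysfs accesses to those callbacks.
 */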

unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily.  Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is
	 * actually idle, so do not add the iowait time to the CPU idle time
	 * then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
			idle_time = 0;
		} else {
			idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
			j_cdbs->prev_cpu_idle = cur_idle_time;
		}

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is.  This is undesirable for latency-sensitive
		 * bursty workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency.  (However, we shouldn't
		 * over-do this copy, lest we get stuck at a high load (high
		 * frequency) for too long, even when the current system load
		 * has actually dropped down.  So we perform the copy only
		 * once, upon the first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate
		 * it for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}
	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
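
/*
 * Worked example for the load computation above (illustrative numbers only):
 * over a window with wall_time = 10000 us and idle_time = 2500 us,
 * load = 100 * (10000 - 2500) / 10000 = 75, i.e. the CPU was busy for 75% of
 * the window.  dbs_update() reports the busiest CPU of the policy, and the
 * governor's gov_dbs_timer() callback maps that value onto a frequency.
 */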

void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_sched();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
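
/*
 * The teardown order in gov_cancel_work() matters: the per-CPU hooks are
 * cleared first (with gov_clear_update_util() waiting for in-flight
 * callbacks via synchronize_sched()), then the irq_work and the work item
 * are flushed in the order they chain, so nothing can requeue work while
 * the bookkeeping is being reset at the end.
 */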

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->timer_mutex);
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_timer(policy));
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a
	 * stale sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right
	 * away at this point.  Otherwise, we need to ensure that only one of
	 * the CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}
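
/*
 * Taken together, the three handlers above form the sampling pipeline:
 *
 *	scheduler -> dbs_update_util_handler()	(scheduler context, must not
 *						 sleep or queue work directly)
 *		  -> irq_work_queue()  -> dbs_irq_work()
 *		  -> schedule_work()   -> dbs_work_handler()
 *						(process context, may sleep,
 *						 e.g. while switching the
 *						 frequency)
 *
 * The two-stage bounce exists because the utilization hook runs from
 * scheduler paths with the runqueue lock held, while the actual frequency
 * change may block.
 */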

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);
	return ret;
}
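
/*
 * Two tunable-ownership models meet in cpufreq_governor_init(): with
 * have_governor_per_policy(), every policy allocates its own dbs_data (and
 * gets its own sysfs directory); otherwise the first policy to use the
 * governor creates a global dbs_data that later policies merely attach to,
 * bumping usage_count and joining policy_dbs_list under dbs_data->mutex.
 */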

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy_dbs, gov);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_load = 100 * prev_load / (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);
	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);

	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);

	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}
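
/*
 * Zeroing the sample delay above makes the utilization update handler take a
 * sample on the very next scheduler callback, so the frequency gets
 * re-evaluated against the new limits right away instead of waiting out the
 * remainder of the current sampling interval.
 */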

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
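
/*
 * A governor plugs into this machinery by filling in a struct dbs_governor
 * and routing its cpufreq callbacks through cpufreq_governor_dbs().  A
 * minimal sketch (the mygov_* callbacks and attribute array are
 * hypothetical; the field names come from cpufreq_governor.h):
 *
 *	static struct dbs_governor my_governor = {
 *		.gov = {
 *			.name = "mygov",
 *			.governor = cpufreq_governor_dbs,
 *			.max_transition_latency = TRANSITION_LATENCY_LIMIT,
 *			.owner = THIS_MODULE,
 *		},
 *		.kobj_type = { .default_attrs = mygov_attributes },
 *		.gov_dbs_timer = mygov_dbs_timer,
 *		.alloc = mygov_alloc,
 *		.free = mygov_free,
 *		.init = mygov_init,
 *		.exit = mygov_exit,
 *		.start = mygov_start,
 *	};
 */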