/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>
static LIST_HEAD(cpufreq_policy_list);

static inline bool policy_is_inactive(struct cpufreq_policy *policy)
{
	return cpumask_empty(policy->cpus);
}

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)			\
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;
static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;

	return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
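/*
 * Illustrative sketch (not part of this file): a governor-style load
 * estimate from two successive samples.  "prev_idle" and "prev_wall" are
 * hypothetical bookkeeping variables kept per CPU by the caller.
 *
 *	u64 idle, wall;
 *	unsigned int load;
 *
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	load = 100 * (wall - prev_wall - (idle - prev_idle)) /
 *	       (wall - prev_wall);
 *	prev_idle = idle;
 *	prev_wall = wall;
 */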
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
int cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	int ret;

	ret = cpufreq_table_validate_and_show(policy, table);
	if (ret) {
		pr_err("%s: invalid frequency table: %d\n", __func__, ret);
		return ret;
	}

	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the same clock and voltage.
	 */
	cpumask_setall(policy->cpus);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
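/*
 * Illustrative sketch (not part of this file): a minimal ->init() callback
 * of a hypothetical "foo" driver built on the generic helper.  The
 * "foo_freq_table", "foo_clk" and 300 us (300000 ns) latency are assumptions.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;
 *		return cpufreq_generic_init(policy, foo_freq_table, 300000);
 *	}
 */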
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
/**
 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
 *
 * @cpu: cpu to find policy for.
 *
 * This returns the policy for 'cpu', or NULL if it doesn't exist.
 * It also increments the kobject reference count to mark it busy and so
 * requires a corresponding call to cpufreq_cpu_put() to decrement it back.
 * If that call to cpufreq_cpu_put() isn't made, the policy won't be
 * freed, as that depends on the kobj count.
 *
 * Return: A valid policy on success, otherwise NULL on failure.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put: Decrements the usage count of a policy
 *
 * @policy: policy earlier returned by cpufreq_cpu_get().
 *
 * This decrements the kobject reference count incremented earlier by calling
 * cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
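/*
 * Illustrative sketch (not part of this file): the typical get/put pairing
 * when peeking at a policy from outside the core.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u: %u..%u kHz\n", cpu, policy->min, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */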
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
					 freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu\n",
			 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		cpufreq_stats_record_transition(policy, freqs->new);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}

/* Do post notifications when there are chances that transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{

	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (unlikely(WARN_ON(!policy->transition_ongoing)))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	policy->transition_ongoing = false;
	policy->transition_task = NULL;

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
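/*
 * Illustrative sketch (not part of this file): how a driver that lets the
 * core do the notification (i.e. one without CPUFREQ_ASYNC_NOTIFICATION)
 * would bracket a frequency change inside its ->target() callback.
 * "foo_write_freq_register()" is a hypothetical hardware hook.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = target };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_write_freq_register(target);
 *	cpufreq_freq_transition_end(policy, &freqs, ret);
 */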
/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pF\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}
/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
					 unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);
	policy->cached_target_freq = target_freq;

	if (cpufreq_driver->target_index) {
		int idx;

		idx = cpufreq_frequency_table_target(policy, target_freq,
						     CPUFREQ_RELATION_L);
		policy->cached_resolved_idx = idx;
		return policy->freq_table[idx].frequency;
	}

	if (cpufreq_driver->resolve_freq)
		return cpufreq_driver->resolve_freq(policy, target_freq);

	return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	int ret, enable;

	ret = sscanf(buf, "%d", &enable);
	if (ret != 1 || enable < 0 || enable > 1)
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);
static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;

	if (cpufreq_driver->setpolicy) {
		if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strncasecmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sprintf(buf, "%u\n", policy->cur);
	return ret;
}
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)			\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	int ret, temp;							\
	struct cpufreq_policy new_policy;				\
									\
	memcpy(&new_policy, policy, sizeof(*policy));			\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	temp = new_policy.object;					\
	ret = cpufreq_set_policy(policy, &new_policy);		\
	if (!ret)							\
		policy->user_policy.object = temp;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
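/*
 * For reference, show_one(scaling_max_freq, max) above expands to roughly
 * the following (illustrative expansion, not additional code):
 *
 *	static ssize_t show_scaling_max_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->max);
 *	}
 */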
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sprintf(buf, "%u\n", cur_freq);

	return sprintf(buf, "<unknown>\n");
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	int ret;
	char str_governor[16];
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	ret = cpufreq_set_policy(policy, &new_policy);
	return ret ? ret : count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sprintf(buf, "performance powersave");
		goto out;
	}

	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	if (cpufreq_driver->bios_limit) {
		ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret;

	down_read(&policy->rwsem);
	ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;

	get_online_cpus();

	if (cpu_online(policy->cpu)) {
		down_write(&policy->rwsem);
		ret = fattr->store(policy, buf, count);
		up_write(&policy->rwsem);
	}

	put_online_cpus();

	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};

static int add_cpu_dev_symlink(struct cpufreq_policy *policy,
			       struct device *dev)
{
	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
}
static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	return 0;
}

__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
	return NULL;
}
static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	struct cpufreq_policy new_policy;

	memcpy(&new_policy, policy, sizeof(*policy));

	/* Update governor of new_policy to the governor used before hotplug */
	gov = find_governor(policy->last_governor);
	if (gov) {
		pr_debug("Restoring governor %s for cpu %d\n",
				policy->governor->name, policy->cpu);
	} else {
		gov = cpufreq_default_governor();
		if (!gov)
			return -ENODATA;
	}

	new_policy.governor = gov;

	/* Use the default policy if there is no last_policy. */
	if (cpufreq_driver->setpolicy) {
		if (policy->last_policy)
			new_policy.policy = policy->last_policy;
		else
			cpufreq_parse_governor(gov->name, &new_policy.policy,
					       NULL);
	}
	/* set default policy */
	return cpufreq_set_policy(policy, &new_policy);
}
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
		goto err_free_real_cpus;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}
static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy. Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
		up_write(&policy->rwsem);
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
	}

	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = cpufreq_driver->init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto out_free_policy;
	}

	down_write(&policy->rwsem);

	if (new_policy) {
		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		policy->user_policy.min = policy->min;
		policy->user_policy.max = policy->max;

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		for_each_cpu(j, policy->related_cpus)
			per_cpu(cpufreq_cpu_data, j) = policy;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	} else {
		policy->min = policy->user_policy.min;
		policy->max = policy->user_policy.max;
	}

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			pr_err("%s: ->get() failed\n", __func__);
			goto out_exit_policy;
		}
	}

	/*
	 * Sometimes boot loaders set CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases the
	 * CPU might be unstable if it has to run on that frequency for a long
	 * duration of time, so it's better to set it to a frequency which is
	 * specified in freq-table. This also makes cpufreq stats inconsistent
	 * as cpufreq-stats would fail to register because the current
	 * frequency of the CPU isn't found in freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, policy->cur);
		if (ret == -EINVAL) {
			/* Warn user and fix it */
			pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
				__func__, policy->cpu, policy->cur);
			ret = __cpufreq_driver_target(policy, policy->cur - 1,
				CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that system will remain stable at "unknown"
			 * frequency for longer duration. Hence, a BUG_ON().
			 */
			BUG_ON(ret);
			pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
				__func__, policy->cpu, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_exit_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		/* cpufreq_policy_free() will notify based on this */
		new_policy = false;
		goto out_exit_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	pr_debug("initialization complete\n");

	return 0;

out_exit_policy:
	up_write(&policy->rwsem);

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);
out_free_policy:
	cpufreq_policy_free(policy);
	return ret;
}
static int cpufreq_offline(unsigned int cpu);

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (!policy || cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return 0;

	ret = add_cpu_dev_symlink(policy, dev);
	if (ret) {
		cpumask_clear_cpu(cpu, policy->real_cpus);
		cpufreq_offline(cpu);
	}

	return ret;
}
static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	int ret;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (policy_is_inactive(policy)) {
		if (has_target())
			strncpy(policy->last_governor, policy->governor->name,
				CPUFREQ_NAME_LEN);
		else
			policy->last_policy = policy->policy;
	} else if (cpu == policy->cpu) {
		/* Nominate new CPU */
		policy->cpu = cpumask_any(policy->cpus);
	}

	/* Start governor again for active policy */
	if (!policy_is_inactive(policy)) {
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		goto unlock;
	}

	if (cpufreq_driver->stop_cpu)
		cpufreq_driver->stop_cpu(policy);

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->exit() even during light-weight tear-down,
	 * since this is a core component, and is essential for the
	 * subsequent light-weight ->init() to succeed.
	 */
	if (cpufreq_driver->exit) {
		cpufreq_driver->exit(policy);
		policy->freq_table = NULL;
	}

unlock:
	up_write(&policy->rwsem);
	return 0;
}
/**
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	if (cpu_online(cpu))
		cpufreq_offline(cpu);

	cpumask_clear_cpu(cpu, policy->real_cpus);
	remove_cpu_dev_symlink(policy, dev);

	if (cpumask_empty(policy->real_cpus))
		cpufreq_policy_free(policy);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
 * in deep trouble.
 * @policy: policy managing CPUs
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to the current frequency first, and need to clean up later.
 * So either call cpufreq_update_policy() or schedule handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	unsigned int ret_freq = 0;

	if (!cpufreq_driver->get)
		return ret_freq;

	ret_freq = cpufreq_driver->get(policy->cpu);

	/*
	 * Updating inactive policies is invalid, so avoid doing that. Also
	 * if fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case too.
	 */
	if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
		return ret_freq;

	if (ret_freq && policy->cur &&
		!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
					saved value exists */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(policy, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);

		if (!policy_is_inactive(policy))
			ret_freq = __cpufreq_get(policy);

		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	if (!policy->cur) {
		pr_debug("cpufreq: Driver did not initialize current freq\n");
		policy->cur = new_freq;
	} else if (policy->cur != new_freq && has_target()) {
		cpufreq_out_of_sync(policy, new_freq);
	}

	return new_freq;
}

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
			policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
			CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
				__func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
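/*
 * Illustrative sketch (not part of this file): a hypothetical driver opting
 * into this helper by picking a suspend frequency in its ->init() callback
 * and wiring the helper up in its cpufreq_driver structure.
 *
 *	policy->suspend_freq = policy->max;	// in the driver's ->init()
 *	...
 *	.suspend = cpufreq_generic_suspend,	// in struct cpufreq_driver
 */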
/**
 * cpufreq_suspend() - Suspend CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (i2c, regulators, etc.) used for changing
 * the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %p\n", __func__,
				policy);
	}

suspend:
	cpufreq_suspended = true;
}
/**
 * cpufreq_resume() - Resume CPUFreq governors
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %p\n", __func__,
				policy);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for policy: %p\n",
				       __func__, policy);
		}
	}
}
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - return current driver data
 *
 * Return the private data of the currently loaded cpufreq
 * driver, or NULL if no cpufreq driver is loaded.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
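/*
 * Illustrative sketch (not part of this file): registering a hypothetical
 * transition notifier "foo_cpufreq_nb" that reacts to POSTCHANGE events.
 *
 *	static int foo_cpufreq_notify(struct notifier_block *nb,
 *				      unsigned long action, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (action == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpufreq_nb = {
 *		.notifier_call = foo_cpufreq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&foo_cpufreq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */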
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
 * callback to indicate an error condition, the hardware configuration must be
 * preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	return cpufreq_driver->fast_switch(policy, target_freq);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
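/*
 * Illustrative sketch (not part of this file): a governor's non-sleeping
 * update path, assuming it has already checked policy->fast_switch_enabled.
 * "next_freq" is whatever frequency the governor computed.
 *
 *	unsigned int freq = cpufreq_driver_fast_switch(policy, next_freq);
 *
 *	if (freq != CPUFREQ_ENTRY_INVALID)
 *		policy->cur = freq;
 */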
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = policy->restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;
	int index;

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	target_freq = clamp_val(target_freq, policy->min, policy->max);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases where
	 * exactly the same freq is called again and so we can save on few
	 * function calls.
	 */
	if (target_freq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	policy->restore_freq = policy->cur;

	if (cpufreq_driver->target)
		return cpufreq_driver->target(policy, target_freq, relation);

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	index = cpufreq_frequency_table_target(policy, target_freq, relation);

	return __target_index(policy, index);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);

int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;
	/*
	 * Governor might not be initiated here if ACPI _PPC changed
	 * notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
static int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
		cpufreq_update_current_freq(policy);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}

static void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}

static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
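/*
 * Illustrative sketch (not part of this file): how a governor module
 * typically registers itself.  "foo_governor" and its callbacks are
 * hypothetical.
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.start	= foo_start,
 *		.stop	= foo_stop,
 *		.limits	= foo_limits,
 *	};
 *
 *	static int __init foo_governor_init(void)
 *	{
 *		return cpufreq_register_governor(&foo_governor);
 *	}
 *	module_init(foo_governor_init);
 */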
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * policy : current policy.
 * new_policy: policy to be set.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
				struct cpufreq_policy *new_policy)
{
	struct cpufreq_governor *old_gov;
	int ret;

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_policy->cpu, new_policy->min, new_policy->max);

	memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));

	/*
	 * This check works well when we store new min/max freq attributes,
	 * because new_policy is a copy of policy with one field updated.
	 */
	if (new_policy->min > new_policy->max)
		return -EINVAL;

	/* verify the cpu speed can be set within this limit */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_ADJUST, new_policy);

	/*
	 * verify the cpu speed can be set within this limit, which might be
	 * different to the first one
	 */
	ret = cpufreq_driver->verify(new_policy);
	if (ret)
		return ret;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, new_policy);

	policy->min = new_policy->min;
	policy->max = new_policy->max;

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_policy->policy;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(new_policy);
	}

	if (new_policy->governor == policy->governor) {
		pr_debug("cpufreq: governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_policy->governor;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("cpufreq: governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	struct cpufreq_policy new_policy;

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy))
		goto unlock;

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&new_policy, policy, sizeof(*policy));
	new_policy.min = policy->user_policy.min;
	new_policy.max = policy->user_policy.max;

	/*
	 * BIOS might change freq behind our back
	 * -> ask driver for current freq and notify governors about a change
	 */
	if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
		if (cpufreq_suspended)
			goto unlock;

		new_policy.cur = cpufreq_update_current_freq(policy);
		if (WARN_ON(!new_policy.cur))
			goto unlock;
	}

	cpufreq_set_policy(policy, &new_policy);

unlock:
	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/*********************************************************************
 *                              BOOST                                *
 *********************************************************************/
static int cpufreq_boost_set_sw(int state)
{
	struct cpufreq_policy *policy;
	int ret = -EINVAL;

	for_each_active_policy(policy) {
		if (!policy->freq_table)
			continue;

		ret = cpufreq_frequency_table_cpuinfo(policy,
						      policy->freq_table);
		if (ret) {
			pr_err("%s: Policy frequency update failed\n",
			       __func__);
			break;
		}

		down_write(&policy->rwsem);
		policy->user_policy.max = policy->max;
		cpufreq_governor_limits(policy);
		up_write(&policy->rwsem);
	}

	return ret;
}
int cpufreq_boost_trigger_state(int state)
{
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_driver->set_boost(state);
	if (ret) {
		write_lock_irqsave(&cpufreq_driver_lock, flags);
		cpufreq_driver->boost_enabled = !state;
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		pr_err("%s: Cannot %s BOOST\n",
		       __func__, state ? "enable" : "disable");
	}

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return likely(cpufreq_driver) && cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

int cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/
static enum cpuhp_state hp_online;

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 *
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	get_online_cpus();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
	    list_empty(&cpufreq_policy_list)) {
		/* if all ->init() calls failed, unregister */
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		ret = -ENODEV;
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
					cpufreq_online,
					cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
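/*
 * Illustrative sketch (not part of this file): the registration pattern of
 * a hypothetical "foo" driver using the table-based ->target_index() path.
 * "foo_cpufreq_init" and "foo_cpufreq_target_index" are assumed callbacks.
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.attr		= cpufreq_generic_attr,
 *	};
 *
 *	ret = cpufreq_register_driver(&foo_cpufreq_driver);
 */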
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (!cpufreq_driver || (driver != cpufreq_driver))
		return -EINVAL;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	get_online_cpus();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	cpuhp_remove_state_nocalls(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	put_online_cpus();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * Stop cpufreq at shutdown to make sure it isn't holding any locks
 * or mutexes when secondary CPUs are halted.
 */
static struct syscore_ops cpufreq_syscore_ops = {
	.shutdown = cpufreq_suspend,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int __init cpufreq_core_init(void)
{
	if (cpufreq_disabled())
		return -ENODEV;

	cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);

	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
module_param(off, int, 0444);
core_initcall(cpufreq_core_init);