2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
6 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
9 * Added handling for CPU hotplug
10 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/tick.h>
31 #include <trace/events/power.h>
34 * The "cpufreq driver" - the arch- or hardware-dependent low-level
35 * driver of CPUFreq support, and its rwlock. This lock also
36 * protects the cpufreq_cpu_data array.
38 static struct cpufreq_driver *cpufreq_driver;
39 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
41 static DEFINE_RWLOCK(cpufreq_driver_lock);
42 DEFINE_MUTEX(cpufreq_governor_lock);
43 static LIST_HEAD(cpufreq_policy_list);
45 /* This one keeps track of the previously set governor of a removed CPU */
46 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
48 /* Flag to suspend/resume CPUFreq governors */
49 static bool cpufreq_suspended;
51 static inline bool has_target(void)
53 return cpufreq_driver->target_index || cpufreq_driver->target;
57 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
60 static DECLARE_RWSEM(cpufreq_rwsem);
62 /* internal prototypes */
63 static int __cpufreq_governor(struct cpufreq_policy *policy,
65 static unsigned int __cpufreq_get(unsigned int cpu);
66 static void handle_update(struct work_struct *work);
69 * Two notifier lists: the "policy" list is involved in the
70 * validation process for a new CPU frequency policy; the
71 * "transition" list for kernel code that needs to handle
72 * changes to devices when the CPU clock speed changes.
73 * The mutex locks both lists.
75 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
76 static struct srcu_notifier_head cpufreq_transition_notifier_list;
78 static bool init_cpufreq_transition_notifier_list_called;
79 static int __init init_cpufreq_transition_notifier_list(void)
81 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
82 init_cpufreq_transition_notifier_list_called = true;
85 pure_initcall(init_cpufreq_transition_notifier_list);
87 static int off __read_mostly;
88 static int cpufreq_disabled(void)
92 void disable_cpufreq(void)
96 static LIST_HEAD(cpufreq_governor_list);
97 static DEFINE_MUTEX(cpufreq_governor_mutex);
99 bool have_governor_per_policy(void)
101 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
103 EXPORT_SYMBOL_GPL(have_governor_per_policy);
105 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
107 if (have_governor_per_policy())
108 return &policy->kobj;
110 return cpufreq_global_kobject;
112 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
114 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
120 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
122 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
123 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
124 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
125 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
129 idle_time = cur_wall_time - busy_time;
131 *wall = cputime_to_usecs(cur_wall_time);
133 return cputime_to_usecs(idle_time);
136 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
138 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
140 if (idle_time == -1ULL)
141 return get_cpu_idle_time_jiffy(cpu, wall);
143 idle_time += get_cpu_iowait_time_us(cpu, wall);
147 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
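/*
 * Illustrative sketch (not part of this file): a governor-style load estimate
 * built from two get_cpu_idle_time() samples. The variable names and the
 * sampling period are hypothetical.
 *
 *	u64 prev_wall, prev_idle, wall, idle;
 *	unsigned int load;
 *
 *	prev_idle = get_cpu_idle_time(cpu, &prev_wall, 0);
 *	// ... wait one sampling period ...
 *	idle = get_cpu_idle_time(cpu, &wall, 0);
 *	load = div64_u64(100 * (wall - prev_wall - (idle - prev_idle)),
 *			 wall - prev_wall);
 */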
150 * This is a generic cpufreq init() routine which can be used by cpufreq
151 * drivers of SMP systems. It will do the following:
152 * - validate & show the frequency table passed
153 * - set the policy's transition latency
154 * - fill policy->cpus with all possible CPUs
156 int cpufreq_generic_init(struct cpufreq_policy *policy,
157 struct cpufreq_frequency_table *table,
158 unsigned int transition_latency)
162 ret = cpufreq_table_validate_and_show(policy, table);
164 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
168 policy->cpuinfo.transition_latency = transition_latency;
171 * The driver only supports an SMP configuration where all processors
172 * share clock and voltage.
174 cpumask_setall(policy->cpus);
178 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
180 unsigned int cpufreq_generic_get(unsigned int cpu)
182 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
184 if (!policy || IS_ERR(policy->clk)) {
185 pr_err("%s: No %s associated to cpu: %d\n",
186 __func__, policy ? "clk" : "policy", cpu);
190 return clk_get_rate(policy->clk) / 1000;
192 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
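/*
 * Illustrative sketch (not part of this file): a minimal clock-based driver
 * built on the two generic helpers above. All foo_* names are hypothetical;
 * policy->clk is assumed to have been obtained by the driver beforehand.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->clk = foo_clk;
 *		return cpufreq_generic_init(policy, foo_freq_table, 100000);
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.flags		= CPUFREQ_STICKY,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *		.init		= foo_cpufreq_init,
 *		.name		= "foo-cpufreq",
 *		.attr		= cpufreq_generic_attr,
 *	};
 */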
194 /* Only for cpufreq core internal use */
195 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
197 return per_cpu(cpufreq_cpu_data, cpu);
200 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
202 struct cpufreq_policy *policy = NULL;
205 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
208 if (!down_read_trylock(&cpufreq_rwsem))
211 /* get the cpufreq driver */
212 read_lock_irqsave(&cpufreq_driver_lock, flags);
214 if (cpufreq_driver) {
216 policy = per_cpu(cpufreq_cpu_data, cpu);
218 kobject_get(&policy->kobj);
221 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
224 up_read(&cpufreq_rwsem);
228 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
230 void cpufreq_cpu_put(struct cpufreq_policy *policy)
232 if (cpufreq_disabled())
235 kobject_put(&policy->kobj);
236 up_read(&cpufreq_rwsem);
238 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
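/*
 * Illustrative sketch (not part of this file): the get/put reference pattern
 * expected of cpufreq_cpu_get() users; the body of the critical section is
 * hypothetical.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("cpu%u: %u..%u kHz\n", cpu, policy->min, policy->max);
 *		cpufreq_cpu_put(policy);
 *	}
 */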
240 /*********************************************************************
241 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
242 *********************************************************************/
245 * adjust_jiffies - adjust the system "loops_per_jiffy"
247 * This function alters the system "loops_per_jiffy" for the clock
248 * speed change. Note that loops_per_jiffy cannot be updated on SMP
249 * systems as each CPU might be scaled differently. So, use the arch
250 * per-CPU loops_per_jiffy value wherever possible.
253 static unsigned long l_p_j_ref;
254 static unsigned int l_p_j_ref_freq;
256 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
258 if (ci->flags & CPUFREQ_CONST_LOOPS)
261 if (!l_p_j_ref_freq) {
262 l_p_j_ref = loops_per_jiffy;
263 l_p_j_ref_freq = ci->old;
264 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
265 l_p_j_ref, l_p_j_ref_freq);
267 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
268 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
270 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
271 loops_per_jiffy, ci->new);
275 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
281 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
282 struct cpufreq_freqs *freqs, unsigned int state)
284 BUG_ON(irqs_disabled());
286 if (cpufreq_disabled())
289 freqs->flags = cpufreq_driver->flags;
290 pr_debug("notification %u of frequency transition to %u kHz\n",
295 case CPUFREQ_PRECHANGE:
296 /* detect if the driver reported a value as "old frequency"
297 * which is not equal to what the cpufreq core thinks is "old frequency". */
300 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
301 if ((policy) && (policy->cpu == freqs->cpu) &&
302 (policy->cur) && (policy->cur != freqs->old)) {
303 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
304 freqs->old, policy->cur);
305 freqs->old = policy->cur;
308 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
309 CPUFREQ_PRECHANGE, freqs);
310 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
313 case CPUFREQ_POSTCHANGE:
314 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
315 pr_debug("FREQ: %lu - CPU: %lu\n",
316 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
317 trace_cpu_frequency(freqs->new, freqs->cpu);
318 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
319 CPUFREQ_POSTCHANGE, freqs);
320 if (likely(policy) && likely(policy->cpu == freqs->cpu))
321 policy->cur = freqs->new;
327 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
328 * on frequency transition.
330 * This function calls the transition notifiers and the "adjust_jiffies"
331 * function. It is called twice on all CPU frequency changes that have external effects.
334 void cpufreq_notify_transition(struct cpufreq_policy *policy,
335 struct cpufreq_freqs *freqs, unsigned int state)
337 for_each_cpu(freqs->cpu, policy->cpus)
338 __cpufreq_notify_transition(policy, freqs, state);
340 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
342 /* Do post notifications when there is a chance that the transition has failed */
343 void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
344 struct cpufreq_freqs *freqs, int transition_failed)
346 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
347 if (!transition_failed)
350 swap(freqs->old, freqs->new);
351 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
352 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
354 EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
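/*
 * Illustrative sketch (not part of this file): how a ->target() style driver
 * that handles its own notification might wrap a hardware write with the
 * PRECHANGE/POSTCHANGE helpers above. foo_write_freq() is hypothetical.
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = target_freq,
 *	};
 *	int ret;
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	ret = foo_write_freq(target_freq);
 *	cpufreq_notify_post_transition(policy, &freqs, ret);
 */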
357 /*********************************************************************
359 *********************************************************************/
360 static ssize_t show_boost(struct kobject *kobj,
361 struct attribute *attr, char *buf)
363 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
366 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
367 const char *buf, size_t count)
371 ret = sscanf(buf, "%d", &enable);
372 if (ret != 1 || enable < 0 || enable > 1)
375 if (cpufreq_boost_trigger_state(enable)) {
376 pr_err("%s: Cannot %s BOOST!\n",
377 __func__, enable ? "enable" : "disable");
381 pr_debug("%s: cpufreq BOOST %s\n",
382 __func__, enable ? "enabled" : "disabled");
386 define_one_global_rw(boost);
388 static struct cpufreq_governor *__find_governor(const char *str_governor)
390 struct cpufreq_governor *t;
392 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
393 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
400 * cpufreq_parse_governor - parse a governor string
402 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
403 struct cpufreq_governor **governor)
410 if (cpufreq_driver->setpolicy) {
411 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
412 *policy = CPUFREQ_POLICY_PERFORMANCE;
414 } else if (!strnicmp(str_governor, "powersave",
416 *policy = CPUFREQ_POLICY_POWERSAVE;
419 } else if (has_target()) {
420 struct cpufreq_governor *t;
422 mutex_lock(&cpufreq_governor_mutex);
424 t = __find_governor(str_governor);
429 mutex_unlock(&cpufreq_governor_mutex);
430 ret = request_module("cpufreq_%s", str_governor);
431 mutex_lock(&cpufreq_governor_mutex);
434 t = __find_governor(str_governor);
442 mutex_unlock(&cpufreq_governor_mutex);
449 * cpufreq_per_cpu_attr_read() / show_##file_name() -
450 * print out cpufreq information
452 * Write out information from cpufreq_driver->policy[cpu]; object must be "unsigned int".
456 #define show_one(file_name, object) \
457 static ssize_t show_##file_name \
458 (struct cpufreq_policy *policy, char *buf) \
460 return sprintf(buf, "%u\n", policy->object); \
463 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
464 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
465 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
466 show_one(scaling_min_freq, min);
467 show_one(scaling_max_freq, max);
468 show_one(scaling_cur_freq, cur);
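/*
 * For reference, show_one(scaling_cur_freq, cur) above expands to roughly:
 *
 *	static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */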
470 static int cpufreq_set_policy(struct cpufreq_policy *policy,
471 struct cpufreq_policy *new_policy);
474 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
476 #define store_one(file_name, object) \
477 static ssize_t store_##file_name \
478 (struct cpufreq_policy *policy, const char *buf, size_t count) \
481 struct cpufreq_policy new_policy; \
483 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
487 ret = sscanf(buf, "%u", &new_policy.object); \
491 ret = cpufreq_set_policy(policy, &new_policy); \
492 policy->user_policy.object = policy->object; \
494 return ret ? ret : count; \
497 store_one(scaling_min_freq, min);
498 store_one(scaling_max_freq, max);
501 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
503 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
506 unsigned int cur_freq = __cpufreq_get(policy->cpu);
507 if (!cur_freq)
508 return sprintf(buf, "<unknown>");
509 return sprintf(buf, "%u\n", cur_freq);
513 * show_scaling_governor - show the current policy for the specified CPU
515 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
517 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
518 return sprintf(buf, "powersave\n");
519 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
520 return sprintf(buf, "performance\n");
521 else if (policy->governor)
522 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
523 policy->governor->name);
528 * store_scaling_governor - store policy for the specified CPU
530 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
531 const char *buf, size_t count)
534 char str_governor[16];
535 struct cpufreq_policy new_policy;
537 ret = cpufreq_get_policy(&new_policy, policy->cpu);
541 ret = sscanf(buf, "%15s", str_governor);
545 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
546 &new_policy.governor))
549 ret = cpufreq_set_policy(policy, &new_policy);
551 policy->user_policy.policy = policy->policy;
552 policy->user_policy.governor = policy->governor;
561 * show_scaling_driver - show the cpufreq driver currently loaded
563 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
565 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
569 * show_scaling_available_governors - show the available CPUfreq governors
571 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
575 struct cpufreq_governor *t;
578 i += sprintf(buf, "performance powersave");
582 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
583 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
584 - (CPUFREQ_NAME_LEN + 2)))
586 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
589 i += sprintf(&buf[i], "\n");
593 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
598 for_each_cpu(cpu, mask) {
600 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
601 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
602 if (i >= (PAGE_SIZE - 5))
605 i += sprintf(&buf[i], "\n");
608 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
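/*
 * For reference: for a mask containing CPUs 0-3, cpufreq_show_cpus() fills
 * buf with "0 1 2 3\n" and returns the number of bytes written.
 */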
611 * show_related_cpus - show the CPUs affected by each transition even if
612 * hw coordination is in use
614 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
616 return cpufreq_show_cpus(policy->related_cpus, buf);
620 * show_affected_cpus - show the CPUs affected by each transition
622 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
624 return cpufreq_show_cpus(policy->cpus, buf);
627 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
628 const char *buf, size_t count)
630 unsigned int freq = 0;
633 if (!policy->governor || !policy->governor->store_setspeed)
636 ret = sscanf(buf, "%u", &freq);
640 policy->governor->store_setspeed(policy, freq);
645 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
647 if (!policy->governor || !policy->governor->show_setspeed)
648 return sprintf(buf, "<unsupported>\n");
650 return policy->governor->show_setspeed(policy, buf);
654 * show_bios_limit - show the current cpufreq HW/BIOS limitation
656 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
660 if (cpufreq_driver->bios_limit) {
661 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
663 return sprintf(buf, "%u\n", limit);
665 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
668 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
669 cpufreq_freq_attr_ro(cpuinfo_min_freq);
670 cpufreq_freq_attr_ro(cpuinfo_max_freq);
671 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
672 cpufreq_freq_attr_ro(scaling_available_governors);
673 cpufreq_freq_attr_ro(scaling_driver);
674 cpufreq_freq_attr_ro(scaling_cur_freq);
675 cpufreq_freq_attr_ro(bios_limit);
676 cpufreq_freq_attr_ro(related_cpus);
677 cpufreq_freq_attr_ro(affected_cpus);
678 cpufreq_freq_attr_rw(scaling_min_freq);
679 cpufreq_freq_attr_rw(scaling_max_freq);
680 cpufreq_freq_attr_rw(scaling_governor);
681 cpufreq_freq_attr_rw(scaling_setspeed);
683 static struct attribute *default_attrs[] = {
684 &cpuinfo_min_freq.attr,
685 &cpuinfo_max_freq.attr,
686 &cpuinfo_transition_latency.attr,
687 &scaling_min_freq.attr,
688 &scaling_max_freq.attr,
691 &scaling_governor.attr,
692 &scaling_driver.attr,
693 &scaling_available_governors.attr,
694 &scaling_setspeed.attr,
698 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
699 #define to_attr(a) container_of(a, struct freq_attr, attr)
701 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
703 struct cpufreq_policy *policy = to_policy(kobj);
704 struct freq_attr *fattr = to_attr(attr);
707 if (!down_read_trylock(&cpufreq_rwsem))
710 down_read(&policy->rwsem);
713 ret = fattr->show(policy, buf);
717 up_read(&policy->rwsem);
718 up_read(&cpufreq_rwsem);
723 static ssize_t store(struct kobject *kobj, struct attribute *attr,
724 const char *buf, size_t count)
726 struct cpufreq_policy *policy = to_policy(kobj);
727 struct freq_attr *fattr = to_attr(attr);
728 ssize_t ret = -EINVAL;
732 if (!cpu_online(policy->cpu))
735 if (!down_read_trylock(&cpufreq_rwsem))
738 down_write(&policy->rwsem);
741 ret = fattr->store(policy, buf, count);
745 up_write(&policy->rwsem);
747 up_read(&cpufreq_rwsem);
754 static void cpufreq_sysfs_release(struct kobject *kobj)
756 struct cpufreq_policy *policy = to_policy(kobj);
757 pr_debug("last reference is dropped\n");
758 complete(&policy->kobj_unregister);
761 static const struct sysfs_ops sysfs_ops = {
766 static struct kobj_type ktype_cpufreq = {
767 .sysfs_ops = &sysfs_ops,
768 .default_attrs = default_attrs,
769 .release = cpufreq_sysfs_release,
772 struct kobject *cpufreq_global_kobject;
773 EXPORT_SYMBOL(cpufreq_global_kobject);
775 static int cpufreq_global_kobject_usage;
777 int cpufreq_get_global_kobject(void)
779 if (!cpufreq_global_kobject_usage++)
780 return kobject_add(cpufreq_global_kobject,
781 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
785 EXPORT_SYMBOL(cpufreq_get_global_kobject);
787 void cpufreq_put_global_kobject(void)
789 if (!--cpufreq_global_kobject_usage)
790 kobject_del(cpufreq_global_kobject);
792 EXPORT_SYMBOL(cpufreq_put_global_kobject);
794 int cpufreq_sysfs_create_file(const struct attribute *attr)
796 int ret = cpufreq_get_global_kobject();
799 ret = sysfs_create_file(cpufreq_global_kobject, attr);
801 cpufreq_put_global_kobject();
806 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
808 void cpufreq_sysfs_remove_file(const struct attribute *attr)
810 sysfs_remove_file(cpufreq_global_kobject, attr);
811 cpufreq_put_global_kobject();
813 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
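/*
 * Illustrative sketch (not part of this file): how a global attribute such as
 * the boost attribute defined earlier is typically wired up through these
 * helpers. Error handling is abbreviated.
 *
 *	ret = cpufreq_sysfs_create_file(&boost.attr);
 *	// ... later, on teardown ...
 *	cpufreq_sysfs_remove_file(&boost.attr);
 */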
815 /* symlink affected CPUs */
816 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
821 for_each_cpu(j, policy->cpus) {
822 struct device *cpu_dev;
824 if (j == policy->cpu)
827 pr_debug("Adding link for CPU: %u\n", j);
828 cpu_dev = get_cpu_device(j);
829 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
837 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
840 struct freq_attr **drv_attr;
843 /* prepare interface data */
844 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
845 &dev->kobj, "cpufreq");
849 /* set up files for this cpu device */
850 drv_attr = cpufreq_driver->attr;
851 while ((drv_attr) && (*drv_attr)) {
852 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
854 goto err_out_kobj_put;
857 if (cpufreq_driver->get) {
858 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
860 goto err_out_kobj_put;
863 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
865 goto err_out_kobj_put;
867 if (cpufreq_driver->bios_limit) {
868 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
870 goto err_out_kobj_put;
873 ret = cpufreq_add_dev_symlink(policy);
875 goto err_out_kobj_put;
880 kobject_put(&policy->kobj);
881 wait_for_completion(&policy->kobj_unregister);
885 static void cpufreq_init_policy(struct cpufreq_policy *policy)
887 struct cpufreq_governor *gov = NULL;
888 struct cpufreq_policy new_policy;
891 memcpy(&new_policy, policy, sizeof(*policy));
893 /* Update governor of new_policy to the governor used before hotplug */
894 gov = __find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
896 pr_debug("Restoring governor %s for cpu %d\n",
897 policy->governor->name, policy->cpu);
899 gov = CPUFREQ_DEFAULT_GOVERNOR;
901 new_policy.governor = gov;
903 /* Use the default policy if it's valid. */
904 if (cpufreq_driver->setpolicy)
905 cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
907 /* set default policy */
908 ret = cpufreq_set_policy(policy, &new_policy);
910 pr_debug("setting policy failed\n");
911 if (cpufreq_driver->exit)
912 cpufreq_driver->exit(policy);
916 #ifdef CONFIG_HOTPLUG_CPU
917 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
918 unsigned int cpu, struct device *dev)
924 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
926 pr_err("%s: Failed to stop governor\n", __func__);
931 down_write(&policy->rwsem);
933 write_lock_irqsave(&cpufreq_driver_lock, flags);
935 cpumask_set_cpu(cpu, policy->cpus);
936 per_cpu(cpufreq_cpu_data, cpu) = policy;
937 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
939 up_write(&policy->rwsem);
942 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
944 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
947 pr_err("%s: Failed to start governor\n", __func__);
952 return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
956 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
958 struct cpufreq_policy *policy;
961 read_lock_irqsave(&cpufreq_driver_lock, flags);
963 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
965 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
967 policy->governor = NULL;
972 static struct cpufreq_policy *cpufreq_policy_alloc(void)
974 struct cpufreq_policy *policy;
976 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
980 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
981 goto err_free_policy;
983 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
984 goto err_free_cpumask;
986 INIT_LIST_HEAD(&policy->policy_list);
987 init_rwsem(&policy->rwsem);
992 free_cpumask_var(policy->cpus);
999 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1001 struct kobject *kobj;
1002 struct completion *cmp;
1004 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1005 CPUFREQ_REMOVE_POLICY, policy);
1007 down_read(&policy->rwsem);
1008 kobj = &policy->kobj;
1009 cmp = &policy->kobj_unregister;
1010 up_read(&policy->rwsem);
1014 * We need to make sure that the underlying kobj is
1015 * actually not referenced anymore by anybody before we
1016 * proceed with unloading.
1018 pr_debug("waiting for dropping of refcount\n");
1019 wait_for_completion(cmp);
1020 pr_debug("wait complete\n");
1023 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1025 free_cpumask_var(policy->related_cpus);
1026 free_cpumask_var(policy->cpus);
1030 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1032 if (WARN_ON(cpu == policy->cpu))
1035 down_write(&policy->rwsem);
1037 policy->last_cpu = policy->cpu;
1040 up_write(&policy->rwsem);
1042 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1043 CPUFREQ_UPDATE_POLICY_CPU, policy);
1046 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1048 unsigned int j, cpu = dev->id;
1050 struct cpufreq_policy *policy;
1051 unsigned long flags;
1052 bool recover_policy = cpufreq_suspended;
1053 #ifdef CONFIG_HOTPLUG_CPU
1054 struct cpufreq_policy *tpolicy;
1057 if (cpu_is_offline(cpu))
1060 pr_debug("adding CPU %u\n", cpu);
1063 /* check whether a different CPU already registered this
1064 * CPU because it is in the same boat. */
1065 policy = cpufreq_cpu_get(cpu);
1066 if (unlikely(policy)) {
1067 cpufreq_cpu_put(policy);
1072 if (!down_read_trylock(&cpufreq_rwsem))
1075 #ifdef CONFIG_HOTPLUG_CPU
1076 /* Check if this cpu was hot-unplugged earlier and has siblings */
1077 read_lock_irqsave(&cpufreq_driver_lock, flags);
1078 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
1079 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
1080 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1081 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
1082 up_read(&cpufreq_rwsem);
1086 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1090 * Restore the saved policy when doing light-weight init and fall back
1091 * to the full init if that fails.
1093 policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
1095 recover_policy = false;
1096 policy = cpufreq_policy_alloc();
1102 * In the resume path, since we restore a saved policy, the assignment
1103 * to policy->cpu is like an update of the existing policy, rather than
1104 * the creation of a brand new one. So we need to perform this update
1105 * by invoking update_policy_cpu().
1107 if (recover_policy && cpu != policy->cpu)
1108 update_policy_cpu(policy, cpu);
1112 cpumask_copy(policy->cpus, cpumask_of(cpu));
1114 init_completion(&policy->kobj_unregister);
1115 INIT_WORK(&policy->update, handle_update);
1117 /* call driver. From then on the cpufreq must be able
1118 * to accept all calls to ->verify and ->setpolicy for this CPU
1120 ret = cpufreq_driver->init(policy);
1122 pr_debug("initialization failed\n");
1123 goto err_set_policy_cpu;
1126 /* related cpus should at least contain policy->cpus */
1127 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1130 * affected CPUs must always be the ones that are online; we aren't
1131 * managing offline CPUs here.
1133 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1135 if (!recover_policy) {
1136 policy->user_policy.min = policy->min;
1137 policy->user_policy.max = policy->max;
1140 down_write(&policy->rwsem);
1141 write_lock_irqsave(&cpufreq_driver_lock, flags);
1142 for_each_cpu(j, policy->cpus)
1143 per_cpu(cpufreq_cpu_data, j) = policy;
1144 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1146 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1147 policy->cur = cpufreq_driver->get(policy->cpu);
1149 pr_err("%s: ->get() failed\n", __func__);
1155 * Sometimes boot loaders set the CPU frequency to a value outside of the
1156 * frequency table known to the cpufreq core. In such cases the CPU might be
1157 * unstable if it has to run at that frequency for a long duration of time,
1158 * so it's better to set it to a frequency which is specified in the
1159 * freq-table. This also makes cpufreq stats inconsistent, as
1160 * cpufreq-stats would fail to register because the current frequency of the
1161 * CPU isn't found in the freq-table.
1163 * Because we don't want this change to affect the boot process badly, we go
1164 * for the next freq which is >= policy->cur ('cur' must be set by now,
1165 * otherwise we will end up setting freq to the lowest of the table, as 'cur'
1166 * is initialized to zero).
1168 * We pass the target freq as "policy->cur - 1", otherwise
1169 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1170 * equal to the target freq.
1172 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1174 /* Are we running at unknown frequency ? */
1175 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1176 if (ret == -EINVAL) {
1177 /* Warn user and fix it */
1178 pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
1179 __func__, policy->cpu, policy->cur);
1180 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1181 CPUFREQ_RELATION_L);
1184 * Reaching here after boot in a few seconds may not
1185 * mean that the system will remain stable at "unknown"
1186 * frequency for a longer duration. Hence, a BUG_ON().
1188 BUG_ON(ret);
1189 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
1190 __func__, policy->cpu, policy->cur);
1194 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1195 CPUFREQ_START, policy);
1197 if (!recover_policy) {
1198 ret = cpufreq_add_dev_interface(policy, dev);
1200 goto err_out_unregister;
1201 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1202 CPUFREQ_CREATE_POLICY, policy);
1205 write_lock_irqsave(&cpufreq_driver_lock, flags);
1206 list_add(&policy->policy_list, &cpufreq_policy_list);
1207 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1209 cpufreq_init_policy(policy);
1211 if (!recover_policy) {
1212 policy->user_policy.policy = policy->policy;
1213 policy->user_policy.governor = policy->governor;
1215 up_write(&policy->rwsem);
1217 kobject_uevent(&policy->kobj, KOBJ_ADD);
1218 up_read(&cpufreq_rwsem);
1220 pr_debug("initialization complete\n");
1226 write_lock_irqsave(&cpufreq_driver_lock, flags);
1227 for_each_cpu(j, policy->cpus)
1228 per_cpu(cpufreq_cpu_data, j) = NULL;
1229 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1231 if (cpufreq_driver->exit)
1232 cpufreq_driver->exit(policy);
1234 if (recover_policy) {
1235 /* Do not leave stale fallback data behind. */
1236 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
1237 cpufreq_policy_put_kobj(policy);
1239 cpufreq_policy_free(policy);
1242 up_read(&cpufreq_rwsem);
1248 * cpufreq_add_dev - add a CPU device
1250 * Adds the cpufreq interface for a CPU device.
1252 * The Oracle says: try running cpufreq registration/unregistration concurrently
1253 * with cpu hotplugging and all hell will break loose. Tried to clean this
1254 * mess up, but more thorough testing is needed. - Mathieu
1256 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1258 return __cpufreq_add_dev(dev, sif);
1261 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1262 unsigned int old_cpu)
1264 struct device *cpu_dev;
1267 /* first sibling now owns the new sysfs dir */
1268 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1270 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1271 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1273 pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
1275 down_write(&policy->rwsem);
1276 cpumask_set_cpu(old_cpu, policy->cpus);
1277 up_write(&policy->rwsem);
1279 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1288 static int __cpufreq_remove_dev_prepare(struct device *dev,
1289 struct subsys_interface *sif)
1291 unsigned int cpu = dev->id, cpus;
1293 unsigned long flags;
1294 struct cpufreq_policy *policy;
1296 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1298 write_lock_irqsave(&cpufreq_driver_lock, flags);
1300 policy = per_cpu(cpufreq_cpu_data, cpu);
1302 /* Save the policy somewhere when doing a light-weight tear-down */
1303 if (cpufreq_suspended)
1304 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1306 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1309 pr_debug("%s: No cpu_data found\n", __func__);
1314 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1316 pr_err("%s: Failed to stop governor\n", __func__);
1321 if (!cpufreq_driver->setpolicy)
1322 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1323 policy->governor->name, CPUFREQ_NAME_LEN);
1325 down_read(&policy->rwsem);
1326 cpus = cpumask_weight(policy->cpus);
1327 up_read(&policy->rwsem);
1329 if (cpu != policy->cpu) {
1330 sysfs_remove_link(&dev->kobj, "cpufreq");
1331 } else if (cpus > 1) {
1332 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
1334 update_policy_cpu(policy, new_cpu);
1336 if (!cpufreq_suspended)
1337 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1338 __func__, new_cpu, cpu);
1340 } else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
1341 cpufreq_driver->stop_cpu(policy);
1347 static int __cpufreq_remove_dev_finish(struct device *dev,
1348 struct subsys_interface *sif)
1350 unsigned int cpu = dev->id, cpus;
1352 unsigned long flags;
1353 struct cpufreq_policy *policy;
1355 read_lock_irqsave(&cpufreq_driver_lock, flags);
1356 policy = per_cpu(cpufreq_cpu_data, cpu);
1357 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1360 pr_debug("%s: No cpu_data found\n", __func__);
1364 down_write(&policy->rwsem);
1365 cpus = cpumask_weight(policy->cpus);
1368 cpumask_clear_cpu(cpu, policy->cpus);
1369 up_write(&policy->rwsem);
1371 /* If cpu is last user of policy, free policy */
1374 ret = __cpufreq_governor(policy,
1375 CPUFREQ_GOV_POLICY_EXIT);
1377 pr_err("%s: Failed to exit governor\n",
1383 if (!cpufreq_suspended)
1384 cpufreq_policy_put_kobj(policy);
1387 * Perform the ->exit() even during light-weight tear-down,
1388 * since this is a core component, and is essential for the
1389 * subsequent light-weight ->init() to succeed.
1391 if (cpufreq_driver->exit)
1392 cpufreq_driver->exit(policy);
1394 /* Remove policy from list of active policies */
1395 write_lock_irqsave(&cpufreq_driver_lock, flags);
1396 list_del(&policy->policy_list);
1397 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1399 if (!cpufreq_suspended)
1400 cpufreq_policy_free(policy);
1401 } else if (has_target()) {
1402 ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
1404 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
1407 pr_err("%s: Failed to start governor\n", __func__);
1412 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1417 * cpufreq_remove_dev - remove a CPU device
1419 * Removes the cpufreq interface for a CPU device.
1421 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1423 unsigned int cpu = dev->id;
1426 if (cpu_is_offline(cpu))
1429 ret = __cpufreq_remove_dev_prepare(dev, sif);
1432 ret = __cpufreq_remove_dev_finish(dev, sif);
1437 static void handle_update(struct work_struct *work)
1439 struct cpufreq_policy *policy =
1440 container_of(work, struct cpufreq_policy, update);
1441 unsigned int cpu = policy->cpu;
1442 pr_debug("handle_update for cpu %u called\n", cpu);
1443 cpufreq_update_policy(cpu);
1447 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're in deep trouble.
1450 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1451 * @new_freq: CPU frequency the CPU actually runs at
1453 * We adjust to the current frequency first, and need to clean up later.
1454 * So either call cpufreq_update_policy() or schedule handle_update().
1456 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1457 unsigned int new_freq)
1459 struct cpufreq_policy *policy;
1460 struct cpufreq_freqs freqs;
1461 unsigned long flags;
1463 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1464 old_freq, new_freq);
1466 freqs.old = old_freq;
1467 freqs.new = new_freq;
1469 read_lock_irqsave(&cpufreq_driver_lock, flags);
1470 policy = per_cpu(cpufreq_cpu_data, cpu);
1471 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1473 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1474 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1478 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1481 * This is the last known freq, without actually getting it from the driver.
1482 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1484 unsigned int cpufreq_quick_get(unsigned int cpu)
1486 struct cpufreq_policy *policy;
1487 unsigned int ret_freq = 0;
1489 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1490 return cpufreq_driver->get(cpu);
1492 policy = cpufreq_cpu_get(cpu);
1494 ret_freq = policy->cur;
1495 cpufreq_cpu_put(policy);
1500 EXPORT_SYMBOL(cpufreq_quick_get);
1503 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1506 * Just return the max possible frequency for a given CPU.
1508 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1510 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1511 unsigned int ret_freq = 0;
1514 ret_freq = policy->max;
1515 cpufreq_cpu_put(policy);
1520 EXPORT_SYMBOL(cpufreq_quick_get_max);
1522 static unsigned int __cpufreq_get(unsigned int cpu)
1524 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1525 unsigned int ret_freq = 0;
1527 if (!cpufreq_driver->get)
1530 ret_freq = cpufreq_driver->get(cpu);
1532 if (ret_freq && policy->cur &&
1533 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1534 /* verify no discrepancy between actual and
1535 saved value exists */
1536 if (unlikely(ret_freq != policy->cur)) {
1537 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1538 schedule_work(&policy->update);
1546 * cpufreq_get - get the current CPU frequency (in kHz)
1549 * Get the current frequency of the given CPU.
1551 unsigned int cpufreq_get(unsigned int cpu)
1553 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1554 unsigned int ret_freq = 0;
1557 down_read(&policy->rwsem);
1558 ret_freq = __cpufreq_get(cpu);
1559 up_read(&policy->rwsem);
1561 cpufreq_cpu_put(policy);
1566 EXPORT_SYMBOL(cpufreq_get);
1568 static struct subsys_interface cpufreq_interface = {
1570 .subsys = &cpu_subsys,
1571 .add_dev = cpufreq_add_dev,
1572 .remove_dev = cpufreq_remove_dev,
1576 * In case the platform wants some specific frequency to be configured during suspend.
1579 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1583 if (!policy->suspend_freq) {
1584 pr_err("%s: suspend_freq can't be zero\n", __func__);
1588 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1589 policy->suspend_freq);
1591 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1592 CPUFREQ_RELATION_H);
1594 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1595 __func__, policy->suspend_freq, ret);
1599 EXPORT_SYMBOL(cpufreq_generic_suspend);
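/*
 * Illustrative sketch (not part of this file): a driver opting into the
 * generic suspend handler above. foo_* names are hypothetical;
 * policy->suspend_freq would normally be filled in from the driver's
 * frequency table during ->init().
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		// ... normal init ...
 *		policy->suspend_freq = foo_safe_freq_khz;
 *		return 0;
 *	}
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		// ... other callbacks ...
 *		.suspend = cpufreq_generic_suspend,
 *	};
 */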
1602 * cpufreq_suspend() - Suspend CPUFreq governors
1604 * Called during system wide Suspend/Hibernate cycles for suspending governors
1605 * as some platforms can't change frequency after this point in the suspend
1606 * cycle, because some of the devices (e.g. i2c, regulators) they use for
1607 * changing frequency are suspended quickly after this point.
1609 void cpufreq_suspend(void)
1611 struct cpufreq_policy *policy;
1613 if (!cpufreq_driver)
1619 pr_debug("%s: Suspending Governors\n", __func__);
1621 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1622 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1623 pr_err("%s: Failed to stop governor for policy: %p\n",
1625 else if (cpufreq_driver->suspend
1626 && cpufreq_driver->suspend(policy))
1627 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1631 cpufreq_suspended = true;
1635 * cpufreq_resume() - Resume CPUFreq governors
1637 * Called during system wide Suspend/Hibernate cycle for resuming governors that
1638 * are suspended with cpufreq_suspend().
1640 void cpufreq_resume(void)
1642 struct cpufreq_policy *policy;
1644 if (!cpufreq_driver)
1650 pr_debug("%s: Resuming Governors\n", __func__);
1652 cpufreq_suspended = false;
1654 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
1655 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1656 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1657 pr_err("%s: Failed to start governor for policy: %p\n",
1659 else if (cpufreq_driver->resume
1660 && cpufreq_driver->resume(policy))
1661 pr_err("%s: Failed to resume driver: %p\n", __func__,
1665 * Schedule a call to cpufreq_update_policy() for the boot CPU, i.e. the
1666 * last policy in the list. It will verify that the current freq is in
1667 * sync with what we believe it to be.
1669 if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
1670 schedule_work(&policy->update);
1675 * cpufreq_get_current_driver - return current driver's name
1677 * Return the name string of the currently loaded cpufreq driver, or NULL if none.
1680 const char *cpufreq_get_current_driver(void)
1683 return cpufreq_driver->name;
1687 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1689 /*********************************************************************
1690 * NOTIFIER LISTS INTERFACE *
1691 *********************************************************************/
1694 * cpufreq_register_notifier - register a driver with cpufreq
1695 * @nb: notifier function to register
1696 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1698 * Add a driver to one of two lists: either a list of drivers that
1699 * are notified about clock rate changes (once before and once after
1700 * the transition), or a list of drivers that are notified about
1701 * changes in cpufreq policy.
1703 * This function may sleep, and has the same return conditions as
1704 * blocking_notifier_chain_register.
1706 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1710 if (cpufreq_disabled())
1713 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1716 case CPUFREQ_TRANSITION_NOTIFIER:
1717 ret = srcu_notifier_chain_register(
1718 &cpufreq_transition_notifier_list, nb);
1720 case CPUFREQ_POLICY_NOTIFIER:
1721 ret = blocking_notifier_chain_register(
1722 &cpufreq_policy_notifier_list, nb);
1730 EXPORT_SYMBOL(cpufreq_register_notifier);
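/*
 * Illustrative sketch (not part of this file): a client registering for
 * transition notifications. foo_transition_notifier() and foo_nb are
 * hypothetical.
 *
 *	static int foo_transition_notifier(struct notifier_block *nb,
 *					   unsigned long val, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (val == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n", freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_transition_notifier,
 *	};
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */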
1733 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1734 * @nb: notifier block to be unregistered
1735 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1737 * Remove a driver from the CPU frequency notifier list.
1739 * This function may sleep, and has the same return conditions as
1740 * blocking_notifier_chain_unregister.
1742 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1746 if (cpufreq_disabled())
1750 case CPUFREQ_TRANSITION_NOTIFIER:
1751 ret = srcu_notifier_chain_unregister(
1752 &cpufreq_transition_notifier_list, nb);
1754 case CPUFREQ_POLICY_NOTIFIER:
1755 ret = blocking_notifier_chain_unregister(
1756 &cpufreq_policy_notifier_list, nb);
1764 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1767 /*********************************************************************
1769 *********************************************************************/
1771 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1772 unsigned int target_freq,
1773 unsigned int relation)
1775 int retval = -EINVAL;
1776 unsigned int old_target_freq = target_freq;
1778 if (cpufreq_disabled())
1781 /* Make sure that target_freq is within supported range */
1782 if (target_freq > policy->max)
1783 target_freq = policy->max;
1784 if (target_freq < policy->min)
1785 target_freq = policy->min;
1787 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1788 policy->cpu, target_freq, relation, old_target_freq);
1791 * This might look like a redundant call, as we are checking it again
1792 * after finding the index. But it is left intentionally for cases where
1793 * the exact same freq is requested again, so we can save a few function calls.
1796 if (target_freq == policy->cur)
1799 if (cpufreq_driver->target)
1800 retval = cpufreq_driver->target(policy, target_freq, relation);
1801 else if (cpufreq_driver->target_index) {
1802 struct cpufreq_frequency_table *freq_table;
1803 struct cpufreq_freqs freqs;
1807 freq_table = cpufreq_frequency_get_table(policy->cpu);
1808 if (unlikely(!freq_table)) {
1809 pr_err("%s: Unable to find freq_table\n", __func__);
1813 retval = cpufreq_frequency_table_target(policy, freq_table,
1814 target_freq, relation, &index);
1815 if (unlikely(retval)) {
1816 pr_err("%s: Unable to find matching freq\n", __func__);
1820 if (freq_table[index].frequency == policy->cur) {
1825 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1828 freqs.old = policy->cur;
1829 freqs.new = freq_table[index].frequency;
1832 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1833 __func__, policy->cpu, freqs.old, freqs.new);
1835 cpufreq_notify_transition(policy, &freqs,
1839 retval = cpufreq_driver->target_index(policy, index);
1841 pr_err("%s: Failed to change cpu frequency: %d\n",
1845 cpufreq_notify_post_transition(policy, &freqs, retval);
1851 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1853 int cpufreq_driver_target(struct cpufreq_policy *policy,
1854 unsigned int target_freq,
1855 unsigned int relation)
1859 down_write(&policy->rwsem);
1861 ret = __cpufreq_driver_target(policy, target_freq, relation);
1863 up_write(&policy->rwsem);
1867 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
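/*
 * Illustrative sketch (not part of this file): a simple governor asking for a
 * frequency proportional to a load estimate. 'load' (0..100) is hypothetical.
 *
 *	unsigned int target = policy->min +
 *			      load * (policy->max - policy->min) / 100;
 *
 *	cpufreq_driver_target(policy, target, CPUFREQ_RELATION_L);
 */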
1870 * when "event" is CPUFREQ_GOV_LIMITS
1873 static int __cpufreq_governor(struct cpufreq_policy *policy,
1878 /* This must only be defined when the default governor is known to have
1879 latency restrictions, e.g. conservative or ondemand.
1880 That this is the case is already ensured in Kconfig
1882 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1883 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1885 struct cpufreq_governor *gov = NULL;
1888 /* Don't start any governor operations if we are entering suspend */
1889 if (cpufreq_suspended)
1892 if (policy->governor->max_transition_latency &&
1893 policy->cpuinfo.transition_latency >
1894 policy->governor->max_transition_latency) {
1898 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1899 policy->governor->name, gov->name);
1900 policy->governor = gov;
1904 if (event == CPUFREQ_GOV_POLICY_INIT)
1905 if (!try_module_get(policy->governor->owner))
1908 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1909 policy->cpu, event);
1911 mutex_lock(&cpufreq_governor_lock);
1912 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1913 || (!policy->governor_enabled
1914 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1915 mutex_unlock(&cpufreq_governor_lock);
1919 if (event == CPUFREQ_GOV_STOP)
1920 policy->governor_enabled = false;
1921 else if (event == CPUFREQ_GOV_START)
1922 policy->governor_enabled = true;
1924 mutex_unlock(&cpufreq_governor_lock);
1926 ret = policy->governor->governor(policy, event);
1929 if (event == CPUFREQ_GOV_POLICY_INIT)
1930 policy->governor->initialized++;
1931 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1932 policy->governor->initialized--;
1934 /* Restore original values */
1935 mutex_lock(&cpufreq_governor_lock);
1936 if (event == CPUFREQ_GOV_STOP)
1937 policy->governor_enabled = true;
1938 else if (event == CPUFREQ_GOV_START)
1939 policy->governor_enabled = false;
1940 mutex_unlock(&cpufreq_governor_lock);
1943 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1944 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1945 module_put(policy->governor->owner);
1950 int cpufreq_register_governor(struct cpufreq_governor *governor)
1957 if (cpufreq_disabled())
1960 mutex_lock(&cpufreq_governor_mutex);
1962 governor->initialized = 0;
1964 if (__find_governor(governor->name) == NULL) {
1966 list_add(&governor->governor_list, &cpufreq_governor_list);
1969 mutex_unlock(&cpufreq_governor_mutex);
1972 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
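/*
 * Illustrative sketch (not part of this file): the registration boilerplate a
 * governor module typically uses. foo_governor_fn() and its event handling
 * are hypothetical (roughly what a performance-style governor does).
 *
 *	static int foo_governor_fn(struct cpufreq_policy *policy,
 *				   unsigned int event)
 *	{
 *		switch (event) {
 *		case CPUFREQ_GOV_START:
 *		case CPUFREQ_GOV_LIMITS:
 *			return cpufreq_driver_target(policy, policy->max,
 *						     CPUFREQ_RELATION_H);
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	static struct cpufreq_governor foo_governor = {
 *		.name		= "foo",
 *		.governor	= foo_governor_fn,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	cpufreq_register_governor(&foo_governor);
 */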
1974 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1981 if (cpufreq_disabled())
1984 for_each_present_cpu(cpu) {
1985 if (cpu_online(cpu))
1987 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1988 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1991 mutex_lock(&cpufreq_governor_mutex);
1992 list_del(&governor->governor_list);
1993 mutex_unlock(&cpufreq_governor_mutex);
1996 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1999 /*********************************************************************
2000 * POLICY INTERFACE *
2001 *********************************************************************/
2004 * cpufreq_get_policy - get the current cpufreq_policy
2005 * @policy: struct cpufreq_policy into which the current cpufreq_policy is written
2008 * Reads the current cpufreq policy.
2010 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2012 struct cpufreq_policy *cpu_policy;
2016 cpu_policy = cpufreq_cpu_get(cpu);
2020 memcpy(policy, cpu_policy, sizeof(*policy));
2022 cpufreq_cpu_put(cpu_policy);
2025 EXPORT_SYMBOL(cpufreq_get_policy);
2028 * policy : current policy.
2029 * new_policy: policy to be set.
2031 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2032 struct cpufreq_policy *new_policy)
2034 struct cpufreq_governor *old_gov;
2037 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2038 new_policy->cpu, new_policy->min, new_policy->max);
2040 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2042 if (new_policy->min > policy->max || new_policy->max < policy->min)
2045 /* verify the cpu speed can be set within this limit */
2046 ret = cpufreq_driver->verify(new_policy);
2050 /* adjust if necessary - all reasons */
2051 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2052 CPUFREQ_ADJUST, new_policy);
2054 /* adjust if necessary - hardware incompatibility*/
2055 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2056 CPUFREQ_INCOMPATIBLE, new_policy);
2059 * verify the cpu speed can be set within this limit, which might be
2060 * different from the first one
2062 ret = cpufreq_driver->verify(new_policy);
2066 /* notification of the new policy */
2067 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2068 CPUFREQ_NOTIFY, new_policy);
2070 policy->min = new_policy->min;
2071 policy->max = new_policy->max;
2073 pr_debug("new min and max freqs are %u - %u kHz\n",
2074 policy->min, policy->max);
2076 if (cpufreq_driver->setpolicy) {
2077 policy->policy = new_policy->policy;
2078 pr_debug("setting range\n");
2079 return cpufreq_driver->setpolicy(new_policy);
2082 if (new_policy->governor == policy->governor)
2085 pr_debug("governor switch\n");
2087 /* save old, working values */
2088 old_gov = policy->governor;
2089 /* end old governor */
2091 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
2092 up_write(&policy->rwsem);
2093 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2094 down_write(&policy->rwsem);
2097 /* start new governor */
2098 policy->governor = new_policy->governor;
2099 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2100 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
2103 up_write(&policy->rwsem);
2104 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
2105 down_write(&policy->rwsem);
2108 /* new governor failed, so re-start old one */
2109 pr_debug("starting governor %s failed\n", policy->governor->name);
2111 policy->governor = old_gov;
2112 __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
2113 __cpufreq_governor(policy, CPUFREQ_GOV_START);
2119 pr_debug("governor: change or update limits\n");
2120 return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2124 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2125 * @cpu: CPU which shall be re-evaluated
2127 * Useful for policy notifiers which have different requirements
2128 * at different times.
2130 int cpufreq_update_policy(unsigned int cpu)
2132 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2133 struct cpufreq_policy new_policy;
2141 down_write(&policy->rwsem);
2143 pr_debug("updating policy for CPU %u\n", cpu);
2144 memcpy(&new_policy, policy, sizeof(*policy));
2145 new_policy.min = policy->user_policy.min;
2146 new_policy.max = policy->user_policy.max;
2147 new_policy.policy = policy->user_policy.policy;
2148 new_policy.governor = policy->user_policy.governor;
2151 * BIOS might change freq behind our back
2152 * -> ask driver for current freq and notify governors about a change
2154 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2155 new_policy.cur = cpufreq_driver->get(cpu);
2156 if (WARN_ON(!new_policy.cur)) {
2162 pr_debug("Driver did not initialize current freq\n");
2163 policy->cur = new_policy.cur;
2165 if (policy->cur != new_policy.cur && has_target())
2166 cpufreq_out_of_sync(cpu, policy->cur,
2171 ret = cpufreq_set_policy(policy, &new_policy);
2173 up_write(&policy->rwsem);
2175 cpufreq_cpu_put(policy);
2179 EXPORT_SYMBOL(cpufreq_update_policy);
2181 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2182 unsigned long action, void *hcpu)
2184 unsigned int cpu = (unsigned long)hcpu;
2187 dev = get_cpu_device(cpu);
2189 switch (action & ~CPU_TASKS_FROZEN) {
2191 __cpufreq_add_dev(dev, NULL);
2194 case CPU_DOWN_PREPARE:
2195 __cpufreq_remove_dev_prepare(dev, NULL);
2199 __cpufreq_remove_dev_finish(dev, NULL);
2202 case CPU_DOWN_FAILED:
2203 __cpufreq_add_dev(dev, NULL);
2210 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2211 .notifier_call = cpufreq_cpu_callback,
2214 /*********************************************************************
2216 *********************************************************************/
2217 static int cpufreq_boost_set_sw(int state)
2219 struct cpufreq_frequency_table *freq_table;
2220 struct cpufreq_policy *policy;
2223 list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
2224 freq_table = cpufreq_frequency_get_table(policy->cpu);
2226 ret = cpufreq_frequency_table_cpuinfo(policy,
2229 pr_err("%s: Policy frequency update failed\n",
2233 policy->user_policy.max = policy->max;
2234 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2241 int cpufreq_boost_trigger_state(int state)
2243 unsigned long flags;
2246 if (cpufreq_driver->boost_enabled == state)
2249 write_lock_irqsave(&cpufreq_driver_lock, flags);
2250 cpufreq_driver->boost_enabled = state;
2251 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2253 ret = cpufreq_driver->set_boost(state);
2255 write_lock_irqsave(&cpufreq_driver_lock, flags);
2256 cpufreq_driver->boost_enabled = !state;
2257 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2259 pr_err("%s: Cannot %s BOOST\n",
2260 __func__, state ? "enable" : "disable");
2266 int cpufreq_boost_supported(void)
2268 if (likely(cpufreq_driver))
2269 return cpufreq_driver->boost_supported;
2273 EXPORT_SYMBOL_GPL(cpufreq_boost_supported);
2275 int cpufreq_boost_enabled(void)
2277 return cpufreq_driver->boost_enabled;
2279 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2281 /*********************************************************************
2282 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2283 *********************************************************************/
2286 * cpufreq_register_driver - register a CPU Frequency driver
2287 * @driver_data: A struct cpufreq_driver containing the values
2288 * submitted by the CPU Frequency driver.
2290 * Registers a CPU Frequency driver to this core code. This code
2291 * returns zero on success, -EBUSY when another driver got here first
2292 * (and isn't unregistered in the meantime).
2295 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2297 unsigned long flags;
2300 if (cpufreq_disabled())
2303 if (!driver_data || !driver_data->verify || !driver_data->init ||
2304 !(driver_data->setpolicy || driver_data->target_index ||
2305 driver_data->target) ||
2306 (driver_data->setpolicy && (driver_data->target_index ||
2307 driver_data->target)))
2310 pr_debug("trying to register driver %s\n", driver_data->name);
2312 if (driver_data->setpolicy)
2313 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2315 write_lock_irqsave(&cpufreq_driver_lock, flags);
2316 if (cpufreq_driver) {
2317 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2320 cpufreq_driver = driver_data;
2321 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2323 if (cpufreq_boost_supported()) {
2325 * Check if driver provides function to enable boost -
2326 * if not, use cpufreq_boost_set_sw as default
2328 if (!cpufreq_driver->set_boost)
2329 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2331 ret = cpufreq_sysfs_create_file(&boost.attr);
2333 pr_err("%s: cannot register global BOOST sysfs file\n",
2335 goto err_null_driver;
2339 ret = subsys_interface_register(&cpufreq_interface);
2341 goto err_boost_unreg;
2343 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2347 /* check for at least one working CPU */
2348 for (i = 0; i < nr_cpu_ids; i++)
2349 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2354 /* if all ->init() calls failed, unregister */
2356 pr_debug("no CPU initialized for driver %s\n",
2362 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2363 pr_debug("driver %s up and running\n", driver_data->name);
2367 subsys_interface_unregister(&cpufreq_interface);
2369 if (cpufreq_boost_supported())
2370 cpufreq_sysfs_remove_file(&boost.attr);
2372 write_lock_irqsave(&cpufreq_driver_lock, flags);
2373 cpufreq_driver = NULL;
2374 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2377 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
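/*
 * Illustrative sketch (not part of this file): module init/exit pairing for
 * the driver registration above. foo_cpufreq_driver is hypothetical.
 *
 *	static int __init foo_cpufreq_module_init(void)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 *	module_init(foo_cpufreq_module_init);
 *
 *	static void __exit foo_cpufreq_module_exit(void)
 *	{
 *		cpufreq_unregister_driver(&foo_cpufreq_driver);
 *	}
 *	module_exit(foo_cpufreq_module_exit);
 */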
2380 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2382 * Unregister the current CPUFreq driver. Only call this if you have
2383 * the right to do so, i.e. if you have succeeded in initialising before!
2384 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2385 * currently not initialised.
2387 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2389 unsigned long flags;
2391 if (!cpufreq_driver || (driver != cpufreq_driver))
2394 pr_debug("unregistering driver %s\n", driver->name);
2396 subsys_interface_unregister(&cpufreq_interface);
2397 if (cpufreq_boost_supported())
2398 cpufreq_sysfs_remove_file(&boost.attr);
2400 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2402 down_write(&cpufreq_rwsem);
2403 write_lock_irqsave(&cpufreq_driver_lock, flags);
2405 cpufreq_driver = NULL;
2407 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2408 up_write(&cpufreq_rwsem);
2412 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2414 static int __init cpufreq_core_init(void)
2416 if (cpufreq_disabled())
2419 cpufreq_global_kobject = kobject_create();
2420 BUG_ON(!cpufreq_global_kobject);
2424 core_initcall(cpufreq_core_init);