/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>
/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver __rcu *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);
/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu)					\
static int lock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
									\
	return 0;							\
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)					\
static void unlock_policy_rwsem_##mode(int cpu)				\
{									\
	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);		\
	BUG_ON(policy_cpu == -1);					\
	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
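/*
 * Illustrative sketch (not part of the original file): for mode == read the
 * macro above generates a helper roughly equivalent to
 *
 *	static int lock_policy_rwsem_read(int cpu)
 *	{
 *		int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
 *		BUG_ON(policy_cpu == -1);
 *		down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
 *		return 0;
 *	}
 *
 * and a typical reader then brackets its access as
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	... read the policy ...
 *	unlock_policy_rwsem_read(cpu);
 */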
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
		unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);
/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
	srcu_init_notifier_head(&cpufreq_transition_notifier_list);
	init_cpufreq_transition_notifier_list_called = true;
	return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);
bool have_governor_per_policy(void)
{
	bool have_governor_per_policy;

	rcu_read_lock();
	have_governor_per_policy =
		rcu_dereference(cpufreq_driver)->have_governor_per_policy;
	rcu_read_unlock();

	return have_governor_per_policy;
}
static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
	struct cpufreq_policy *data;
	struct cpufreq_driver *driver;
	unsigned long flags;

	if (cpu >= nr_cpu_ids)
		goto err_out;

	/* get the cpufreq driver */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!driver)
		goto err_out_unlock;

	if (!try_module_get(driver->owner))
		goto err_out_unlock;

	/* get the CPU */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	if (!data)
		goto err_out_put_module;

	if (!sysfs && !kobject_get(&data->kobj))
		goto err_out_put_module;

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
	rcu_read_unlock();
	return data;

err_out_put_module:
	module_put(driver->owner);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out_unlock:
	rcu_read_unlock();
err_out:
	return NULL;
}
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	if (cpufreq_disabled())
		return NULL;

	return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
	return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
	if (!sysfs)
		kobject_put(&data->kobj);
	rcu_read_lock();
	module_put(rcu_dereference(cpufreq_driver)->owner);
	rcu_read_unlock();
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
	if (cpufreq_disabled())
		return;

	__cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
	__cpufreq_cpu_put(data, true);
}
/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/
/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; "
			"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
	}
	if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu "
			"for frequency %u kHz\n", loops_per_jiffy, ci->new);
	}
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
	return;
}
#endif
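/*
 * Worked example (not part of the original file), assuming cpufreq_scale()
 * computes old * mult / div with fixed-point helpers: with a reference
 * loops_per_jiffy of 4997120 saved at l_p_j_ref_freq = 2000000 kHz, a
 * transition to ci->new = 1000000 kHz rescales it to
 * 4997120 * 1000000 / 2000000 = 2498560.
 */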
void __cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	rcu_read_lock();
	freqs->flags = rcu_dereference(cpufreq_driver)->flags;
	rcu_read_unlock();
	pr_debug("notification %u of frequency transition to %u kHz\n",
		state, freqs->new);

	switch (state) {

	case CPUFREQ_PRECHANGE:
		/* detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (!(freqs->flags & CPUFREQ_CONST_LOOPS)) {
			if ((policy) && (policy->cpu == freqs->cpu) &&
			    (policy->cur) && (policy->cur != freqs->old)) {
				pr_debug("Warning: CPU frequency is"
					" %u, cpufreq assumed %u kHz.\n",
					freqs->old, policy->cur);
				freqs->old = policy->cur;
			}
		}
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_PRECHANGE, freqs);
		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
			(unsigned long)freqs->cpu);
		trace_cpu_frequency(freqs->new, freqs->cpu);
		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
				CPUFREQ_POSTCHANGE, freqs);
		if (likely(policy) && likely(policy->cpu == freqs->cpu))
			policy->cur = freqs->new;
		break;
	}
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, unsigned int state)
{
	for_each_cpu(freqs->cpu, policy->cpus)
		__cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
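/*
 * Illustrative usage sketch (not part of the original file): a scaling
 * driver typically wraps the actual hardware change like this:
 *
 *	struct cpufreq_freqs freqs = {
 *		.old = policy->cur,
 *		.new = target_khz,
 *	};
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	... program the hardware to target_khz ...
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */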
/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static struct cpufreq_governor *__find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	list_for_each_entry(t, &cpufreq_governor_list, governor_list)
		if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}
/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
				struct cpufreq_governor **governor)
{
	int err = -EINVAL;
	struct cpufreq_driver *driver;
	bool has_setpolicy;
	bool has_target;

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!driver) {
		rcu_read_unlock();
		goto out;
	}
	has_setpolicy = driver->setpolicy ? true : false;
	has_target = driver->target ? true : false;
	rcu_read_unlock();

	if (has_setpolicy) {
		if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_PERFORMANCE;
			err = 0;
		} else if (!strnicmp(str_governor, "powersave",
						CPUFREQ_NAME_LEN)) {
			*policy = CPUFREQ_POLICY_POWERSAVE;
			err = 0;
		}
	} else if (has_target) {
		struct cpufreq_governor *t;

		mutex_lock(&cpufreq_governor_mutex);

		t = __find_governor(str_governor);

		if (t == NULL) {
			int ret;

			mutex_unlock(&cpufreq_governor_mutex);
			ret = request_module("cpufreq_%s", str_governor);
			mutex_lock(&cpufreq_governor_mutex);

			if (ret == 0)
				t = __find_governor(str_governor);
		}

		if (t != NULL) {
			*governor = t;
			err = 0;
		}

		mutex_unlock(&cpufreq_governor_mutex);
	}
out:
	return err;
}
/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sprintf(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
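/*
 * Illustration (not part of the original file): show_one(scaling_cur_freq,
 * cur) expands to roughly
 *
 *	static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy,
 *					     char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 *
 * which backs /sys/devices/system/cpu/cpuN/cpufreq/scaling_cur_freq.
 */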
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned int ret;						\
	struct cpufreq_policy new_policy;				\
									\
	ret = cpufreq_get_policy(&new_policy, policy->cpu);		\
	if (ret)							\
		return -EINVAL;						\
									\
	ret = sscanf(buf, "%u", &new_policy.object);			\
	if (ret != 1)							\
		return -EINVAL;						\
									\
	ret = __cpufreq_set_policy(policy, &new_policy);		\
	policy->user_policy.object = policy->object;			\
									\
	return ret ? ret : count;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
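/*
 * Illustration (not part of the original file): after expansion,
 * store_one(scaling_max_freq, max) handles a write such as
 *
 *	# echo 1600000 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq
 *
 * by copying the current policy, overwriting new_policy.max with 1600000,
 * and pushing the result through __cpufreq_set_policy(), so all limit
 * checks and notifier adjustments still apply.
 */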
/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
					char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy->cpu);
	if (!cur_freq)
		return sprintf(buf, "<unknown>");
	return sprintf(buf, "%u\n", cur_freq);
}
/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sprintf(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sprintf(buf, "performance\n");
	else if (policy->governor)
		return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
				policy->governor->name);
	return -EINVAL;
}
/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int ret;
	char	str_governor[16];
	struct cpufreq_policy new_policy;

	ret = cpufreq_get_policy(&new_policy, policy->cpu);
	if (ret)
		return ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_parse_governor(str_governor, &new_policy.policy,
						&new_policy.governor))
		return -EINVAL;

	/* Do not use cpufreq_set_policy here or the user_policy.max
	   will be wrongly overridden */
	ret = __cpufreq_set_policy(policy, &new_policy);

	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret)
		return ret;
	else
		return count;
}
/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	ssize_t size;

	rcu_read_lock();
	size = scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
			rcu_dereference(cpufreq_driver)->name);
	rcu_read_unlock();
	return size;
}
/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	rcu_read_lock();
	if (!rcu_dereference(cpufreq_driver)->target) {
		rcu_read_unlock();
		i += sprintf(buf, "performance powersave");
		goto out;
	}
	rcu_read_unlock();

	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			goto out;
		i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
	}
out:
	i += sprintf(&buf[i], "\n");
	return i;
}
static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		if (i)
			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}
	i += sprintf(&buf[i], "\n");
	return i;
}
/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return show_cpus(policy->cpus, buf);
}
static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
					const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}
static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sprintf(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}
/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;
	int (*bios_limit)(int cpu, unsigned int *limit);

	rcu_read_lock();
	bios_limit = rcu_dereference(cpufreq_driver)->bios_limit;
	rcu_read_unlock();

	if (bios_limit) {
		ret = bios_limit(policy->cpu, &limit);
		if (!ret)
			return sprintf(buf, "%u\n", limit);
	}
	return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
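/*
 * Illustrative sketch (based on the cpufreq_freq_attr_* helpers in
 * linux/cpufreq.h of that era, not on this file): cpufreq_freq_attr_ro()
 * expands to roughly
 *
 *	static struct freq_attr scaling_driver =
 *		__ATTR(scaling_driver, 0444, show_scaling_driver, NULL);
 *
 * i.e. each macro ties a sysfs file name to the show_*()/store_*()
 * helpers defined above.
 */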
static struct attribute *default_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_read(policy->cpu) < 0)
		goto fail;

	if (fattr->show)
		ret = fattr->show(policy, buf);
	else
		ret = -EIO;

	unlock_policy_rwsem_read(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EINVAL;
	policy = cpufreq_cpu_get_sysfs(policy->cpu);
	if (!policy)
		goto no_policy;

	if (lock_policy_rwsem_write(policy->cpu) < 0)
		goto fail;

	if (fattr->store)
		ret = fattr->store(policy, buf, count);
	else
		ret = -EIO;

	unlock_policy_rwsem_write(policy->cpu);
fail:
	cpufreq_cpu_put_sysfs(policy);
no_policy:
	return ret;
}
static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= cpufreq_sysfs_release,
};
/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
				   struct cpufreq_policy *policy)
{
	unsigned int j;
	int ret = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpufreq_policy *managed_policy;
		struct device *cpu_dev;

		if (j == cpu)
			continue;

		pr_debug("CPU %u already managed, adding link\n", j);
		managed_policy = cpufreq_cpu_get(cpu);
		cpu_dev = get_cpu_device(j);
		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
					"cpufreq");
		if (ret) {
			cpufreq_cpu_put(managed_policy);
			return ret;
		}
	}
	return ret;
}
static int cpufreq_add_dev_interface(unsigned int cpu,
				     struct cpufreq_policy *policy,
				     struct device *dev)
{
	struct cpufreq_policy new_policy;
	struct freq_attr **drv_attr;
	struct cpufreq_driver *driver;
	unsigned long flags;
	int ret = 0;
	unsigned int j;

	/* prepare interface data */
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   &dev->kobj, "cpufreq");
	if (ret)
		return ret;

	/* set up files for this cpu device */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	drv_attr = driver->attr;
	while ((drv_attr) && (*drv_attr)) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			goto err_out_unlock;
		drv_attr++;
	}
	if (driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			goto err_out_unlock;
	}
	if (driver->target) {
		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
		if (ret)
			goto err_out_unlock;
	}
	if (driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			goto err_out_unlock;
	}
	rcu_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	ret = cpufreq_add_dev_symlink(cpu, policy);
	if (ret)
		goto err_out_kobj_put;

	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
	/* assure that the starting sequence is run in __cpufreq_set_policy */
	policy->governor = NULL;

	/* set default policy */
	ret = __cpufreq_set_policy(policy, &new_policy);
	policy->user_policy.policy = policy->policy;
	policy->user_policy.governor = policy->governor;

	if (ret) {
		int (*exit)(struct cpufreq_policy *policy);

		pr_debug("setting policy failed\n");
		rcu_read_lock();
		exit = rcu_dereference(cpufreq_driver)->exit;
		rcu_read_unlock();
		if (exit)
			exit(policy);
	}
	return ret;

err_out_unlock:
	rcu_read_unlock();
err_out_kobj_put:
	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);
	return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
				  struct device *dev)
{
	struct cpufreq_policy *policy;
	int ret = 0;
	unsigned long flags;

	policy = cpufreq_cpu_get(sibling);
	WARN_ON(!policy);

	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);

	lock_policy_rwsem_write(sibling);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpumask_set_cpu(cpu, policy->cpus);
	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
	per_cpu(cpufreq_cpu_data, cpu) = policy;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	unlock_policy_rwsem_write(sibling);

	__cpufreq_governor(policy, CPUFREQ_GOV_START);
	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);

	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
	if (ret) {
		cpufreq_cpu_put(policy);
		return ret;
	}

	return 0;
}
#endif
/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration
 * concurrently with CPU hotplug and all hell will break loose. Tried to
 * clean this mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int j, cpu = dev->id;
	int ret = -ENOMEM;
	struct cpufreq_policy *policy;
	struct cpufreq_driver *driver;
	int (*init)(struct cpufreq_policy *policy);
	unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
	struct cpufreq_governor *gov;
	int sibling;
#endif

	if (cpu_is_offline(cpu))
		return 0;

	pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_SMP
	/* check whether a different CPU already registered this
	 * CPU because it is in the same boat. */
	policy = cpufreq_cpu_get(cpu);
	if (unlikely(policy)) {
		cpufreq_cpu_put(policy);
		return 0;
	}

#ifdef CONFIG_HOTPLUG_CPU
	/* Check if this cpu was hot-unplugged earlier and has siblings */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_online_cpu(sibling) {
		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
			read_unlock_irqrestore(&cpufreq_driver_lock, flags);
			return cpufreq_add_policy_cpu(cpu, sibling, dev);
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!try_module_get(driver->owner)) {
		rcu_read_unlock();
		ret = -EINVAL;
		goto module_out;
	}
	init = driver->init;
	rcu_read_unlock();

	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
	if (!policy)
		goto nomem_out;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	policy->cpu = cpu;
	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
	cpumask_copy(policy->cpus, cpumask_of(cpu));

	/* Initially set CPU itself as the policy_cpu */
	per_cpu(cpufreq_policy_cpu, cpu) = cpu;

	init_completion(&policy->kobj_unregister);
	INIT_WORK(&policy->update, handle_update);

	/* call driver. From then on the cpufreq must be able
	 * to accept all calls to ->verify and ->setpolicy for this CPU
	 */
	ret = init(policy);
	if (ret) {
		pr_debug("initialization failed\n");
		goto err_set_policy_cpu;
	}

	/* related cpus should at least have policy->cpus */
	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

	/*
	 * affected cpus must always be the ones that are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	policy->user_policy.min = policy->min;
	policy->user_policy.max = policy->max;

	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
	if (gov) {
		policy->governor = gov;
		pr_debug("Restoring governor %s for cpu %d\n",
			 policy->governor->name, cpu);
	}
#endif

	ret = cpufreq_add_dev_interface(cpu, policy, dev);
	if (ret)
		goto err_out_unregister;

	kobject_uevent(&policy->kobj, KOBJ_ADD);
	rcu_read_lock();
	module_put(rcu_dereference(cpufreq_driver)->owner);
	rcu_read_unlock();
	pr_debug("initialization complete\n");

	return 0;

err_out_unregister:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_cpu_data, j) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	kobject_put(&policy->kobj);
	wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);
nomem_out:
	rcu_read_lock();
	module_put(rcu_dereference(cpufreq_driver)->owner);
	rcu_read_unlock();
module_out:
	return ret;
}
static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int j;

	policy->last_cpu = policy->cpu;
	policy->cpu = cpu;

	for_each_cpu(j, policy->cpus)
		per_cpu(cpufreq_policy_cpu, j) = cpu;

#ifdef CONFIG_CPU_FREQ_TABLE
	cpufreq_frequency_table_update_policy_cpu(policy);
#endif
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_UPDATE_POLICY_CPU, policy);
}
/**
 * __cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 * Caller should already have policy_rwsem in write mode for this CPU.
 * This routine frees the rwsem before returning.
 */
static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id, ret, cpus;
	unsigned long flags;
	struct cpufreq_policy *data;
	struct cpufreq_driver *driver;
	struct kobject *kobj;
	struct completion *cmp;
	struct device *cpu_dev;
	bool has_target;
	int (*exit)(struct cpufreq_policy *policy);

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	data = per_cpu(cpufreq_cpu_data, cpu);
	per_cpu(cpufreq_cpu_data, cpu) = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	if (!data) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return -EINVAL;
	}

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	has_target = driver->target ? true : false;
	exit = driver->exit;
	if (has_target)
		__cpufreq_governor(data, CPUFREQ_GOV_STOP);

#ifdef CONFIG_HOTPLUG_CPU
	if (!driver->setpolicy)
		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
			data->governor->name, CPUFREQ_NAME_LEN);
#endif
	rcu_read_unlock();

	WARN_ON(lock_policy_rwsem_write(cpu));
	cpus = cpumask_weight(data->cpus);

	if (cpus > 1)
		cpumask_clear_cpu(cpu, data->cpus);
	unlock_policy_rwsem_write(cpu);

	if (cpu != data->cpu) {
		sysfs_remove_link(&dev->kobj, "cpufreq");
	} else if (cpus > 1) {
		/* first sibling now owns the new sysfs dir */
		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
		if (ret) {
			pr_err("%s: Failed to move kobj: %d", __func__, ret);

			WARN_ON(lock_policy_rwsem_write(cpu));
			cpumask_set_cpu(cpu, data->cpus);

			write_lock_irqsave(&cpufreq_driver_lock, flags);
			per_cpu(cpufreq_cpu_data, cpu) = data;
			write_unlock_irqrestore(&cpufreq_driver_lock, flags);

			unlock_policy_rwsem_write(cpu);

			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
						"cpufreq");
			return -EINVAL;
		}

		WARN_ON(lock_policy_rwsem_write(cpu));
		update_policy_cpu(data, cpu_dev->id);
		unlock_policy_rwsem_write(cpu);
		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
			 __func__, cpu_dev->id, cpu);
	}

	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
	cpufreq_cpu_put(data);

	/* If cpu is last user of policy, free policy */
	if (cpus == 1) {
		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);

		lock_policy_rwsem_read(cpu);
		kobj = &data->kobj;
		cmp = &data->kobj_unregister;
		unlock_policy_rwsem_read(cpu);
		kobject_put(kobj);

		/* we need to make sure that the underlying kobj is actually
		 * not referenced anymore by anybody before we proceed with
		 * unloading.
		 */
		pr_debug("waiting for dropping of refcount\n");
		wait_for_completion(cmp);
		pr_debug("wait complete\n");

		if (exit)
			exit(data);

		free_cpumask_var(data->related_cpus);
		free_cpumask_var(data->cpus);
		kfree(data);
	} else if (has_target) {
		__cpufreq_governor(data, CPUFREQ_GOV_START);
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

	per_cpu(cpufreq_policy_cpu, cpu) = -1;
	return 0;
}
static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	int retval;

	if (cpu_is_offline(cpu))
		return 0;

	retval = __cpufreq_remove_dev(dev, sif);
	return retval;
}
static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);
	unsigned int cpu = policy->cpu;
	pr_debug("handle_update for cpu %u called\n", cpu);
	cpufreq_update_policy(cpu);
}
/**
 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
 * in deep trouble.
 * @cpu: cpu number
 * @old_freq: CPU frequency the kernel thinks the CPU runs at
 * @new_freq: CPU frequency the CPU actually runs at
 *
 * We adjust to current frequency first, and need to clean up later.
 * So either call to cpufreq_update_policy() or schedule handle_update()).
 */
static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
				unsigned int new_freq)
{
	struct cpufreq_policy *policy;
	struct cpufreq_freqs freqs;
	unsigned long flags;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
		 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);

	freqs.old = old_freq;
	freqs.new = new_freq;

	read_lock_irqsave(&cpufreq_driver_lock, flags);
	policy = per_cpu(cpufreq_cpu_data, cpu);
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
}
/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	unsigned int ret_freq = 0;

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (driver && driver->setpolicy && driver->get) {
		get = driver->get;
		rcu_read_unlock();
		return get(cpu);
	}
	rcu_read_unlock();

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);
/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);
static unsigned int __cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	unsigned int ret_freq = 0;
	u8 flags;

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (!driver->get) {
		rcu_read_unlock();
		return ret_freq;
	}
	flags = driver->flags;
	get = driver->get;
	rcu_read_unlock();

	ret_freq = get(cpu);

	if (ret_freq && policy->cur &&
	    !(flags & CPUFREQ_CONST_LOOPS)) {
		/* verify no discrepancy between actual and
		 * saved value exists
		 */
		if (unlikely(ret_freq != policy->cur)) {
			cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
			schedule_work(&policy->update);
		}
	}

	return ret_freq;
}
/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	unsigned int ret_freq = 0;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		goto out;

	if (unlikely(lock_policy_rwsem_read(cpu)))
		goto out_policy;

	ret_freq = __cpufreq_get(cpu);

	unlock_policy_rwsem_read(cpu);

out_policy:
	cpufreq_cpu_put(policy);
out:
	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
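/*
 * Illustrative usage sketch (not part of the original file): callers that
 * only need the cached value use cpufreq_quick_get(), while cpufreq_get()
 * may query the hardware through the driver's ->get() callback:
 *
 *	unsigned int cached_khz = cpufreq_quick_get(0);
 *	unsigned int actual_khz = cpufreq_get(0);
 *
 *	if (actual_khz && cached_khz != actual_khz)
 *		pr_info("cpu0: cached %u kHz, hardware reports %u kHz\n",
 *			cached_khz, actual_khz);
 */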
static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};
/**
 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
 *
 * This function is only executed for the boot processor. The other CPUs
 * have been put offline by means of CPU hotplug.
 */
static int cpufreq_bp_suspend(void)
{
	int (*suspend)(struct cpufreq_policy *policy);
	int ret = 0;

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("suspending cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return 0;

	rcu_read_lock();
	suspend = rcu_dereference(cpufreq_driver)->suspend;
	rcu_read_unlock();
	if (suspend) {
		ret = suspend(cpu_policy);
		if (ret)
			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
					"step on CPU %u\n", cpu_policy->cpu);
	}

	cpufreq_cpu_put(cpu_policy);
	return ret;
}
/**
 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
 *
 *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
 *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
 *	    restored. It will verify that the current freq is in sync with
 *	    what we believe it to be. This is a bit later than when it
 *	    should be, but nonetheless it's better than calling
 *	    cpufreq_driver->get() here which might re-enable interrupts...
 *
 * This function is only executed for the boot CPU. The other CPUs have not
 * been turned on yet.
 */
static void cpufreq_bp_resume(void)
{
	int ret = 0;
	int (*resume)(struct cpufreq_policy *policy);

	int cpu = smp_processor_id();
	struct cpufreq_policy *cpu_policy;

	pr_debug("resuming cpu %u\n", cpu);

	/* If there's no policy for the boot CPU, we have nothing to do. */
	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return;

	rcu_read_lock();
	resume = rcu_dereference(cpufreq_driver)->resume;
	rcu_read_unlock();

	if (resume) {
		ret = resume(cpu_policy);
		if (ret) {
			printk(KERN_ERR "cpufreq: resume failed in ->resume "
					"step on CPU %u\n", cpu_policy->cpu);
			goto fail;
		}
	}

	schedule_work(&cpu_policy->update);

fail:
	cpufreq_cpu_put(cpu_policy);
}

static struct syscore_ops cpufreq_syscore_ops = {
	.suspend	= cpufreq_bp_suspend,
	.resume		= cpufreq_bp_resume,
};
/**
 * cpufreq_get_current_driver - return current driver's name
 *
 * Return the name string of the currently loaded cpufreq driver
 * or NULL, if none.
 */
const char *cpufreq_get_current_driver(void)
{
	struct cpufreq_driver *driver;
	const char *name = NULL;

	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	if (driver)
		name = driver->name;
	rcu_read_unlock();

	return name;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - register a driver with cpufreq
 * @nb: notifier function to register
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Add a driver to one of two lists: either a list of drivers that
 * are notified about clock rate changes (once before and once after
 * the transition), or a list of drivers that are notified about
 * changes in cpufreq policy.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_register.
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	WARN_ON(!init_cpufreq_transition_notifier_list_called);

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
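/*
 * Illustrative usage sketch (not part of the original file): a client that
 * wants to react to frequency transitions registers a notifier_block; the
 * names below are hypothetical:
 *
 *	static int my_freq_transition(struct notifier_block *nb,
 *				      unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_info("cpu%u now at %u kHz\n",
 *				freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_freq_transition,
 *	};
 *
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */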
/**
 * cpufreq_unregister_notifier - unregister a driver with cpufreq
 * @nb: notifier block to be unregistered
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
 *
 * Remove a driver from the CPU frequency notifier list.
 *
 * This function may sleep, and has the same return conditions as
 * blocking_notifier_chain_unregister.
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);
/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	int retval = -EINVAL;
	unsigned int old_target_freq = target_freq;
	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);

	if (cpufreq_disabled())
		return -ENODEV;

	/* Make sure that target_freq is within supported range */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	if (target_freq == policy->cur)
		return 0;

	rcu_read_lock();
	target = rcu_dereference(cpufreq_driver)->target;
	rcu_read_unlock();
	if (target)
		retval = target(policy, target_freq, relation);

	return retval;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret = -EINVAL;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		goto no_policy;

	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
		goto fail;

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	unlock_policy_rwsem_write(policy->cpu);

fail:
	cpufreq_cpu_put(policy);
no_policy:
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
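/*
 * Illustrative usage sketch (not part of the original file): a governor
 * asks for the lowest supported frequency at or above a computed value:
 *
 *	unsigned int next_khz = ...;	// the governor's decision
 *
 *	cpufreq_driver_target(policy, next_khz, CPUFREQ_RELATION_L);
 *
 * CPUFREQ_RELATION_L selects the lowest frequency >= next_khz,
 * CPUFREQ_RELATION_H the highest frequency <= next_khz.
 */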
int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;
	unsigned int (*getavg)(struct cpufreq_policy *policy,
			       unsigned int cpu);

	if (cpufreq_disabled())
		return ret;

	rcu_read_lock();
	getavg = rcu_dereference(cpufreq_driver)->getavg;
	rcu_read_unlock();

	if (!getavg)
		return 0;

	policy = cpufreq_cpu_get(policy->cpu);
	if (!policy)
		return -EINVAL;

	ret = getavg(policy, cpu);

	cpufreq_cpu_put(policy);
	return ret;
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
/*
 * when "event" is CPUFREQ_GOV_LIMITS
 */
static int __cpufreq_governor(struct cpufreq_policy *policy,
			      unsigned int event)
{
	int ret;

	/* Must only be defined when the default governor is known to have
	   latency restrictions, e.g. conservative or ondemand.
	   That this is the case is already ensured in Kconfig
	*/
#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
	struct cpufreq_governor *gov = &cpufreq_gov_performance;
#else
	struct cpufreq_governor *gov = NULL;
#endif

	if (policy->governor->max_transition_latency &&
	    policy->cpuinfo.transition_latency >
	    policy->governor->max_transition_latency) {
		if (!gov)
			return -EINVAL;
		else {
			printk(KERN_WARNING "%s governor failed, too long"
			       " transition latency of HW, fallback"
			       " to %s governor\n",
			       policy->governor->name,
			       gov->name);
			policy->governor = gov;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("__cpufreq_governor for CPU %u, event %u\n",
		 policy->cpu, event);
	ret = policy->governor->governor(policy, event);

	if (!ret) {
		if (event == CPUFREQ_GOV_POLICY_INIT)
			policy->governor->initialized++;
		else if (event == CPUFREQ_GOV_POLICY_EXIT)
			policy->governor->initialized--;
	}

	/* we keep one module reference alive for
	   each CPU governed by this CPU */
	if ((event != CPUFREQ_GOV_START) || ret)
		module_put(policy->governor->owner);
	if ((event == CPUFREQ_GOV_STOP) && !ret)
		module_put(policy->governor->owner);

	return ret;
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	governor->initialized = 0;
	err = -EBUSY;
	if (__find_governor(governor->name) == NULL) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;
#endif

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

#ifdef CONFIG_HOTPLUG_CPU
	for_each_present_cpu(cpu) {
		if (cpu_online(cpu))
			continue;
		if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
			strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
	}
#endif

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
	return;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
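/*
 * Illustrative sketch (not part of the original file): a minimal governor
 * module registers and unregisters itself like this; the names are
 * hypothetical and the ->governor() callback body is elided:
 *
 *	static struct cpufreq_governor my_governor = {
 *		.name		= "mygov",
 *		.governor	= my_governor_event,	// handles GOV_* events
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init my_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&my_governor);
 *	}
 *
 *	static void __exit my_gov_exit(void)
 *	{
 *		cpufreq_unregister_governor(&my_governor);
 *	}
 */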
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;
	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * data   : current policy.
 * policy : policy to be set.
 */
static int __cpufreq_set_policy(struct cpufreq_policy *data,
				struct cpufreq_policy *policy)
{
	int ret = 0, failed = 1;
	struct cpufreq_driver *driver;
	int (*verify)(struct cpufreq_policy *policy);
	int (*setpolicy)(struct cpufreq_policy *policy);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
		 policy->min, policy->max);

	memcpy(&policy->cpuinfo, &data->cpuinfo,
	       sizeof(struct cpufreq_cpuinfo));

	if (policy->min > data->max || policy->max < data->min) {
		ret = -EINVAL;
		goto error_out;
	}

	/* verify the cpu speed can be set within this limit */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	verify = driver->verify;
	setpolicy = driver->setpolicy;
	rcu_read_unlock();

	ret = verify(policy);
	if (ret)
		goto error_out;

	/* adjust if necessary - all reasons */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_ADJUST, policy);

	/* adjust if necessary - hardware incompatibility */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_INCOMPATIBLE, policy);

	/* verify the cpu speed can be set within this limit,
	   which might be different from the first one */
	ret = verify(policy);
	if (ret)
		goto error_out;

	/* notification of the new policy */
	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				     CPUFREQ_NOTIFY, policy);

	data->min = policy->min;
	data->max = policy->max;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 data->min, data->max);

	if (setpolicy) {
		data->policy = policy->policy;
		pr_debug("setting range\n");
		ret = setpolicy(policy);
	} else {
		if (policy->governor != data->governor) {
			/* save old, working values */
			struct cpufreq_governor *old_gov = data->governor;

			pr_debug("governor switch\n");

			/* end old governor */
			if (data->governor) {
				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
				__cpufreq_governor(data,
						   CPUFREQ_GOV_POLICY_EXIT);
			}

			/* start new governor */
			data->governor = policy->governor;
			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
					failed = 0;
				else
					__cpufreq_governor(data,
							   CPUFREQ_GOV_POLICY_EXIT);
			}

			if (failed) {
				/* new governor failed, so re-start old one */
				pr_debug("starting governor %s failed\n",
					 data->governor->name);
				if (old_gov) {
					data->governor = old_gov;
					__cpufreq_governor(data,
							   CPUFREQ_GOV_POLICY_INIT);
					__cpufreq_governor(data,
							   CPUFREQ_GOV_START);
				}
				ret = -EINVAL;
				goto error_out;
			}
			/* might be a policy change, too, so fall through */
		}
		pr_debug("governor: change or update limits\n");
		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
	}

error_out:
	return ret;
}
/**
 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
 * @cpu: CPU which shall be re-evaluated
 *
 * Useful for policy notifiers which have different necessities
 * at different times.
 */
int cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
	struct cpufreq_policy policy;
	struct cpufreq_driver *driver;
	unsigned int (*get)(unsigned int cpu);
	int (*target)(struct cpufreq_policy *policy,
		      unsigned int target_freq,
		      unsigned int relation);
	int ret;

	if (!data) {
		ret = -ENODEV;
		goto no_policy;
	}

	if (unlikely(lock_policy_rwsem_write(cpu))) {
		ret = -EINVAL;
		goto fail;
	}

	pr_debug("updating policy for CPU %u\n", cpu);
	memcpy(&policy, data, sizeof(struct cpufreq_policy));
	policy.min = data->user_policy.min;
	policy.max = data->user_policy.max;
	policy.policy = data->user_policy.policy;
	policy.governor = data->user_policy.governor;

	/* BIOS might change freq behind our back
	   -> ask driver for current freq and notify governors about a change */
	rcu_read_lock();
	driver = rcu_dereference(cpufreq_driver);
	get = driver->get;
	target = driver->target;
	rcu_read_unlock();
	if (get) {
		policy.cur = get(cpu);
		if (!data->cur) {
			pr_debug("Driver did not initialize current freq");
			data->cur = policy.cur;
		} else {
			if (data->cur != policy.cur && target)
				cpufreq_out_of_sync(cpu, data->cur,
						    policy.cur);
		}
	}

	ret = __cpufreq_set_policy(data, &policy);

	unlock_policy_rwsem_write(cpu);

fail:
	cpufreq_cpu_put(data);
no_policy:
	return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (dev) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		case CPU_DOWN_PREPARE:
		case CPU_DOWN_PREPARE_FROZEN:
			__cpufreq_remove_dev(dev, NULL);
			break;
		case CPU_DOWN_FAILED:
		case CPU_DOWN_FAILED_FROZEN:
			cpufreq_add_dev(dev, NULL);
			break;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata cpufreq_cpu_notifier = {
	.notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
 *********************************************************************/

/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EBUSY when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    ((!driver_data->setpolicy) && (!driver_data->target)))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (rcu_access_pointer(cpufreq_driver)) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return -EBUSY;
	}
	rcu_assign_pointer(cpufreq_driver, driver_data);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_null_driver;

	if (!(driver_data->flags & CPUFREQ_STICKY)) {
		int i;
		ret = -ENODEV;

		/* check for at least one working CPU */
		for (i = 0; i < nr_cpu_ids; i++)
			if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
				ret = 0;
				break;
			}

		/* if all ->init() calls failed, unregister */
		if (ret) {
			pr_debug("no CPU initialized for driver %s\n",
				 driver_data->name);
			goto err_if_unreg;
		}
	}

	register_hotcpu_notifier(&cpufreq_cpu_notifier);
	pr_debug("driver %s up and running\n", driver_data->name);

	return 0;
err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
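/*
 * Illustrative sketch (not part of the original file): the smallest useful
 * registration, for a hypothetical driver "mydrv" whose callbacks are
 * elided:
 *
 *	static struct cpufreq_driver mydrv_driver = {
 *		.name	= "mydrv",
 *		.owner	= THIS_MODULE,
 *		.init	= mydrv_cpu_init,	// fills policy limits/cur
 *		.verify	= mydrv_verify,		// clamps a candidate policy
 *		.target	= mydrv_target,		// programs the hardware
 *	};
 *
 *	static int __init mydrv_init(void)
 *	{
 *		return cpufreq_register_driver(&mydrv_driver);
 *	}
 *
 * A driver must supply ->verify and ->init plus either ->setpolicy or
 * ->target, or registration fails with -EINVAL (see the checks above).
 */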
/**
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
 * currently not initialised.
 */
int cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;
	struct cpufreq_driver *old_driver;

	rcu_read_lock();
	old_driver = rcu_access_pointer(cpufreq_driver);
	if (!old_driver || (driver != old_driver)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	pr_debug("unregistering driver %s\n", driver->name);

	subsys_interface_unregister(&cpufreq_interface);
	unregister_hotcpu_notifier(&cpufreq_cpu_notifier);

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	rcu_assign_pointer(cpufreq_driver, NULL);
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	int cpu;

	if (cpufreq_disabled())
		return -ENODEV;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_policy_cpu, cpu) = -1;
		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
	}

	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!cpufreq_global_kobject);
	register_syscore_ops(&cpufreq_syscore_ops);

	return 0;
}
core_initcall(cpufreq_core_init);