/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
/*
 * dbs is used in this file as a shorthand for demand-based switching.
 * It helps to keep variable names smaller and simpler.
 */
#define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
#define MICRO_FREQUENCY_UP_THRESHOLD		(95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
#define MIN_FREQUENCY_UP_THRESHOLD		(11)
#define MAX_FREQUENCY_UP_THRESHOLD		(100)
/*
 * The polling frequency of this governor depends on the capability of
 * the processor. The default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10 ms, using an appropriate sampling rate.
 * For CPUs with transition latency > 10 ms (mostly drivers with
 * CPUFREQ_ETERNAL), this governor will not work.
 * All times here are in microseconds (us).
 */
#define MIN_SAMPLING_RATE_RATIO			(2)
static unsigned int min_sampling_rate;

#define LATENCY_MULTIPLIER			(1000)
#define MIN_LATENCY_MULTIPLIER			(100)
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
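
/*
 * Example (illustrative numbers, not from the original source): a driver
 * reporting a transition latency of 10,000 ns gives latency = 10 us, so
 * the default sampling rate chosen at governor start becomes
 * max(min_sampling_rate, 10 * LATENCY_MULTIPLIER) = 10,000 us, while the
 * enforced floor is MIN_LATENCY_MULTIPLIER * 10 = 1,000 us.
 */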
static void do_dbs_timer(struct work_struct *work);
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

/* Sampling types */
enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_nice;
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;
	struct cpufreq_frequency_table *freq_table;
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
	int cpu;
	unsigned int sample_type:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
 * different CPUs. It protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct	*kondemand_wq;
static struct dbs_tuners {
	unsigned int sampling_rate;
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
	unsigned int powersave_bias;
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
};
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	/* -1ULL means NO_HZ idle accounting is unavailable; fall back */
	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);

	return idle_time;
}
/*
 * Find the right freq to be set now with powersave_bias on.
 * Returns the freq_hi to be used right now and will set freq_hi_jiffies,
 * freq_lo, and freq_lo_jiffies in the percpu area for averaging freqs.
 */
static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
					  unsigned int freq_next,
					  unsigned int relation)
{
	unsigned int freq_req, freq_reduc, freq_avg;
	unsigned int freq_hi, freq_lo;
	unsigned int index = 0;
	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
						   policy->cpu);

	if (!dbs_info->freq_table) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_next;
	}

	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
			relation, &index);
	freq_req = dbs_info->freq_table[index].frequency;
	freq_reduc = freq_req * dbs_tuners_ins.powersave_bias / 1000;
	freq_avg = freq_req - freq_reduc;

	/* Find freq bounds for freq_avg in freq_table */
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_H, &index);
	freq_lo = dbs_info->freq_table[index].frequency;
	index = 0;
	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
			CPUFREQ_RELATION_L, &index);
	freq_hi = dbs_info->freq_table[index].frequency;

	/* Find out how long we have to be in hi and lo freqs */
	if (freq_hi == freq_lo) {
		dbs_info->freq_lo = 0;
		dbs_info->freq_lo_jiffies = 0;
		return freq_lo;
	}
	jiffies_total = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
	jiffies_hi += ((freq_hi - freq_lo) / 2);
	jiffies_hi /= (freq_hi - freq_lo);
	jiffies_lo = jiffies_total - jiffies_hi;
	dbs_info->freq_lo = freq_lo;
	dbs_info->freq_lo_jiffies = jiffies_lo;
	dbs_info->freq_hi_jiffies = jiffies_hi;
	return freq_hi;
}
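
/*
 * Worked example (illustrative numbers): with powersave_bias = 100 (10%),
 * a request for 2000000 kHz is reduced to freq_avg = 1800000 kHz. If the
 * table only offers 1600000 and 2000000, then freq_lo = 1600000 and
 * freq_hi = 2000000. With jiffies_total = 10, jiffies_hi =
 * (200000 * 10 + 400000 / 2) / 400000 = 5 (integer division), so the
 * governor approximates 1800000 kHz by spending 5 jiffies at each bound.
 */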
static void ondemand_powersave_bias_init_cpu(int cpu)
{
	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
	dbs_info->freq_lo = 0;
}

static void ondemand_powersave_bias_init(void)
{
	int i;
	for_each_online_cpu(i) {
		ondemand_powersave_bias_init_cpu(i);
	}
}
/************************** sysfs interface ************************/
static ssize_t show_sampling_rate_max(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
		    "sysfs file is deprecated - used by: %s\n", current->comm);
	return sprintf(buf, "%u\n", -1U);
}

static ssize_t show_sampling_rate_min(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", min_sampling_rate);
}

define_one_global_ro(sampling_rate_max);
define_one_global_ro(sampling_rate_min);
/* cpufreq_ondemand Governor Tunables */
#define show_one(file_name, object)					\
static ssize_t show_##file_name						\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
}
show_one(sampling_rate, sampling_rate);
show_one(up_threshold, up_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(powersave_bias, powersave_bias);
/*** delete after deprecation time ***/

#define DEPRECATION_MSG(file_name)					\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");

#define show_one_old(file_name)						\
static ssize_t show_##file_name##_old					\
(struct cpufreq_policy *unused, char *buf)				\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return show_##file_name(NULL, NULL, buf);			\
}
show_one_old(sampling_rate);
show_one_old(up_threshold);
show_one_old(ignore_nice_load);
show_one_old(powersave_bias);
show_one_old(sampling_rate_min);
show_one_old(sampling_rate_max);

cpufreq_freq_attr_ro_old(sampling_rate_min);
cpufreq_freq_attr_ro_old(sampling_rate_max);

/*** delete after deprecation time ***/
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
			input < MIN_FREQUENCY_UP_THRESHOLD) {
		return -EINVAL;
	}

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.up_threshold = input;
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	mutex_lock(&dbs_mutex);
	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
		mutex_unlock(&dbs_mutex);
		return count;
	}
	dbs_tuners_ins.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->prev_cpu_wall);
		if (dbs_tuners_ins.ignore_nice)
			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
	}
	mutex_unlock(&dbs_mutex);

	return count;
}
static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
				    const char *buf, size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);

	if (ret != 1)
		return -EINVAL;

	if (input > 1000)
		input = 1000;

	mutex_lock(&dbs_mutex);
	dbs_tuners_ins.powersave_bias = input;
	ondemand_powersave_bias_init();
	mutex_unlock(&dbs_mutex);

	return count;
}
define_one_global_rw(sampling_rate);
define_one_global_rw(up_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(powersave_bias);

static struct attribute *dbs_attributes[] = {
	&sampling_rate_max.attr,
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "ondemand",
};
/*** delete after deprecation time ***/

#define write_one_old(file_name)					\
static ssize_t store_##file_name##_old					\
(struct cpufreq_policy *unused, const char *buf, size_t count)		\
{									\
	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
		    "interface is deprecated - " #file_name "\n");	\
	return store_##file_name(NULL, NULL, buf, count);		\
}
write_one_old(sampling_rate);
write_one_old(up_threshold);
write_one_old(ignore_nice_load);
write_one_old(powersave_bias);

cpufreq_freq_attr_rw_old(sampling_rate);
cpufreq_freq_attr_rw_old(up_threshold);
cpufreq_freq_attr_rw_old(ignore_nice_load);
cpufreq_freq_attr_rw_old(powersave_bias);

static struct attribute *dbs_attributes_old[] = {
	&sampling_rate_max_old.attr,
	&sampling_rate_min_old.attr,
	&sampling_rate_old.attr,
	&up_threshold_old.attr,
	&ignore_nice_load_old.attr,
	&powersave_bias_old.attr,
	NULL
};

static struct attribute_group dbs_attr_group_old = {
	.attrs = dbs_attributes_old,
	.name = "ondemand",
};

/*** delete after deprecation time ***/
/************************** sysfs end ************************/
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
	unsigned int max_load_freq;

	struct cpufreq_policy *policy;
	unsigned int j;

	this_dbs_info->freq_lo = 0;
	policy = this_dbs_info->cur_policy;

	/*
	 * Every sampling_rate, we check if the current idle time is less
	 * than 20% (default); if so, we try to increase frequency.
	 * Every sampling_rate, we also look for the lowest frequency which
	 * can sustain the load while keeping idle time over 30%. If such a
	 * frequency exists, we try to decrease to this frequency.
	 *
	 * Any frequency increase takes it to the maximum frequency.
	 * Frequency reduction happens at minimum steps of
	 * 5% (default) of current frequency
	 */

	/* Get Absolute Load - in terms of freq */
	max_load_freq = 0;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info_s *j_dbs_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load, load_freq;
		int freq_avg;

		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);

		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
				j_dbs_info->prev_cpu_wall);
		j_dbs_info->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
				j_dbs_info->prev_cpu_idle);
		j_dbs_info->prev_cpu_idle = cur_idle_time;

		if (dbs_tuners_ins.ignore_nice) {
			cputime64_t cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
					j_dbs_info->prev_cpu_nice);
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		load = 100 * (wall_time - idle_time) / wall_time;

		freq_avg = __cpufreq_driver_getavg(policy, j);
		if (freq_avg <= 0)
			freq_avg = policy->cur;

		load_freq = load * freq_avg;
		if (load_freq > max_load_freq)
			max_load_freq = load_freq;
	}
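
	/*
	 * Example (illustrative numbers): a CPU that was busy 90% of the
	 * window at an average of 2000000 kHz contributes
	 * load_freq = 90 * 2000000 = 180000000. Comparing that against
	 * up_threshold * policy->cur (80 * 2000000 = 160000000 with the
	 * default threshold) keeps the test scale-free in both load and
	 * frequency, so here we would ramp up.
	 */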
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
		/* if we are already at full speed then break out early */
		if (!dbs_tuners_ins.powersave_bias) {
			if (policy->cur == policy->max)
				return;

			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
		} else {
			int freq = powersave_bias_target(policy, policy->max,
					CPUFREQ_RELATION_H);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
		return;
	}

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the lowest frequency that can support
	 * the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
	if (max_load_freq <
	    (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
	    policy->cur) {
		unsigned int freq_next;
		freq_next = max_load_freq /
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!dbs_tuners_ins.powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		} else {
			int freq = powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			__cpufreq_driver_target(policy, freq,
				CPUFREQ_RELATION_L);
		}
	}
}
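
/*
 * Example (illustrative numbers): with the defaults up_threshold = 80 and
 * down_differential = 10, max_load_freq = 30 * 2000000 = 60000000 at
 * policy->cur = 2000000 kHz is below 70 * 2000000, so
 * freq_next = 60000000 / 70 = 857142 kHz, which CPUFREQ_RELATION_L then
 * rounds up to the nearest supported table frequency.
 */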
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;
	int sample_type = dbs_info->sample_type;

	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;
	mutex_lock(&dbs_info->timer_mutex);

	/* Common NORMAL_SAMPLE setup */
	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	if (!dbs_tuners_ins.powersave_bias ||
	    sample_type == DBS_NORMAL_SAMPLE) {
		dbs_check_cpu(dbs_info);
		if (dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			dbs_info->sample_type = DBS_SUB_SAMPLE;
			delay = dbs_info->freq_hi_jiffies;
		}
	} else {
		__cpufreq_driver_target(dbs_info->cur_policy,
			dbs_info->freq_lo, CPUFREQ_RELATION_H);
	}
	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}
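
/*
 * Example (illustrative, assuming HZ = 1000): sampling_rate = 10000 us
 * gives delay = 10 jiffies; if jiffies is currently 12345, then
 * delay -= 12345 % 10 leaves 5, so the timer next fires at jiffy 12350.
 * Every CPU rounds the same way, so they all sample on multiples of the
 * sampling period, i.e. on nearly the same jiffy.
 */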
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on the same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
		delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	cancel_delayed_work_sync(&dbs_info->work);
}
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
		if (rc) {
			mutex_unlock(&dbs_mutex);
			return rc;
		}

		dbs_enable++;
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice) {
				j_dbs_info->prev_cpu_nice =
						kstat_cpu(j).cpustat.nice;
			}
		}
		this_dbs_info->cpu = cpu;
		ondemand_powersave_bias_init_cpu(cpu);
		/*
		 * Start the timer schedule work when this governor
		 * is used for the first time.
		 */
		if (dbs_enable == 1) {
			unsigned int latency;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/* policy latency is in ns; convert it to us first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);
		}
		mutex_unlock(&dbs_mutex);

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_timer_init(this_dbs_info);
		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
		mutex_destroy(&this_dbs_info->timer_mutex);
		dbs_enable--;
		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(this_dbs_info->cur_policy,
				policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);
		break;
	}
	return 0;
}
static int __init cpufreq_gov_dbs_init(void)
{
	int err;
	cputime64_t wall;
	u64 idle_time;
	int cpu = get_cpu();

	idle_time = get_cpu_idle_time_us(cpu, &wall);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_tuners_ins.down_differential =
					MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
		/*
		 * In the no_hz/micro accounting case we set the minimum
		 * frequency not depending on HZ, but fixed (very low). The
		 * deferrable timer might skip some samples if idle/sleeping
		 * as needed.
		 */
		min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		/* For correct statistics, we need 10 ticks for each measure */
		min_sampling_rate =
			MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
	}
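
	/*
	 * Example (illustrative, assuming HZ = 250): jiffies_to_usecs(10)
	 * = 40000 us, so the jiffy-based floor above is 2 * 40000 =
	 * 80000 us, whereas the micro-accounting floor is a fixed 10000 us.
	 */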
	kondemand_wq = create_workqueue("kondemand");
	if (!kondemand_wq) {
		printk(KERN_ERR "Creation of kondemand failed\n");
		return -EFAULT;
	}
	err = cpufreq_register_governor(&cpufreq_gov_ondemand);
	if (err)
		destroy_workqueue(kondemand_wq);

	return err;
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
	destroy_workqueue(kondemand_wq);
}
MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
	"Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);