/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>

#define SAMPLE_COUNT		3

#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d
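
/*
 * The 0x66a-0x66d registers above appear to be the Baytrail (Silvermont)
 * specific MSRs holding the min/max P-state ratios and the matching
 * voltage IDs; the core_* helpers further down use the architectural
 * MSR_PLATFORM_INFO / MSR_NHM_TURBO_RATIO_LIMIT registers instead.
 */
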
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}

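/*
 * Most intermediate values in this driver are kept in a simple fixed-point
 * format with FRAC_BITS fractional bits: int_tofp(1) is 1 << FRAC_BITS, and
 * div_fp(int_tofp(1), int_tofp(4)), for example, represents 0.25.  mul_fp()
 * and div_fp() rescale their results so they stay in the same format.
 */
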
struct sample {
	int32_t core_pct_busy;
	/* ... */
};

struct cpudata {
	struct timer_list timer;
	struct pstate_data pstate;
	ktime_t last_sample_time;
	struct sample sample;
	/* ... */
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;

static struct perf_limits limits = {
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};

static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = setpoint - busy;
}

static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}

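/*
 * pid_calc() is a conventional discrete PID step: the error is
 * (setpoint - busy) in fixed point, the integral term is clamped to +/-30
 * to limit windup, and half an LSB is added before fp_toint() so the result
 * is rounded rather than truncated.  The caller treats a negative result as
 * "busier than the setpoint" and raises the P state accordingly.
 */
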
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}

static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}

/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.no_turbo = clamp_t(int, input, 0, 1);
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		limits.no_turbo = limits.turbo_disabled;
	}
	return count;
}

static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
	return count;
}

static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.min_perf_pct = clamp_t(int, input, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
	return count;
}

show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);

static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static struct kobject *intel_pstate_kobject;

static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}

/************************** sysfs end ************************/

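/*
 * Once registered, the attributes above normally show up under
 * /sys/devices/system/cpu/intel_pstate/, so, for example,
 *   echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 * caps the requested performance at 80%, combined with any limit already
 * imposed by the cpufreq policy in intel_pstate_set_policy() below.
 */
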
static int byt_get_min_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}

static int byt_get_max_pstate(void)
{
	u64 value;
	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;
	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}

static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;
	wrmsrl(MSR_IA32_PERF_CTL, val);
}

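/*
 * Unlike the core variant, the Baytrail path has to program a voltage ID
 * together with the ratio: the VID is linearly interpolated between vid.min
 * and vid.max according to how far the requested P state sits above the
 * minimum ratio, and the dedicated turbo VID is used for turbo ratios.
 */
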
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			 cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;
	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}

static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}

static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		/* ... */
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};

static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		/* ... */
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};

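/*
 * Each supported CPU family supplies one of these parameter sets: the PID
 * tunings plus the callbacks used to read the ratio limits and to program a
 * P state.  Only the Baytrail set provides get_vid(), since only that path
 * has to write a voltage along with the ratio.
 */
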
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
		       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
		       cpu->pstate.min_pstate, max_perf);
}

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}

static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}

static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target = cpu->pstate.current_pstate - steps;

	intel_pstate_set_pstate(cpu, target);
}

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	sprintf(cpu->name, "Intel 2nd generation core");

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}

static inline void intel_pstate_calc_busy(struct cpudata *cpu,
					  struct sample *sample)
{
	int64_t core_pct;
	int32_t rem;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);

	if ((rem << 1) >= int_tofp(sample->mperf))
		core_pct += 1;

	sample->freq = fp_toint(
		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}

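/*
 * core_pct_busy ends up as 100 * delta_aperf / delta_mperf in fixed point,
 * i.e. the percentage of the maximum non-turbo frequency actually delivered
 * while not idle, rounded to the nearest unit via the remainder check above.
 * The reported frequency is that percentage applied to max_pstate, in kHz
 * (each P state step corresponds to 100 MHz).
 */
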
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	aperf = aperf >> FRAC_BITS;
	mperf = mperf >> FRAC_BITS;

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu, &cpu->sample);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}

static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = pid_params.sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}

static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}

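/*
 * The raw busy value is rescaled by max_pstate/current_pstate so that the
 * PID setpoint is expressed relative to the maximum non-turbo P state rather
 * than whatever ratio happens to be programmed.  If the deferrable timer
 * fired much later than the nominal sample period (a mostly idle CPU), the
 * value is further scaled down by sample_time/duration so that a short busy
 * burst is not mistaken for sustained load.
 */
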
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled = intel_pstate_get_scaled_busy(cpu);
	signed int ctl = pid_calc(&cpu->pid, busy_scaled);
	int steps = abs(ctl);

	/* negative control output means we are above the setpoint */
	if (ctl < 0)
		intel_pstate_pstate_increase(cpu, steps);
	else
		intel_pstate_pstate_decrease(cpu, steps);
}

static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			    fp_toint(intel_pstate_get_scaled_busy(cpu)),
			    cpu->pstate.current_pstate,
			    sample->mperf,
			    sample->aperf,
			    sample->freq);

	intel_pstate_set_sample_time(cpu);
}

#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);

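/*
 * The model numbers above are family-6 CPUID models, spanning roughly the
 * Sandy Bridge (0x2a/0x2d) through Haswell/Broadwell era parts; 0x37
 * (Baytrail/Silvermont) is the only entry using the byt_params callbacks.
 */
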
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;
	const struct x86_cpu_id *id;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}

static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct cpudata *cpu = all_cpu_data[cpu_num];

	if (!cpu)
		return 0;
	return cpu->sample.freq;
}

static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = limits.turbo_disabled;
		return 0;
	}

	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}

static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;

	del_timer(&all_cpu_data[cpu]->timer);
	kfree(all_cpu_data[cpu]);
	all_cpu_data[cpu] = NULL;
	return 0;
}

static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;
	u64 misc_en;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
	    cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
		limits.turbo_disabled = 1;
	}

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}

static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.exit		= intel_pstate_cpu_exit,
	.name		= "intel_pstate",
};

static int __initdata no_load;

static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}

static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}

static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}

#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}
		kfree(pss);
	}

	return true;
}

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled
	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
		    && intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}

#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */

static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);

static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
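
/*
 * Booting with "intel_pstate=disable" on the kernel command line sets
 * no_load, so intel_pstate_init() bails out before registering the cpufreq
 * driver and another scaling driver can take over instead.
 */
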
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");