/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "
enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};
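/*
 * Bits of the PERF_CTL MSR that select the target P-state: Intel uses
 * the low 16 bits, while the AMD hardware-P-state interface uses a
 * 3-bit P-state index.
 */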
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;
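/*
 * Report whether boost is currently enabled on @cpu.  Note the inverted
 * sense: the Intel TURBO_DISABLE and AMD CPB_DIS bits *disable* boost
 * when set, so an unset bit means "enabled".
 */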
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}
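/*
 * Set or clear the boost-disable bit on every CPU in @cpumask in one
 * batch: read all MSRs via rdmsr_on_cpus(), update the cached copies
 * (enable clears the disable bit), then write them back with
 * wrmsr_on_cpus().
 */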
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}
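/*
 * Common store handler behind both the global "boost" file and the
 * per-policy "cpb" file: parse a 0/1 value and, if it changes the
 * current state, propagate it to all online CPUs under
 * get_online_cpus().
 */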
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}
static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
#endif
static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}
static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
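/*
 * extract_io()/extract_msr() map a raw value read from the P-state
 * status register (or PERF_CTL MSR) back to a frequency table entry in
 * kHz, so callers can compare hardware state against cached values.
 */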
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].driver_data].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}
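/*
 * A drv_cmd describes one P-state register access: the access type,
 * the CPUs it applies to, the MSR or I/O-port address, and the value
 * read or to be written.
 */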
struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				  &cmd->val,
				  (u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				   cmd->val,
				   (u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
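/*
 * drv_read() needs the value from just one CPU of the mask, hence
 * smp_call_function_any().  drv_write() must reach every CPU in the
 * mask: it runs do_drv_write() locally first when the current CPU is
 * included, then fans out to the others via smp_call_function_many().
 */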
static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}
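/*
 * Read the current raw P-state control value for the first CPU in
 * @mask, using whichever access method that CPU was configured with at
 * init time.
 */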
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
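/*
 * ->target() entry point: map the requested frequency to a table index,
 * skip the hardware write if we are already in that P-state (unless
 * resuming), and otherwise issue the vendor-specific register write
 * bracketed by PRECHANGE/POSTCHANGE notifications.  With
 * acpi_pstate_strict set, poll the hardware to confirm the transition.
 */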
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}
	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				 next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				 next_perf_state);
			goto out;
		}
	}
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);
	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				 policy->cpu);
			result = -EAGAIN;
			freqs.new = freqs.old;
		}
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	if (!result)
		perf->state = next_perf_state;

out:
	return result;
}
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}
static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
static int boost_notify(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_nb = {
	.notifier_call		= boost_notify,
};
/**
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform the OS about it. If not
 * detected, this has a side effect of making the CPU run at a different
 * speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif
	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;
	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif
	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}
	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}
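	/*
	 * Allocate one extra entry beyond state_count for the
	 * CPUFREQ_TABLE_END terminator.
	 */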
	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
				   (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}
	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}
	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			 (i == perf->state ? '*' : ' '), i,
			 (u32) perf->states[i].core_frequency,
			 (u32) perf->states[i].power,
			 (u32) perf->states[i].transition_latency);
	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}
static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (cpufreq_sysfs_create_file(&(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}
static void __exit acpi_cpufreq_boost_exit(void)
{
	cpufreq_sysfs_remove_file(&(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}
static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}
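/*
 * acpi_pstate_strict is also writable at runtime; with the usual module
 * sysfs layout for parameters that would be, e.g.:
 *   echo 1 > /sys/module/acpi_cpufreq/parameters/acpi_pstate_strict
 */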
module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);
static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");