/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 *  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
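
/*
 * Note on the masks above: the Intel PERF_CTL/PERF_STATUS MSRs carry the
 * P-state value in their low 16 bits, hence INTEL_MSR_RANGE; the
 * corresponding AMD MSRs use a 3-bit P-state number, hence AMD_MSR_RANGE.
 * Setting bit 25 of MSR_K7_HWCR disables Core Performance Boost, which is
 * what the boost code below toggles.
 */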

struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};

static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;
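
/*
 * Read back whether boost is currently enabled on @cpu: on Intel the
 * boost-disable bit lives in MISC_ENABLE (turbo disable), on AMD in HWCR
 * (CPB disable); in both cases a clear bit means boost is on.
 */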
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}
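
/*
 * Common store handler for the global "boost" and the legacy per-policy
 * "cpb" sysfs files: parse a 0/1 value and, if it differs from the current
 * state, rewrite the boost-disable bit on all online CPUs.
 */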
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}
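
/*
 * From userspace the global knob is toggled through sysfs, e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpufreq/boost
 *
 * (path assuming the attribute is created on cpufreq_global_kobject, as
 * done in acpi_cpufreq_boost_init() below).
 */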

static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}
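
/*
 * Map a raw PERF_STATUS value back to a table frequency: mask off
 * everything but the P-state field, then look the value up via the
 * perf-state index stored in each freq_table entry.
 */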
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}
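
/*
 * A drv_cmd describes one register access (MSR or I/O port, depending on
 * cpu_feature) together with the set of CPUs it must be performed on; it
 * is handed to the do_drv_read/do_drv_write callbacks via the
 * smp_call_function machinery.
 */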
struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}
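
/*
 * smp_call_function_many() does not run the callback on the calling CPU,
 * so do_drv_write() is invoked directly when the current CPU is part of
 * the target mask; get_cpu()/put_cpu() keep us from migrating meanwhile.
 */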
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);
	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
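
/*
 * The target() callback performs one complete transition: pick the table
 * entry matching the request, skip the write if the hardware is already
 * there (unless a resume forces it), issue PRECHANGE notifiers, write the
 * control value, optionally verify it when acpi_pstate_strict is set, and
 * finish with POSTCHANGE notifiers.
 */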
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	unsigned int i;
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
	     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	}

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	for_each_cpu(i, policy->cpus) {
		freqs.cpu = i;
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	}
	perf->state = next_perf_state;

out:
	return result;
}

static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}
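
/*
 * Guess the current P-state from the measured cpu_khz: the test
 * 2 * cpu_khz > (freqn + freq) asks whether cpu_khz lies above the
 * midpoint of two adjacent table frequencies, i.e. whether the faster
 * state freq is the closer match (states are sorted fastest first).
 */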
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_nb = {
	.notifier_call		= boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

#ifdef CONFIG_SMP
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
#endif

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif
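
	/*
	 * On AMD hardware-P-state systems each core can switch
	 * independently, so a per-CPU policy is used above regardless of
	 * what the BIOS _PSD tables claim; acpi_pstate_strict restores
	 * the BIOS-provided coordination for debugging.
	 */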

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}
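
	/*
	 * Build the cpufreq table from the ACPI states: _PSS entries are
	 * sorted fastest first, so any entry that is not strictly slower
	 * than its predecessor is a duplicate and gets skipped.
	 */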
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};
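
/*
 * Boost support is probed via CPU feature flags: CPB advertises AMD Core
 * Performance Boost, IDA (Intel Dynamic Acceleration) advertises turbo.
 * When present, a per-CPU MSR buffer is allocated, all online CPUs are
 * forced to a consistent boost state, and a hotplug notifier keeps newly
 * onlined CPUs in sync.
 */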
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}

static void __exit acpi_cpufreq_boost_exit(void)
{
	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
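
/*
 * Example: load the driver with strict checking enabled, either as a
 * module parameter or on the kernel command line:
 *
 *	modprobe acpi-cpufreq acpi_pstate_strict=1
 *	acpi-cpufreq.acpi_pstate_strict=1
 */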

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

MODULE_ALIAS("acpi");