/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
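/*
 * Note on the conversions above (explanatory, derived from the macros): the
 * ACPI PM timer runs at PM_TIMER_FREQUENCY = 3579545 Hz, i.e. roughly 3.58
 * ticks per microsecond, so one tick is about 279 ns.  For example,
 * US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 ticks for a 100 us
 * latency.
 */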
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif

#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
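/*
 * Worked example for the default mask (an illustration, not new policy):
 * at HZ=250, bm_history = (1U << (250 / 25)) - 1 = (1 << 10) - 1 = 0x3FF,
 * i.e. 10 jiffies of history = 40ms, consistent with the table above.
 */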
static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}
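/*
 * The two helpers above handle PM timer rollover: the timer is a free-running
 * counter that is either 24-bit or 32-bit wide (ACPI_FADT_32BIT_TIMER).  When
 * the end reading t2 is below the start reading t1, the counter wrapped, so
 * the elapsed time is the distance to the wrap point plus t2.  E.g. for a
 * 24-bit timer with t1 = 0x00FFFFF0 and t2 = 0x10, the result is
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x10) & 0x00FFFFFF = 0x1F ticks.
 */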
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}
static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	u64 perf_flags;

	/* Don't trace irqs off for idle */
	stop_critical_timings();
	perf_flags = hw_perf_save_disable();
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	hw_perf_restore(perf_flags);
	start_critical_timings();
}
#endif /* !CONFIG_CPU_IDLE */
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif
/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;

		/*FALL THROUGH*/
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif
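/*
 * Background for the check above: mark_tsc_unstable() tells the timekeeping
 * core to stop trusting the TSC as a clocksource.  On CPUs without the
 * NONSTOP_TSC feature the TSC stops counting in deeper C-states, so any
 * C-state deeper than C1 makes TSC-based time drift between CPUs.
 */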
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = __get_cpu_var(processors);
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		return;
	}
	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}
#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif
	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}
	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save) {
			pm_idle_save(); /* enables IRQs */
		} else {
			acpi_safe_halt();
			local_irq_enable();
		}

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;

		break;
	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;
	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;
#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif
	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!
					    (pr->power.bm_activity & cx->
					     promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}
	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}
      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}
	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}
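/*
 * Summary of the policy implemented above (explanatory note): each Cx state
 * keeps promotion/demotion counters.  Long sleeps (sleep_ticks above the
 * promotion threshold) eventually promote to a deeper state, provided bus
 * master activity and the PM QoS latency requirement allow it; short sleeps
 * or bus master activity demote to a shallower state.  So, for example, a
 * CPU that keeps sleeping longer than the C2 threshold while DMA is quiet
 * will step C1 -> C2 -> C3 over successive idle invocations.
 */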
static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}

#endif /* !CONFIG_CPU_IDLE */
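/*
 * Illustration of the chains built by acpi_processor_set_power_policy()
 * (assuming valid C1, C2 and C3 states):
 *
 *   demotion:   C3 -> C2 -> C1   (threshold: one short sleep)
 *   promotion:  C1 -> C2 -> C3   (threshold: 10 long sleeps for C1,
 *                                 4 for C2 and above)
 *
 * bm_history additionally gates promotion into / demotion out of C3.
 */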
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
					(idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
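/*
 * For reference, the _CST object parsed above has this shape (per the ACPI
 * spec; the concrete values here are made up for illustration):
 *
 *   Name (_CST, Package() {
 *       3,                                                   // entry count
 *       Package() { Register(FFixedHW, ...),         1,  1, 1000 },  // C1
 *       Package() { Register(SystemIO, 8, 0, 0x414), 2, 50,  500 },  // C2
 *       Package() { Register(SystemIO, 8, 0, 0x415), 3, 90,  250 }   // C3
 *   })
 *
 * Each sub-package is { register, type, latency (us), power (mW) }, which is
 * exactly the element order the loop above checks.
 */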
static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * Fall back to the default idle loop, when pm_idle_save had
	 * been initialized.
	 */
	if (pm_idle_save) {
		pm_idle = pm_idle_save;
		/* Relies on interrupts forcing exit from idle. */
		synchronize_sched();
	}

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}
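/*
 * BM_RLD ("bus master reload") makes bus master requests kick the CPU out of
 * C3, which is required while in C3 but pointless otherwise.  The helper
 * above therefore writes the register only on transitions into or out of a
 * C3-type target, caching the current value in pr->flags.bm_rld_set to avoid
 * a slow ACPI register access on every idle entry.
 */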
/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	u64 pctrl;

	/* Don't trace irqs off for idle */
	stop_critical_timings();
	pctrl = hw_perf_save_disable();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	hw_perf_restore(pctrl);
	start_critical_timings();
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
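/*
 * Note: like the other ->enter() callbacks below, the return value is the
 * time spent in the state in microseconds, derived from two PM timer reads
 * via ticks_elapsed_in_us(); cpuidle uses it for its residency bookkeeping.
 */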
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
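/*
 * The c3_lock/c3_cpu_count pairing above implements the ARB_DIS protocol:
 * bus master arbitration may only be disabled once the last online CPU has
 * entered C3, and must be re-enabled as soon as the first CPU wakes up,
 * hence the counter increment and ARB_DIS write under the lock on entry and
 * the mirrored ARB_DIS clear and decrement on exit.
 */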
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
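/*
 * Example of the mapping performed above (illustrative numbers): a C2 state
 * with cx->latency = 50 us and the default latency_factor of 2 becomes a
 * cpuidle state with exit_latency = 50 and target_residency = 100 us, named
 * "C2", entered through acpi_idle_enter_simple().
 */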
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

#ifdef CONFIG_CPU_IDLE
	cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}