/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/stackprotector.h>

#include <trace/events/power.h>
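
/*
 * When non-zero, the idle loop polls for a pending reschedule instead of
 * entering an architecture or cpuidle low-power state.
 */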
static int __read_mostly cpu_idle_force_poll;
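
/*
 * cpu_idle_poll_ctrl - reference-counted switch for forced idle polling.
 * Enable and disable calls must be balanced; a negative count is a bug.
 */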
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}
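
/*
 * Boot-time control: "nohlt" forces the polling idle loop, "hlt" restores
 * the default behaviour of using the arch/cpuidle idle states.
 */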
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif
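
/*
 * cpu_idle_poll - busy-wait with interrupts enabled until a reschedule is
 * pending. Used when polling is forced or a broadcast-timer wakeup (IPI)
 * is known to be imminent.
 */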
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
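/*
 * Default arch_cpu_idle(): an architecture that provides no idle routine
 * falls back to the polling loop with interrupts enabled.
 */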
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also if we detected in the wakeup from idle
			 * path that the tick broadcast device expired
			 * for us, we don't want to go deep idle as we
			 * know that the IPI is going to arrive right
			 * away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
				cpu_idle_poll();
			} else {
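				/*
				 * current_clr_polling_and_test() clears
				 * TIF_POLLING_NRFLAG and re-checks for a
				 * pending reschedule: once polling is off,
				 * wakers must send an IPI, so catch a wakeup
				 * that raced with clearing the flag before
				 * entering a deeper idle state.
				 */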
				if (!current_clr_polling_and_test()) {
					stop_critical_timings();
					rcu_idle_enter();
					if (cpuidle_idle_call())
						arch_cpu_idle();
					if (WARN_ON_ONCE(irqs_disabled()))
						local_irq_enable();
					rcu_idle_exit();
					start_critical_timings();
				} else {
					local_irq_enable();
				}
				__current_set_polling();
			}
			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}
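
/*
 * cpu_startup_entry - entry point for each CPU's idle task. Sets up
 * polling state and the architecture hooks, then enters the idle loop
 * and never returns.
 */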
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	__current_set_polling();
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}