1 #ifndef _LINUX_SCHED_IDLE_H
2 #define _LINUX_SCHED_IDLE_H
4 #include <linux/sched.h>
13 extern void wake_up_if_idle(int cpu);
16 * Idle thread specific functions to determine the need_resched
19 #ifdef TIF_POLLING_NRFLAG
20 static inline int tsk_is_polling(struct task_struct *p)
22 return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
25 static inline void __current_set_polling(void)
27 set_thread_flag(TIF_POLLING_NRFLAG);
30 static inline bool __must_check current_set_polling_and_test(void)
32 __current_set_polling();
35 * Polling state must be visible before we test NEED_RESCHED,
36 * paired by resched_curr()
38 smp_mb__after_atomic();
40 return unlikely(tif_need_resched());
43 static inline void __current_clr_polling(void)
45 clear_thread_flag(TIF_POLLING_NRFLAG);
48 static inline bool __must_check current_clr_polling_and_test(void)
50 __current_clr_polling();
53 * Polling state must be visible before we test NEED_RESCHED,
54 * paired by resched_curr()
56 smp_mb__after_atomic();
58 return unlikely(tif_need_resched());
62 static inline int tsk_is_polling(struct task_struct *p) { return 0; }
63 static inline void __current_set_polling(void) { }
64 static inline void __current_clr_polling(void) { }
66 static inline bool __must_check current_set_polling_and_test(void)
68 return unlikely(tif_need_resched());
70 static inline bool __must_check current_clr_polling_and_test(void)
72 return unlikely(tif_need_resched());
/*
 * Stop polling and fold any pending need_resched into the preempt
 * count.  Called when leaving the polling idle loop.
 */
static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
91 #endif /* _LINUX_SCHED_IDLE_H */