/*
 * include/linux/sched/idle.h
 *
 * Scheduler idle-state helpers, split out of <linux/sched.h> as part of
 * the sched/headers reorganization.
 */
1 #ifndef _LINUX_SCHED_IDLE_H
2 #define _LINUX_SCHED_IDLE_H
3
4 #include <linux/sched.h>
5
/*
 * How idle a CPU is, from the scheduler's point of view.
 * The values are spelled out explicitly below; they match the implicit
 * 0..2 numbering, and CPU_MAX_IDLE_TYPES counts the real idle types.
 * (Exact semantics of each state live with the load-balancing code that
 * consumes this enum, not in this header.)
 */
enum cpu_idle_type {
	CPU_IDLE	= 0,
	CPU_NOT_IDLE	= 1,
	CPU_NEWLY_IDLE	= 2,
	CPU_MAX_IDLE_TYPES
};
12
13 extern void wake_up_if_idle(int cpu);
14
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 *
 * On architectures that define TIF_POLLING_NRFLAG, an idle task sets
 * that flag to advertise that it is polling TIF_NEED_RESCHED; while the
 * flag is set, remote CPUs can set NEED_RESCHED instead of sending a
 * reschedule IPI (see the comment in current_clr_polling() below).
 * Without TIF_POLLING_NRFLAG, the flag operations are no-ops and only
 * the NEED_RESCHED test remains.
 */
#ifdef TIF_POLLING_NRFLAG
/* Is task @p advertising the need_resched polling state? */
static inline int tsk_is_polling(struct task_struct *p)
{
	return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}

/*
 * Set TIF_POLLING_NRFLAG on the current task. Callers that need the
 * flag to be ordered against a later NEED_RESCHED test add a barrier
 * themselves (see current_set_polling_and_test()).
 */
static inline void __current_set_polling(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);
}

/*
 * Set the polling flag, then re-test TIF_NEED_RESCHED.
 *
 * Returns true if NEED_RESCHED was already set, i.e. a reschedule is
 * pending and the caller should act on it rather than poll.
 */
static inline bool __must_check current_set_polling_and_test(void)
{
	__current_set_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

/* Clear TIF_POLLING_NRFLAG on the current task (no implied barrier). */
static inline void __current_clr_polling(void)
{
	clear_thread_flag(TIF_POLLING_NRFLAG);
}

/*
 * Clear the polling flag, then re-test TIF_NEED_RESCHED.
 *
 * Returns true if NEED_RESCHED became set, i.e. a reschedule was
 * requested while we were still advertising the polling state.
 */
static inline bool __must_check current_clr_polling_and_test(void)
{
	__current_clr_polling();

	/*
	 * Polling state must be visible before we test NEED_RESCHED,
	 * paired by resched_curr()
	 */
	smp_mb__after_atomic();

	return unlikely(tif_need_resched());
}

#else
/* No polling support: nothing to set/clear, only NEED_RESCHED matters. */
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }

static inline bool __must_check current_set_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
	return unlikely(tif_need_resched());
}
#endif
75
/*
 * Leave the need_resched polling state for good: clear the polling bit,
 * then make sure any NEED_RESCHED that raced with the clear is folded
 * into the preempt state rather than lost.
 */
static inline void current_clr_polling(void)
{
	__current_clr_polling();

	/*
	 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
	 * Once the bit is cleared, we'll get IPIs with every new
	 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
	 * fold.
	 */
	smp_mb(); /* paired with resched_curr() */

	preempt_fold_need_resched();
}
90
91 #endif /* _LINUX_SCHED_IDLE_H */