/*
 * Linux wait queue related types and methods
 */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
10 #include <asm/current.h>
11 #include <uapi/linux/wait.h>
typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Wakeup callback invoked for each entry on a wait queue; @mode is the set of
 * task states eligible for wakeup, @key is the opaque wakeup key (see
 * wake_up_poll() and friends). Returns non-zero when a task was woken.
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01
#define WQ_FLAG_WOKEN		0x02
23 * A single wait-queue entry structure:
25 struct wait_queue_entry {
28 wait_queue_func_t func;
29 struct list_head task_list;
struct wait_bit_key {
	void			*flags;		/* the word (or atomic_t) being waited on */
	int			bit_nr;		/* bit number, or WAIT_ATOMIC_T_BIT_NR */
#define WAIT_ATOMIC_T_BIT_NR	-1
	unsigned long		timeout;	/* used by the *_timeout bit-wait actions */
};
39 struct wait_bit_queue_entry {
40 struct wait_bit_key key;
41 struct wait_queue_entry wq_entry;
44 struct wait_queue_head {
46 struct list_head task_list;
48 typedef struct wait_queue_head wait_queue_head_t;
/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)					\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)					\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }
extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialization of a waitqueue head; the function-local static
 * __key gives every call site its own lock class for lockdep.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)
#ifdef CONFIG_LOCKDEP
/* On-stack heads need runtime init so lockdep keys the lock per call site. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
95 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
98 wq_entry->private = p;
99 wq_entry->func = default_wake_function;
103 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
106 wq_entry->private = NULL;
107 wq_entry->func = func;
111 * waitqueue_active -- locklessly test for waiters on the queue
112 * @wq_head: the waitqueue to test for waiters
114 * returns true if the wait list is not empty
116 * NOTE: this function is lockless and requires care, incorrect usage _will_
117 * lead to sporadic and non-obvious failure.
119 * Use either while holding wait_queue_head::lock or when used for wakeups
120 * with an extra smp_mb() like:
122 * CPU0 - waker CPU1 - waiter
125 * @cond = true; prepare_to_wait(&wq_head, &wait, state);
126 * smp_mb(); // smp_mb() from set_current_state()
127 * if (waitqueue_active(wq_head)) if (@cond)
128 * wake_up(wq_head); break;
131 * finish_wait(&wq_head, &wait);
133 * Because without the explicit smp_mb() it's possible for the
134 * waitqueue_active() load to get hoisted over the @cond store such that we'll
135 * observe an empty wait list while the waiter might not observe @cond.
137 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
138 * which (when the lock is uncontended) are of roughly equal cost.
140 static inline int waitqueue_active(struct wait_queue_head *wq_head)
142 return !list_empty(&wq_head->task_list);
146 * wq_has_sleeper - check if there are any waiting processes
147 * @wq_head: wait queue head
149 * Returns true if wq_head has waiting processes
151 * Please refer to the comment for waitqueue_active.
153 static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
156 * We need to be sure we are in sync with the
157 * add_wait_queue modifications to the wait queue.
159 * This memory barrier should be paired with one on the
163 return waitqueue_active(wq_head);
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
170 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
172 list_add(&wq_entry->task_list, &wq_head->task_list);
176 * Used for wake-one threads:
179 __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
181 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
182 __add_wait_queue(wq_head, wq_entry);
185 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
187 list_add_tail(&wq_entry->task_list, &wq_head->task_list);
191 __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
193 wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
194 __add_wait_queue_entry_tail(wq_head, wq_entry);
198 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
200 list_del(&wq_entry->task_list);
203 typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
204 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
205 void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
206 void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
207 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
208 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
209 void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
210 int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
211 int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
212 void wake_up_bit(void *word, int bit);
213 void wake_up_atomic_t(atomic_t *p);
214 int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
215 int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
216 int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
217 int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
218 struct wait_queue_head *bit_waitqueue(void *word, int bit);
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
/*
 * Wakeup macros to be used to report events to the targets.
 * The @m argument is passed through as the wakeup key (poll mask).
 */
#define wake_up_poll(x, m)						\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)					\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)				\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)				\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
/*
 * Evaluates @condition and folds the remaining-timeout state into it:
 * relies on a __ret variable in the enclosing scope (the explicit shadow
 * in ___wait_event(), see below). Forces __ret to 1 when the condition
 * turned true exactly as the timeout expired.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})

#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)
317 #define __io_wait_event(wq_head, condition) \
318 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
322 * io_wait_event() -- like wait_event() but with io_schedule()
324 #define io_wait_event(wq_head, condition) \
329 __io_wait_event(wq_head, condition); \
332 #define __wait_event_freezable(wq_head, condition) \
333 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
334 schedule(); try_to_freeze())
337 * wait_event_freezable - sleep (or freeze) until a condition gets true
338 * @wq_head: the waitqueue to wait on
339 * @condition: a C expression for the event to wait for
341 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
342 * to system load) until the @condition evaluates to true. The
343 * @condition is checked each time the waitqueue @wq_head is woken up.
345 * wake_up() has to be called after changing any variable that could
346 * change the result of the wait condition.
348 #define wait_event_freezable(wq_head, condition) \
353 __ret = __wait_event_freezable(wq_head, condition); \
357 #define __wait_event_timeout(wq_head, condition, timeout) \
358 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
359 TASK_UNINTERRUPTIBLE, 0, timeout, \
360 __ret = schedule_timeout(__ret))
363 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
364 * @wq_head: the waitqueue to wait on
365 * @condition: a C expression for the event to wait for
366 * @timeout: timeout, in jiffies
368 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
369 * @condition evaluates to true. The @condition is checked each time
370 * the waitqueue @wq_head is woken up.
372 * wake_up() has to be called after changing any variable that could
373 * change the result of the wait condition.
376 * 0 if the @condition evaluated to %false after the @timeout elapsed,
377 * 1 if the @condition evaluated to %true after the @timeout elapsed,
378 * or the remaining jiffies (at least 1) if the @condition evaluated
379 * to %true before the @timeout elapsed.
381 #define wait_event_timeout(wq_head, condition, timeout) \
383 long __ret = timeout; \
385 if (!___wait_cond_timeout(condition)) \
386 __ret = __wait_event_timeout(wq_head, condition, timeout); \
390 #define __wait_event_freezable_timeout(wq_head, condition, timeout) \
391 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
392 TASK_INTERRUPTIBLE, 0, timeout, \
393 __ret = schedule_timeout(__ret); try_to_freeze())
396 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
397 * increasing load and is freezable.
399 #define wait_event_freezable_timeout(wq_head, condition, timeout) \
401 long __ret = timeout; \
403 if (!___wait_cond_timeout(condition)) \
404 __ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
408 #define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
409 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0, \
410 cmd1; schedule(); cmd2)
412 * Just like wait_event_cmd(), except it sets exclusive flag
414 #define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2) \
418 __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2); \
421 #define __wait_event_cmd(wq_head, condition, cmd1, cmd2) \
422 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
423 cmd1; schedule(); cmd2)
426 * wait_event_cmd - sleep until a condition gets true
427 * @wq_head: the waitqueue to wait on
428 * @condition: a C expression for the event to wait for
429 * @cmd1: the command will be executed before sleep
430 * @cmd2: the command will be executed after sleep
432 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
433 * @condition evaluates to true. The @condition is checked each time
434 * the waitqueue @wq_head is woken up.
436 * wake_up() has to be called after changing any variable that could
437 * change the result of the wait condition.
439 #define wait_event_cmd(wq_head, condition, cmd1, cmd2) \
443 __wait_event_cmd(wq_head, condition, cmd1, cmd2); \
446 #define __wait_event_interruptible(wq_head, condition) \
447 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
451 * wait_event_interruptible - sleep until a condition gets true
452 * @wq_head: the waitqueue to wait on
453 * @condition: a C expression for the event to wait for
455 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
456 * @condition evaluates to true or a signal is received.
457 * The @condition is checked each time the waitqueue @wq_head is woken up.
459 * wake_up() has to be called after changing any variable that could
460 * change the result of the wait condition.
462 * The function will return -ERESTARTSYS if it was interrupted by a
463 * signal and 0 if @condition evaluated to true.
465 #define wait_event_interruptible(wq_head, condition) \
470 __ret = __wait_event_interruptible(wq_head, condition); \
474 #define __wait_event_interruptible_timeout(wq_head, condition, timeout) \
475 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
476 TASK_INTERRUPTIBLE, 0, timeout, \
477 __ret = schedule_timeout(__ret))
480 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
481 * @wq_head: the waitqueue to wait on
482 * @condition: a C expression for the event to wait for
483 * @timeout: timeout, in jiffies
485 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
486 * @condition evaluates to true or a signal is received.
487 * The @condition is checked each time the waitqueue @wq_head is woken up.
489 * wake_up() has to be called after changing any variable that could
490 * change the result of the wait condition.
493 * 0 if the @condition evaluated to %false after the @timeout elapsed,
494 * 1 if the @condition evaluated to %true after the @timeout elapsed,
495 * the remaining jiffies (at least 1) if the @condition evaluated
496 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
497 * interrupted by a signal.
499 #define wait_event_interruptible_timeout(wq_head, condition, timeout) \
501 long __ret = timeout; \
503 if (!___wait_cond_timeout(condition)) \
504 __ret = __wait_event_interruptible_timeout(wq_head, \
505 condition, timeout); \
509 #define __wait_event_hrtimeout(wq_head, condition, timeout, state) \
512 struct hrtimer_sleeper __t; \
514 hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); \
515 hrtimer_init_sleeper(&__t, current); \
516 if ((timeout) != KTIME_MAX) \
517 hrtimer_start_range_ns(&__t.timer, timeout, \
518 current->timer_slack_ns, \
521 __ret = ___wait_event(wq_head, condition, state, 0, 0, \
528 hrtimer_cancel(&__t.timer); \
529 destroy_hrtimer_on_stack(&__t.timer); \
534 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
535 * @wq_head: the waitqueue to wait on
536 * @condition: a C expression for the event to wait for
537 * @timeout: timeout, as a ktime_t
539 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
540 * @condition evaluates to true or a signal is received.
541 * The @condition is checked each time the waitqueue @wq_head is woken up.
543 * wake_up() has to be called after changing any variable that could
544 * change the result of the wait condition.
546 * The function returns 0 if @condition became true, or -ETIME if the timeout
549 #define wait_event_hrtimeout(wq_head, condition, timeout) \
554 __ret = __wait_event_hrtimeout(wq_head, condition, timeout, \
555 TASK_UNINTERRUPTIBLE); \
560 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
561 * @wq_head: the waitqueue to wait on
562 * @condition: a C expression for the event to wait for
563 * @timeout: timeout, as a ktime_t
565 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
566 * @condition evaluates to true or a signal is received.
567 * The @condition is checked each time the waitqueue @wq_head is woken up.
569 * wake_up() has to be called after changing any variable that could
570 * change the result of the wait condition.
572 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
573 * interrupted by a signal, or -ETIME if the timeout elapsed.
575 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
580 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
581 TASK_INTERRUPTIBLE); \
585 #define __wait_event_interruptible_exclusive(wq, condition) \
586 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
589 #define wait_event_interruptible_exclusive(wq, condition) \
594 __ret = __wait_event_interruptible_exclusive(wq, condition); \
598 #define __wait_event_killable_exclusive(wq, condition) \
599 ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \
602 #define wait_event_killable_exclusive(wq, condition) \
607 __ret = __wait_event_killable_exclusive(wq, condition); \
612 #define __wait_event_freezable_exclusive(wq, condition) \
613 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
614 schedule(); try_to_freeze())
616 #define wait_event_freezable_exclusive(wq, condition) \
621 __ret = __wait_event_freezable_exclusive(wq, condition); \
625 extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
626 extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
628 #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
631 DEFINE_WAIT(__wait); \
633 __wait.flags |= WQ_FLAG_EXCLUSIVE; \
635 __ret = fn(&(wq), &__wait); \
638 } while (!(condition)); \
639 __remove_wait_queue(&(wq), &__wait); \
640 __set_current_state(TASK_RUNNING); \
646 * wait_event_interruptible_locked - sleep until a condition gets true
647 * @wq: the waitqueue to wait on
648 * @condition: a C expression for the event to wait for
650 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
651 * @condition evaluates to true or a signal is received.
652 * The @condition is checked each time the waitqueue @wq is woken up.
654 * It must be called with wq.lock being held. This spinlock is
655 * unlocked while sleeping but @condition testing is done while lock
656 * is held and when this macro exits the lock is held.
658 * The lock is locked/unlocked using spin_lock()/spin_unlock()
659 * functions which must match the way they are locked/unlocked outside
662 * wake_up_locked() has to be called after changing any variable that could
663 * change the result of the wait condition.
665 * The function will return -ERESTARTSYS if it was interrupted by a
666 * signal and 0 if @condition evaluated to true.
668 #define wait_event_interruptible_locked(wq, condition) \
670 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
673 * wait_event_interruptible_locked_irq - sleep until a condition gets true
674 * @wq: the waitqueue to wait on
675 * @condition: a C expression for the event to wait for
677 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
678 * @condition evaluates to true or a signal is received.
679 * The @condition is checked each time the waitqueue @wq is woken up.
681 * It must be called with wq.lock being held. This spinlock is
682 * unlocked while sleeping but @condition testing is done while lock
683 * is held and when this macro exits the lock is held.
685 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
686 * functions which must match the way they are locked/unlocked outside
689 * wake_up_locked() has to be called after changing any variable that could
690 * change the result of the wait condition.
692 * The function will return -ERESTARTSYS if it was interrupted by a
693 * signal and 0 if @condition evaluated to true.
695 #define wait_event_interruptible_locked_irq(wq, condition) \
697 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
700 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
701 * @wq: the waitqueue to wait on
702 * @condition: a C expression for the event to wait for
704 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
705 * @condition evaluates to true or a signal is received.
706 * The @condition is checked each time the waitqueue @wq is woken up.
708 * It must be called with wq.lock being held. This spinlock is
709 * unlocked while sleeping but @condition testing is done while lock
710 * is held and when this macro exits the lock is held.
712 * The lock is locked/unlocked using spin_lock()/spin_unlock()
713 * functions which must match the way they are locked/unlocked outside
716 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
717 * set thus when other process waits process on the list if this
718 * process is awaken further processes are not considered.
720 * wake_up_locked() has to be called after changing any variable that could
721 * change the result of the wait condition.
723 * The function will return -ERESTARTSYS if it was interrupted by a
724 * signal and 0 if @condition evaluated to true.
726 #define wait_event_interruptible_exclusive_locked(wq, condition) \
728 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
731 * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
732 * @wq: the waitqueue to wait on
733 * @condition: a C expression for the event to wait for
735 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
736 * @condition evaluates to true or a signal is received.
737 * The @condition is checked each time the waitqueue @wq is woken up.
739 * It must be called with wq.lock being held. This spinlock is
740 * unlocked while sleeping but @condition testing is done while lock
741 * is held and when this macro exits the lock is held.
743 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
744 * functions which must match the way they are locked/unlocked outside
747 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
748 * set thus when other process waits process on the list if this
749 * process is awaken further processes are not considered.
751 * wake_up_locked() has to be called after changing any variable that could
752 * change the result of the wait condition.
754 * The function will return -ERESTARTSYS if it was interrupted by a
755 * signal and 0 if @condition evaluated to true.
757 #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
759 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
762 #define __wait_event_killable(wq, condition) \
763 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
766 * wait_event_killable - sleep until a condition gets true
767 * @wq: the waitqueue to wait on
768 * @condition: a C expression for the event to wait for
770 * The process is put to sleep (TASK_KILLABLE) until the
771 * @condition evaluates to true or a signal is received.
772 * The @condition is checked each time the waitqueue @wq is woken up.
774 * wake_up() has to be called after changing any variable that could
775 * change the result of the wait condition.
777 * The function will return -ERESTARTSYS if it was interrupted by a
778 * signal and 0 if @condition evaluated to true.
780 #define wait_event_killable(wq_head, condition) \
785 __ret = __wait_event_killable(wq_head, condition); \
790 #define __wait_event_lock_irq(wq_head, condition, lock, cmd) \
791 (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
792 spin_unlock_irq(&lock); \
795 spin_lock_irq(&lock))
798 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
799 * condition is checked under the lock. This
800 * is expected to be called with the lock
802 * @wq_head: the waitqueue to wait on
803 * @condition: a C expression for the event to wait for
804 * @lock: a locked spinlock_t, which will be released before cmd
805 * and schedule() and reacquired afterwards.
806 * @cmd: a command which is invoked outside the critical section before
809 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
810 * @condition evaluates to true. The @condition is checked each time
811 * the waitqueue @wq_head is woken up.
813 * wake_up() has to be called after changing any variable that could
814 * change the result of the wait condition.
816 * This is supposed to be called while holding the lock. The lock is
817 * dropped before invoking the cmd and going to sleep and is reacquired
820 #define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd) \
824 __wait_event_lock_irq(wq_head, condition, lock, cmd); \
828 * wait_event_lock_irq - sleep until a condition gets true. The
829 * condition is checked under the lock. This
830 * is expected to be called with the lock
832 * @wq_head: the waitqueue to wait on
833 * @condition: a C expression for the event to wait for
834 * @lock: a locked spinlock_t, which will be released before schedule()
835 * and reacquired afterwards.
837 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
838 * @condition evaluates to true. The @condition is checked each time
839 * the waitqueue @wq_head is woken up.
841 * wake_up() has to be called after changing any variable that could
842 * change the result of the wait condition.
844 * This is supposed to be called while holding the lock. The lock is
845 * dropped before going to sleep and is reacquired afterwards.
847 #define wait_event_lock_irq(wq_head, condition, lock) \
851 __wait_event_lock_irq(wq_head, condition, lock, ); \
855 #define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd) \
856 ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \
857 spin_unlock_irq(&lock); \
860 spin_lock_irq(&lock))
863 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
864 * The condition is checked under the lock. This is expected to
865 * be called with the lock taken.
866 * @wq_head: the waitqueue to wait on
867 * @condition: a C expression for the event to wait for
868 * @lock: a locked spinlock_t, which will be released before cmd and
869 * schedule() and reacquired afterwards.
870 * @cmd: a command which is invoked outside the critical section before
873 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
874 * @condition evaluates to true or a signal is received. The @condition is
875 * checked each time the waitqueue @wq_head is woken up.
877 * wake_up() has to be called after changing any variable that could
878 * change the result of the wait condition.
880 * This is supposed to be called while holding the lock. The lock is
881 * dropped before invoking the cmd and going to sleep and is reacquired
884 * The macro will return -ERESTARTSYS if it was interrupted by a signal
885 * and 0 if @condition evaluated to true.
887 #define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd) \
891 __ret = __wait_event_interruptible_lock_irq(wq_head, \
892 condition, lock, cmd); \
897 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
898 * The condition is checked under the lock. This is expected
899 * to be called with the lock taken.
900 * @wq_head: the waitqueue to wait on
901 * @condition: a C expression for the event to wait for
902 * @lock: a locked spinlock_t, which will be released before schedule()
903 * and reacquired afterwards.
905 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
906 * @condition evaluates to true or signal is received. The @condition is
907 * checked each time the waitqueue @wq_head is woken up.
909 * wake_up() has to be called after changing any variable that could
910 * change the result of the wait condition.
912 * This is supposed to be called while holding the lock. The lock is
913 * dropped before going to sleep and is reacquired afterwards.
915 * The macro will return -ERESTARTSYS if it was interrupted by a signal
916 * and 0 if @condition evaluated to true.
918 #define wait_event_interruptible_lock_irq(wq_head, condition, lock) \
922 __ret = __wait_event_interruptible_lock_irq(wq_head, \
927 #define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
929 ___wait_event(wq_head, ___wait_cond_timeout(condition), \
930 TASK_INTERRUPTIBLE, 0, timeout, \
931 spin_unlock_irq(&lock); \
932 __ret = schedule_timeout(__ret); \
933 spin_lock_irq(&lock));
936 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
937 * true or a timeout elapses. The condition is checked under
938 * the lock. This is expected to be called with the lock taken.
939 * @wq_head: the waitqueue to wait on
940 * @condition: a C expression for the event to wait for
941 * @lock: a locked spinlock_t, which will be released before schedule()
942 * and reacquired afterwards.
943 * @timeout: timeout, in jiffies
945 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
946 * @condition evaluates to true or signal is received. The @condition is
947 * checked each time the waitqueue @wq_head is woken up.
949 * wake_up() has to be called after changing any variable that could
950 * change the result of the wait condition.
952 * This is supposed to be called while holding the lock. The lock is
953 * dropped before going to sleep and is reacquired afterwards.
955 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
956 * was interrupted by a signal, and the remaining jiffies otherwise
957 * if the condition evaluated to true before the timeout elapsed.
959 #define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock, \
962 long __ret = timeout; \
963 if (!___wait_cond_timeout(condition)) \
964 __ret = __wait_event_interruptible_lock_irq_timeout( \
965 wq_head, condition, lock, timeout); \
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
/*
 * Declare and initialize an on-stack wait-queue entry for @current,
 * using @function as its wake callback.
 */
#define DEFINE_WAIT_FUNC(name, function)				\
	struct wait_queue_entry name = {				\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}
/* Common case: the entry removes itself from the waitqueue on wakeup. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
/*
 * Declare and initialize an on-stack wait-bit entry waiting for @bit
 * of @word, with wake_bit_function() filtering wakeups by bit key.
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue_entry name = {				\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wq_entry = {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wq_entry.task_list), \
		},							\
	}
/*
 * (Re)initialize an existing wait-queue entry for @current with the
 * autoremove wake function. Multi-statement macro, hence do/while(0).
 */
#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
		(wait)->flags = 0;					\
	} while (0)
/* Canned actions for the wait_on_bit*() family below. */
extern int bit_wait(struct wait_bit_key *key, int bit);
extern int bit_wait_io(struct wait_bit_key *key, int bit);
extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
1016 * wait_on_bit - wait for a bit to be cleared
1017 * @word: the word being waited on, a kernel virtual address
1018 * @bit: the bit of the word being waited on
1019 * @mode: the task state to sleep in
1021 * There is a standard hashed waitqueue table for generic use. This
1022 * is the part of the hashtable's accessor API that waits on a bit.
1023 * For instance, if one were to have waiters on a bitflag, one would
1024 * call wait_on_bit() in threads waiting for the bit to clear.
1025 * One uses wait_on_bit() where one is waiting for the bit to clear,
1026 * but has no intention of setting it.
1027 * Returned value will be zero if the bit was cleared, or non-zero
1028 * if the process received a signal and the mode permitted wakeup
1032 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1035 if (!test_bit(bit, word))
1037 return out_of_line_wait_on_bit(word, bit,
1043 * wait_on_bit_io - wait for a bit to be cleared
1044 * @word: the word being waited on, a kernel virtual address
1045 * @bit: the bit of the word being waited on
1046 * @mode: the task state to sleep in
1048 * Use the standard hashed waitqueue table to wait for a bit
1049 * to be cleared. This is similar to wait_on_bit(), but calls
1050 * io_schedule() instead of schedule() for the actual waiting.
1052 * Returned value will be zero if the bit was cleared, or non-zero
1053 * if the process received a signal and the mode permitted wakeup
1057 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1060 if (!test_bit(bit, word))
1062 return out_of_line_wait_on_bit(word, bit,
1068 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1069 * @word: the word being waited on, a kernel virtual address
1070 * @bit: the bit of the word being waited on
1071 * @mode: the task state to sleep in
1072 * @timeout: timeout, in jiffies
1074 * Use the standard hashed waitqueue table to wait for a bit
1075 * to be cleared. This is similar to wait_on_bit(), except also takes a
1076 * timeout parameter.
1078 * Returned value will be zero if the bit was cleared before the
1079 * @timeout elapsed, or non-zero if the @timeout elapsed or process
1080 * received a signal and the mode permitted wakeup on that signal.
1083 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1084 unsigned long timeout)
1087 if (!test_bit(bit, word))
1089 return out_of_line_wait_on_bit_timeout(word, bit,
1095 * wait_on_bit_action - wait for a bit to be cleared
1096 * @word: the word being waited on, a kernel virtual address
1097 * @bit: the bit of the word being waited on
1098 * @action: the function used to sleep, which may take special actions
1099 * @mode: the task state to sleep in
1101 * Use the standard hashed waitqueue table to wait for a bit
1102 * to be cleared, and allow the waiting action to be specified.
1103 * This is like wait_on_bit() but allows fine control of how the waiting
1106 * Returned value will be zero if the bit was cleared, or non-zero
1107 * if the process received a signal and the mode permitted wakeup
1111 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1115 if (!test_bit(bit, word))
1117 return out_of_line_wait_on_bit(word, bit, action, mode);
1121 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1122 * @word: the word being waited on, a kernel virtual address
1123 * @bit: the bit of the word being waited on
1124 * @mode: the task state to sleep in
1126 * There is a standard hashed waitqueue table for generic use. This
1127 * is the part of the hashtable's accessor API that waits on a bit
1128 * when one intends to set it, for instance, trying to lock bitflags.
1129 * For instance, if one were to have waiters trying to set bitflag
1130 * and waiting for it to clear before setting it, one would call
1131 * wait_on_bit() in threads waiting to be able to set the bit.
1132 * One uses wait_on_bit_lock() where one is waiting for the bit to
1133 * clear with the intention of setting it, and when done, clearing it.
1135 * Returns zero if the bit was (eventually) found to be clear and was
1136 * set. Returns non-zero if a signal was delivered to the process and
1137 * the @mode allows that signal to wake the process.
1140 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1143 if (!test_and_set_bit(bit, word))
1145 return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1149 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1150 * @word: the word being waited on, a kernel virtual address
1151 * @bit: the bit of the word being waited on
1152 * @mode: the task state to sleep in
1154 * Use the standard hashed waitqueue table to wait for a bit
1155 * to be cleared and then to atomically set it. This is similar
1156 * to wait_on_bit(), but calls io_schedule() instead of schedule()
1157 * for the actual waiting.
1159 * Returns zero if the bit was (eventually) found to be clear and was
1160 * set. Returns non-zero if a signal was delivered to the process and
1161 * the @mode allows that signal to wake the process.
1164 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1167 if (!test_and_set_bit(bit, word))
1169 return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1173 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1174 * @word: the word being waited on, a kernel virtual address
1175 * @bit: the bit of the word being waited on
1176 * @action: the function used to sleep, which may take special actions
1177 * @mode: the task state to sleep in
1179 * Use the standard hashed waitqueue table to wait for a bit
1180 * to be cleared and then to set it, and allow the waiting action
1182 * This is like wait_on_bit() but allows fine control of how the waiting
1185 * Returns zero if the bit was (eventually) found to be clear and was
1186 * set. Returns non-zero if a signal was delivered to the process and
1187 * the @mode allows that signal to wake the process.
1190 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1194 if (!test_and_set_bit(bit, word))
1196 return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1200 * wait_on_atomic_t - Wait for an atomic_t to become 0
1201 * @val: The atomic value being waited on, a kernel virtual address
1202 * @action: the function used to sleep, which may take special actions
1203 * @mode: the task state to sleep in
1205 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
1206 * the purpose of getting a waitqueue, but we set the key to a bit number
1207 * outside of the target 'word'.
1210 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1213 if (atomic_read(val) == 0)
1215 return out_of_line_wait_on_atomic_t(val, action, mode);
1218 #endif /* _LINUX_WAIT_H */