/*
 * include/linux/wait.h -- Linux wait queue related types and methods.
 * (Tree: karo-tx-linux; includes "sched/wait: Re-adjust macro line
 * continuation backslashes in <linux/wait.h>".)
 */
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9
10 #include <asm/current.h>
11 #include <uapi/linux/wait.h>
12
/* Legacy alias; older code refers to entries as wait_queue_entry_t. */
typedef struct wait_queue_entry wait_queue_entry_t;

/*
 * Per-entry wake-up callback, invoked when the queue is woken.
 * @mode, @flags and @key are forwarded from the __wake_up*() caller.
 */
typedef int (*wait_queue_func_t)(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);
int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int flags, void *key);

/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE	0x01	/* set by the *_exclusive add helpers below */
#define WQ_FLAG_WOKEN		0x02

/*
 * A single wait-queue entry structure:
 */
struct wait_queue_entry {
	unsigned int		flags;		/* WQ_FLAG_* bits */
	void			*private;	/* the waiting task (see init_waitqueue_entry()) */
	wait_queue_func_t	func;		/* wake-up callback */
	struct list_head	task_list;	/* link on wait_queue_head::task_list */
};

/* Identifies which bit of which word a bit-waiter is waiting on. */
struct wait_bit_key {
	void			*flags;		/* word containing the awaited bit */
	int			bit_nr;		/* bit index within *flags */
#define WAIT_ATOMIC_T_BIT_NR	-1		/* sentinel: waiting on an atomic_t, not a bit */
	unsigned long		timeout;	/* presumably used by the *_timeout bit waits -- confirm */
};

/* A wait-queue entry together with the bit-wait key it sleeps on. */
struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	struct wait_queue_entry wq_entry;
};

/* Head of a wait queue: spinlock protecting the list of queued entries. */
struct wait_queue_head {
	spinlock_t		lock;
	struct list_head	task_list;
};
typedef struct wait_queue_head wait_queue_head_t;

struct task_struct;
51
/*
 * Macros for declaration and initialisation of the datatypes
 */

/* Static initializer for an entry waking task @tsk via default_wake_function(). */
#define __WAITQUEUE_INITIALIZER(name, tsk) {					\
	.private	= tsk,							\
	.func		= default_wake_function,				\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)						\
	struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a head: unlocked spinlock, list pointing at itself. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {					\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),			\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* Initializers for bit-wait keys: watch bit @bit of @word / an atomic_t @p. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit)					\
	{ .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)					\
	{ .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *);

/*
 * Runtime initialization.  The function-local static lock_class_key gives
 * every init_waitqueue_head() call site its own lockdep class; #wq_head
 * passes the textual name for lock debugging output.
 */
#define init_waitqueue_head(wq_head)						\
	do {									\
		static struct lock_class_key __key;				\
										\
		__init_waitqueue_head((wq_head), #wq_head, &__key);		\
	} while (0)

#ifdef CONFIG_LOCKDEP
/* On-stack heads need runtime init so lockdep keys the stack address. */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	struct wait_queue_head name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
94
95 static inline void init_waitqueue_entry(struct wait_queue_entry *wq_entry, struct task_struct *p)
96 {
97         wq_entry->flags         = 0;
98         wq_entry->private       = p;
99         wq_entry->func          = default_wake_function;
100 }
101
102 static inline void
103 init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t func)
104 {
105         wq_entry->flags         = 0;
106         wq_entry->private       = NULL;
107         wq_entry->func          = func;
108 }
109
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @wq_head: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq_head, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(&wq_head))        if (@cond)
 *        wake_up(&wq_head);                     break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq_head, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(struct wait_queue_head *wq_head)
{
	/* Plain lockless list check -- see the barrier discussion above. */
	return !list_empty(&wq_head->task_list);
}
144
/**
 * wq_has_sleeper - check if there are any waiting processes
 * @wq_head: wait queue head
 *
 * Returns true if wq_head has waiting processes
 *
 * Unlike bare waitqueue_active(), this issues the full barrier itself,
 * so callers only need to have published their condition beforehand.
 * Please refer to the comment for waitqueue_active.
 */
static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier should be paired with one on the
	 * waiting side.
	 */
	smp_mb();
	return waitqueue_active(wq_head);
}
165
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);

/*
 * Link @wq_entry at the head of the list.
 * NOTE(review): these double-underscore helpers take no lock themselves;
 * callers presumably hold wq_head->lock (the extern variants above are the
 * locking API) -- confirm against the wait.c implementation.
 */
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add(&wq_entry->task_list, &wq_head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(wq_head, wq_entry);
}

/* Link @wq_entry at the tail of the list. */
static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_add_tail(&wq_entry->task_list, &wq_head->task_list);
}

/* Tail insertion with the exclusive (wake-one) flag set first. */
static inline void
__add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_entry_tail(wq_head, wq_entry);
}

/* Unlink @wq_entry; @wq_head is unused here but kept for API symmetry. */
static inline void
__remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
{
	list_del(&wq_entry->task_list);
}
202
/* Action callback run while waiting on a bit (e.g. schedules in @mode). */
typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);

void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(void *word, int bit);
void wake_up_atomic_t(atomic_t *p);
int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
struct wait_queue_head *bit_waitqueue(void *word, int bit);

/* Wake helpers; the nr argument is the wake count, 0 meaning "all". */
#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)		__wake_up_locked((x), TASK_NORMAL, 0)

/* Variants restricted to TASK_INTERRUPTIBLE sleepers. */
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
230
/*
 * Wakeup macros to be used to report events to the targets.
 * The poll event mask @m travels to the entry callbacks as the @key argument.
 */
#define wake_up_poll(x, m)							\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)						\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)					\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)					\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

/*
 * Evaluate @condition for a timed wait.  Requires a __ret variable in
 * scope holding the remaining timeout.  If @condition became true exactly
 * as the timeout ran out (__ret == 0), __ret is forced to 1 so the caller
 * reports success, not timeout.  The expression is true when the wait
 * should stop: condition met, or time exhausted.
 */
#define ___wait_cond_timeout(condition)						\
({										\
	bool __cond = (condition);						\
	if (__cond && !__ret)							\
		__ret = 1;							\
	__cond || !__ret;							\
})
250
/*
 * True when a sleep in @state can be interrupted by signals: either
 * @state is not a compile-time constant (so be conservative and assume
 * it can be), or it is one of the interruptible sleep states.
 *
 * Fix: the original definition carried a stray line-continuation
 * backslash after its final line, splicing the following source line
 * (currently blank, by luck) into the macro body.  Removed.
 */
#define ___wait_is_interruptible(state)						\
	(!__builtin_constant_p(state) ||					\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
extern void init_wait_entry(struct wait_queue_entry *wq_entry, int flags);

/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * Loop shape: prepare to wait, re-check @condition, bail out with the
 * pending-signal value when an interruptible @state was interrupted,
 * otherwise run @cmd (typically some form of schedule()) and retry.
 * finish_wait() runs only on the normal (condition-met) exit path.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)		\
({										\
	__label__ __out;							\
	struct wait_queue_entry __wq_entry;					\
	long __ret = ret;	/* explicit shadow */				\
										\
	init_wait_entry(&__wq_entry, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {								\
		long __int = prepare_to_wait_event(&wq_head, &__wq_entry, state);\
										\
		if (condition)							\
			break;							\
										\
		if (___wait_is_interruptible(state) && __int) {			\
			__ret = __int;						\
			goto __out;						\
		}								\
										\
		cmd;								\
	}									\
	finish_wait(&wq_head, &__wq_entry);					\
__out:	__ret;									\
})
292
/* Uninterruptible wait; ___wait_event()'s result is deliberately discarded. */
#define __wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq_head, condition)						\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__wait_event(wq_head, condition);					\
} while (0)

/* Same as __wait_event() but accounts the sleep as I/O wait. */
#define __io_wait_event(wq_head, condition)					\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule()
 */
#define io_wait_event(wq_head, condition)					\
do {										\
	might_sleep();								\
	if (condition)								\
		break;								\
	__io_wait_event(wq_head, condition);					\
} while (0)
331
/* Interruptible sleep that also enters the freezer after each wakeup. */
#define __wait_event_freezable(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
			    schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns 0 when @condition became true, or a negative signal error
 * (-ERESTARTSYS, as for wait_event_interruptible()) propagated from
 * ___wait_event().
 */
#define wait_event_freezable(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_freezable(wq_head, condition);		\
	__ret;									\
})
356
/* Timed uninterruptible wait; __ret tracks the remaining jiffies. */
#define __wait_event_timeout(wq_head, condition, timeout)			\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_UNINTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq_head, condition, timeout)				\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_timeout(wq_head, condition, timeout);	\
	__ret;									\
})

/* Timed, interruptible and freezable variant of the helper above. */
#define __wait_event_freezable_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.
 */
#define wait_event_freezable_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_freezable_timeout(wq_head, condition, timeout); \
	__ret;									\
})
407
/* Exclusive wait running @cmd1 before and @cmd2 after each schedule(). */
#define __wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 1, 0,	\
			    cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 */
#define wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2)		\
do {										\
	if (condition)								\
		break;								\
	__wait_event_exclusive_cmd(wq_head, condition, cmd1, cmd2);		\
} while (0)

/* Non-exclusive variant; @cmd1/@cmd2 bracket each schedule() call. */
#define __wait_event_cmd(wq_head, condition, cmd1, cmd2)			\
	(void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			    cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq_head, condition, cmd1, cmd2)				\
do {										\
	if (condition)								\
		break;								\
	__wait_event_cmd(wq_head, condition, cmd1, cmd2);			\
} while (0)
445
/* Interruptible wait; returns 0 or the signal value from ___wait_event(). */
#define __wait_event_interruptible(wq_head, condition)				\
	___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,		\
		      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq_head, condition)				\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible(wq_head, condition);		\
	__ret;									\
})

/* Timed interruptible wait; __ret tracks the remaining jiffies. */
#define __wait_event_interruptible_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout(condition),			\
		      TASK_INTERRUPTIBLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq_head, condition, timeout)		\
({										\
	long __ret = timeout;							\
	might_sleep();								\
	if (!___wait_cond_timeout(condition))					\
		__ret = __wait_event_interruptible_timeout(wq_head,		\
						condition, timeout);		\
	__ret;									\
})
508
/*
 * Sleep in @state until @condition or a high-resolution timer fires.
 * An on-stack hrtimer_sleeper is armed unless @timeout is KTIME_MAX
 * ("no timeout").  The wait loop checks __t.task -- presumably cleared
 * by the sleeper's expiry callback (kernel convention; confirm in
 * hrtimer_init_sleeper()) -- and converts expiry into -ETIME.
 */
#define __wait_event_hrtimeout(wq_head, condition, timeout, state)		\
({										\
	int __ret = 0;								\
	struct hrtimer_sleeper __t;						\
										\
	hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);	\
	hrtimer_init_sleeper(&__t, current);					\
	if ((timeout) != KTIME_MAX)						\
		hrtimer_start_range_ns(&__t.timer, timeout,			\
				       current->timer_slack_ns,			\
				       HRTIMER_MODE_REL);			\
										\
	__ret = ___wait_event(wq_head, condition, state, 0, 0,			\
		if (!__t.task) {						\
			__ret = -ETIME;						\
			break;							\
		}								\
		schedule());							\
										\
	hrtimer_cancel(&__t.timer);						\
	destroy_hrtimer_on_stack(&__t.timer);					\
	__ret;									\
})

/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true or the timeout fires; being
 * uninterruptible, the sleep is not broken by signals.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq_head, condition, timeout)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq_head, condition, timeout,	\
					       TASK_UNINTERRUPTIBLE);		\
	__ret;									\
})
558
/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)		\
({										\
	long __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_hrtimeout(wq, condition, timeout,		\
					       TASK_INTERRUPTIBLE);		\
	__ret;									\
})

/* Interruptible wait queued with the exclusive (wake-one) flag. */
#define __wait_event_interruptible_exclusive(wq, condition)			\
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,			\
		      schedule())

/* Returns 0 when @condition became true, or the signal error from above. */
#define wait_event_interruptible_exclusive(wq, condition)			\
({										\
	int __ret = 0;								\
	might_sleep();								\
	if (!(condition))							\
		__ret = __wait_event_interruptible_exclusive(wq, condition);	\
	__ret;									\
})
597
/* Exclusive wait in TASK_KILLABLE: only fatal signals interrupt the sleep. */
#define __wait_event_killable_exclusive(wq, condition)                          \
        ___wait_event(wq, condition, TASK_KILLABLE, 1, 0,                       \
                      schedule())

/**
 * wait_event_killable_exclusive - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Exclusive variant of wait_event_killable(): the entry is queued with
 * WQ_FLAG_EXCLUSIVE, and only fatal signals wake the task early.
 * Returns 0 if @condition became true, -ERESTARTSYS on a fatal signal.
 */
#define wait_event_killable_exclusive(wq, condition)                            \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_killable_exclusive(wq, condition);         \
        __ret;                                                                  \
})
610
611
/* Exclusive interruptible wait that also enters the freezer between
 * wakeups (try_to_freeze() after each schedule()). */
#define __wait_event_freezable_exclusive(wq, condition)                         \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,                  \
                        schedule(); try_to_freeze())

/**
 * wait_event_freezable_exclusive - sleep exclusively (freezable) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * Exclusive variant of wait_event_freezable(); the task may be frozen
 * while waiting. Returns 0 if @condition became true, -ERESTARTSYS on
 * signal.
 */
#define wait_event_freezable_exclusive(wq, condition)                           \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_freezable_exclusive(wq, condition);        \
        __ret;                                                                  \
})
624
/*
 * Sleep helpers for the *_locked wait macros below; they drop/retake
 * wq.lock (plain or _irq flavour) around the schedule(). Defined in
 * kernel/sched/wait.c.
 */
extern int do_wait_intr(wait_queue_head_t *, wait_queue_entry_t *);
extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_entry_t *);
627
/*
 * Core of the wait_event_interruptible*_locked() family. Must be called
 * with wq.lock held; @fn (do_wait_intr or do_wait_intr_irq) is expected
 * to drop and retake the lock around the actual sleep. When @exclusive
 * is non-zero the entry is queued with WQ_FLAG_EXCLUSIVE. Evaluates to
 * 0 on success or the non-zero error returned by @fn (-ERESTARTSYS on
 * signal).
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, fn)         \
({                                                                              \
        int __ret;                                                              \
        DEFINE_WAIT(__wait);                                                    \
        if (exclusive)                                                          \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                              \
        do {                                                                    \
                __ret = fn(&(wq), &__wait);                                     \
                if (__ret)                                                      \
                        break;                                                  \
        } while (!(condition));                                                 \
        __remove_wait_queue(&(wq), &__wait);                                    \
        __set_current_state(TASK_RUNNING);                                      \
        __ret;                                                                  \
})
643
644
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                          \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
671
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)                      \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
698
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further exclusive waiters on
 * the list are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)                \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
729
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if this process is woken up, further exclusive waiters on
 * the list are not considered for that wakeup.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)            \
        ((condition)                                                            \
         ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
760
761
/* Non-exclusive wait in TASK_KILLABLE: only fatal signals interrupt. */
#define __wait_event_killable(wq, condition)                                    \
        ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())

/**
 * wait_event_killable - sleep until a condition gets true
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq_head, condition)                                 \
({                                                                              \
        int __ret = 0;                                                          \
        might_sleep();                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_killable(wq_head, condition);              \
        __ret;                                                                  \
})
788
789
/*
 * Uninterruptible wait that holds @lock across the @condition check and
 * releases it (spin_unlock_irq) around @cmd and the schedule().  Cast to
 * void: ___wait_event()'s value is meaningless here since the sleep is
 * uninterruptible and cannot fail.
 */
#define __wait_event_lock_irq(wq_head, condition, lock, cmd)                    \
        (void)___wait_event(wq_head, condition, TASK_UNINTERRUPTIBLE, 0, 0,     \
                            spin_unlock_irq(&lock);                             \
                            cmd;                                                \
                            schedule();                                         \
                            spin_lock_irq(&lock))
796
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 */
#define wait_event_lock_irq_cmd(wq_head, condition, lock, cmd)                  \
do {                                                                            \
        if (condition)                                                          \
                break;                                                          \
        __wait_event_lock_irq(wq_head, condition, lock, cmd);                   \
} while (0)
826
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 */
#define wait_event_lock_irq(wq_head, condition, lock)                           \
do {                                                                            \
        if (condition)                                                          \
                break;                                                          \
        __wait_event_lock_irq(wq_head, condition, lock, );                      \
} while (0)
853
854
/*
 * Interruptible variant of __wait_event_lock_irq(): @lock is held for
 * the @condition check and released (spin_unlock_irq) around @cmd and
 * the schedule().  Evaluates to 0 or -ERESTARTSYS.
 */
#define __wait_event_interruptible_lock_irq(wq_head, condition, lock, cmd)      \
        ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0,             \
                      spin_unlock_irq(&lock);                                   \
                      cmd;                                                      \
                      schedule();                                               \
                      spin_lock_irq(&lock))
861
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq_head, condition, lock, cmd)    \
({                                                                              \
        int __ret = 0;                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_interruptible_lock_irq(wq_head,            \
                                                condition, lock, cmd);          \
        __ret;                                                                  \
})
895
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq_head, condition, lock)             \
({                                                                              \
        int __ret = 0;                                                          \
        if (!(condition))                                                       \
                __ret = __wait_event_interruptible_lock_irq(wq_head,            \
                                                condition, lock,);              \
        __ret;                                                                  \
})
926
/*
 * Timeout variant of __wait_event_interruptible_lock_irq(): @lock is
 * held for the @condition check and released (spin_unlock_irq) around
 * schedule_timeout().  Evaluates to the remaining jiffies, 0 on timeout,
 * or -ERESTARTSYS on signal.
 *
 * Note: no trailing semicolon — this macro must expand to an expression
 * (it is used as the RHS of an assignment by the public wrapper below);
 * the stray ';' previously embedded here only worked by accident of
 * statement context.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition,         \
                                                    lock, timeout)              \
        ___wait_event(wq_head, ___wait_cond_timeout(condition),                 \
                      TASK_INTERRUPTIBLE, 0, timeout,                           \
                      spin_unlock_irq(&lock);                                   \
                      __ret = schedule_timeout(__ret);                          \
                      spin_lock_irq(&lock))
934
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq_head, condition, lock,     \
                                                  timeout)                      \
({                                                                              \
        long __ret = timeout;                                                   \
        if (!___wait_cond_timeout(condition))                                   \
                __ret = __wait_event_interruptible_lock_irq_timeout(            \
                                        wq_head, condition, lock, timeout);     \
        __ret;                                                                  \
})
968
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time.
 * prepare_to_wait*() queue the entry and set the task state;
 * finish_wait() restores TASK_RUNNING and dequeues.  wait_woken() /
 * woken_wake_function() implement the WQ_FLAG_WOKEN-based protocol;
 * autoremove_wake_function() dequeues the entry on wakeup.
 */
void prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry, int state);
void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout);
int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);
980
/* Declare an on-stack wait_queue_entry for the current task, woken via
 * @function. */
#define DEFINE_WAIT_FUNC(name, function)                                        \
        struct wait_queue_entry name = {                                        \
                .private        = current,                                      \
                .func           = function,                                     \
                .task_list      = LIST_HEAD_INIT((name).task_list),             \
        }

/* Common case: entry removes itself from the queue when woken. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
989
/* Declare an on-stack wait_bit_queue_entry keyed on (@word, @bit), woken
 * by wake_bit_function() only when the matching bit is woken. */
#define DEFINE_WAIT_BIT(name, word, bit)                                        \
        struct wait_bit_queue_entry name = {                                    \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),                   \
                .wq_entry = {                                                   \
                        .private        = current,                              \
                        .func           = wake_bit_function,                    \
                        .task_list      =                                       \
                                LIST_HEAD_INIT((name).wq_entry.task_list),      \
                },                                                              \
        }
1000
/* Runtime (re-)initialization of a wait_queue_entry for the current
 * task; equivalent to DEFINE_WAIT() for an already-declared entry. */
#define init_wait(wait)                                                         \
        do {                                                                    \
                (wait)->private = current;                                      \
                (wait)->func = autoremove_wake_function;                        \
                INIT_LIST_HEAD(&(wait)->task_list);                             \
                (wait)->flags = 0;                                              \
        } while (0)
1008
1009
/*
 * Stock sleep actions for the wait_on_bit*() helpers below: plain
 * schedule(), io_schedule(), and timeout-bounded variants.  All return
 * 0 on normal wakeup or non-zero when interrupted/expired.
 */
extern int bit_wait(struct wait_bit_key *key, int bit);
extern int bit_wait_io(struct wait_bit_key *key, int bit);
extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);
1014
1015 /**
1016  * wait_on_bit - wait for a bit to be cleared
1017  * @word: the word being waited on, a kernel virtual address
1018  * @bit: the bit of the word being waited on
1019  * @mode: the task state to sleep in
1020  *
1021  * There is a standard hashed waitqueue table for generic use. This
1022  * is the part of the hashtable's accessor API that waits on a bit.
1023  * For instance, if one were to have waiters on a bitflag, one would
1024  * call wait_on_bit() in threads waiting for the bit to clear.
1025  * One uses wait_on_bit() where one is waiting for the bit to clear,
1026  * but has no intention of setting it.
1027  * Returned value will be zero if the bit was cleared, or non-zero
1028  * if the process received a signal and the mode permitted wakeup
1029  * on that signal.
1030  */
1031 static inline int
1032 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1033 {
1034         might_sleep();
1035         if (!test_bit(bit, word))
1036                 return 0;
1037         return out_of_line_wait_on_bit(word, bit,
1038                                        bit_wait,
1039                                        mode);
1040 }
1041
1042 /**
1043  * wait_on_bit_io - wait for a bit to be cleared
1044  * @word: the word being waited on, a kernel virtual address
1045  * @bit: the bit of the word being waited on
1046  * @mode: the task state to sleep in
1047  *
1048  * Use the standard hashed waitqueue table to wait for a bit
1049  * to be cleared.  This is similar to wait_on_bit(), but calls
1050  * io_schedule() instead of schedule() for the actual waiting.
1051  *
1052  * Returned value will be zero if the bit was cleared, or non-zero
1053  * if the process received a signal and the mode permitted wakeup
1054  * on that signal.
1055  */
1056 static inline int
1057 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1058 {
1059         might_sleep();
1060         if (!test_bit(bit, word))
1061                 return 0;
1062         return out_of_line_wait_on_bit(word, bit,
1063                                        bit_wait_io,
1064                                        mode);
1065 }
1066
1067 /**
1068  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1069  * @word: the word being waited on, a kernel virtual address
1070  * @bit: the bit of the word being waited on
1071  * @mode: the task state to sleep in
1072  * @timeout: timeout, in jiffies
1073  *
1074  * Use the standard hashed waitqueue table to wait for a bit
1075  * to be cleared. This is similar to wait_on_bit(), except also takes a
1076  * timeout parameter.
1077  *
1078  * Returned value will be zero if the bit was cleared before the
1079  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1080  * received a signal and the mode permitted wakeup on that signal.
1081  */
1082 static inline int
1083 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1084                     unsigned long timeout)
1085 {
1086         might_sleep();
1087         if (!test_bit(bit, word))
1088                 return 0;
1089         return out_of_line_wait_on_bit_timeout(word, bit,
1090                                                bit_wait_timeout,
1091                                                mode, timeout);
1092 }
1093
1094 /**
1095  * wait_on_bit_action - wait for a bit to be cleared
1096  * @word: the word being waited on, a kernel virtual address
1097  * @bit: the bit of the word being waited on
1098  * @action: the function used to sleep, which may take special actions
1099  * @mode: the task state to sleep in
1100  *
1101  * Use the standard hashed waitqueue table to wait for a bit
1102  * to be cleared, and allow the waiting action to be specified.
1103  * This is like wait_on_bit() but allows fine control of how the waiting
1104  * is done.
1105  *
1106  * Returned value will be zero if the bit was cleared, or non-zero
1107  * if the process received a signal and the mode permitted wakeup
1108  * on that signal.
1109  */
1110 static inline int
1111 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1112                    unsigned mode)
1113 {
1114         might_sleep();
1115         if (!test_bit(bit, word))
1116                 return 0;
1117         return out_of_line_wait_on_bit(word, bit, action, mode);
1118 }
1119
1120 /**
1121  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1122  * @word: the word being waited on, a kernel virtual address
1123  * @bit: the bit of the word being waited on
1124  * @mode: the task state to sleep in
1125  *
1126  * There is a standard hashed waitqueue table for generic use. This
1127  * is the part of the hashtable's accessor API that waits on a bit
1128  * when one intends to set it, for instance, trying to lock bitflags.
1129  * For instance, if one were to have waiters trying to set bitflag
1130  * and waiting for it to clear before setting it, one would call
1131  * wait_on_bit() in threads waiting to be able to set the bit.
1132  * One uses wait_on_bit_lock() where one is waiting for the bit to
1133  * clear with the intention of setting it, and when done, clearing it.
1134  *
1135  * Returns zero if the bit was (eventually) found to be clear and was
1136  * set.  Returns non-zero if a signal was delivered to the process and
1137  * the @mode allows that signal to wake the process.
1138  */
1139 static inline int
1140 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1141 {
1142         might_sleep();
1143         if (!test_and_set_bit(bit, word))
1144                 return 0;
1145         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1146 }
1147
1148 /**
1149  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1150  * @word: the word being waited on, a kernel virtual address
1151  * @bit: the bit of the word being waited on
1152  * @mode: the task state to sleep in
1153  *
1154  * Use the standard hashed waitqueue table to wait for a bit
1155  * to be cleared and then to atomically set it.  This is similar
1156  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1157  * for the actual waiting.
1158  *
1159  * Returns zero if the bit was (eventually) found to be clear and was
1160  * set.  Returns non-zero if a signal was delivered to the process and
1161  * the @mode allows that signal to wake the process.
1162  */
1163 static inline int
1164 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1165 {
1166         might_sleep();
1167         if (!test_and_set_bit(bit, word))
1168                 return 0;
1169         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1170 }
1171
1172 /**
1173  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1174  * @word: the word being waited on, a kernel virtual address
1175  * @bit: the bit of the word being waited on
1176  * @action: the function used to sleep, which may take special actions
1177  * @mode: the task state to sleep in
1178  *
1179  * Use the standard hashed waitqueue table to wait for a bit
1180  * to be cleared and then to set it, and allow the waiting action
1181  * to be specified.
1182  * This is like wait_on_bit() but allows fine control of how the waiting
1183  * is done.
1184  *
1185  * Returns zero if the bit was (eventually) found to be clear and was
1186  * set.  Returns non-zero if a signal was delivered to the process and
1187  * the @mode allows that signal to wake the process.
1188  */
1189 static inline int
1190 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1191                         unsigned mode)
1192 {
1193         might_sleep();
1194         if (!test_and_set_bit(bit, word))
1195                 return 0;
1196         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1197 }
1198
1199 /**
1200  * wait_on_atomic_t - Wait for an atomic_t to become 0
1201  * @val: The atomic value being waited on, a kernel virtual address
1202  * @action: the function used to sleep, which may take special actions
1203  * @mode: the task state to sleep in
1204  *
1205  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1206  * the purpose of getting a waitqueue, but we set the key to a bit number
1207  * outside of the target 'word'.
1208  */
1209 static inline
1210 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1211 {
1212         might_sleep();
1213         if (atomic_read(val) == 0)
1214                 return 0;
1215         return out_of_line_wait_on_atomic_t(val, action, mode);
1216 }
1217
1218 #endif /* _LINUX_WAIT_H */