/* include/linux/wait.h */
1 #ifndef _LINUX_WAIT_H
2 #define _LINUX_WAIT_H
3 /*
4  * Linux wait queue related types and methods
5  */
6 #include <linux/list.h>
7 #include <linux/stddef.h>
8 #include <linux/spinlock.h>
9 #include <asm/current.h>
10 #include <uapi/linux/wait.h>
11
typedef struct __wait_queue wait_queue_t;
/*
 * Per-entry wakeup callback; default_wake_function() is the default,
 * installed by init_waitqueue_entry() and __WAITQUEUE_INITIALIZER().
 */
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

/* __wait_queue::flags */
#define WQ_FLAG_EXCLUSIVE       0x01    /* wake-one waiter: see __add_wait_queue_exclusive() */
#define WQ_FLAG_WOKEN           0x02

/* One waiter queued on a wait queue head. */
struct __wait_queue {
        unsigned int            flags;          /* WQ_FLAG_* bits above */
        void                    *private;       /* the waiting task (set by init_waitqueue_entry()) */
        wait_queue_func_t       func;           /* callback invoked on wakeup */
        struct list_head        task_list;      /* link in __wait_queue_head::task_list */
};
26
/* Identifies the bit (or atomic_t) a bit-waiter is waiting on. */
struct wait_bit_key {
        void                    *flags;         /* word containing the awaited bit */
        int                     bit_nr;         /* bit index within *flags, or the sentinel below */
#define WAIT_ATOMIC_T_BIT_NR    -1              /* key refers to an atomic_t, not a bit */
        unsigned long           timeout;        /* presumably the jiffies deadline for timed
                                                 * bit-waits -- confirm against
                                                 * out_of_line_wait_on_bit_timeout() */
};

/* A wait queue entry tagged with the bit/word it waits for. */
struct wait_bit_queue {
        struct wait_bit_key     key;
        wait_queue_t            wait;
};

/* Head of a wait queue: the waiter list, protected by @lock. */
struct __wait_queue_head {
        spinlock_t              lock;
        struct list_head        task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
46
/*
 * Macros for declaration and initialisation of the datatypes
 */

/* Static initializer for a wait queue entry owned by task @tsk. */
#define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
        .private        = tsk,                                          \
        .func           = default_wake_function,                        \
        .task_list      = { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)                                    \
        wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

/* Static initializer for a wait queue head: unlocked, empty list. */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
        .task_list      = { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                           \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                              \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);

/*
 * Runtime initialisation.  The per-call-site static lock_class_key gives
 * lockdep a distinct lock class for every init_waitqueue_head() caller.
 */
#define init_waitqueue_head(q)                          \
        do {                                            \
                static struct lock_class_key __key;     \
                                                        \
                __init_waitqueue_head((q), #q, &__key); \
        } while (0)

#ifdef CONFIG_LOCKDEP
/*
 * Under lockdep, on-stack heads must be initialised at runtime; the
 * statement expression both initialises the head and yields it as the
 * initializer value.
 */
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
        ({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
        wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif
89
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
91 {
92         q->flags        = 0;
93         q->private      = p;
94         q->func         = default_wake_function;
95 }
96
97 static inline void
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func)
99 {
100         q->flags        = 0;
101         q->private      = NULL;
102         q->func         = func;
103 }
104
/**
 * waitqueue_active -- locklessly test for waiters on the queue
 * @q: the waitqueue to test for waiters
 *
 * returns true if the wait list is not empty
 *
 * NOTE: this function is lockless and requires care, incorrect usage _will_
 * lead to sporadic and non-obvious failure.
 *
 * Use either while holding wait_queue_head_t::lock or when used for wakeups
 * with an extra smp_mb() like:
 *
 *      CPU0 - waker                    CPU1 - waiter
 *
 *                                      for (;;) {
 *      @cond = true;                     prepare_to_wait(&wq, &wait, state);
 *      smp_mb();                         // smp_mb() from set_current_state()
 *      if (waitqueue_active(wq))         if (@cond)
 *        wake_up(wq);                      break;
 *                                        schedule();
 *                                      }
 *                                      finish_wait(&wq, &wait);
 *
 * Because without the explicit smp_mb() it's possible for the
 * waitqueue_active() load to get hoisted over the @cond store such that we'll
 * observe an empty wait list while the waiter might not observe @cond.
 *
 * Also note that this 'optimization' trades a spin_lock() for an smp_mb(),
 * which (when the lock is uncontended) are of roughly equal cost.
 */
static inline int waitqueue_active(wait_queue_head_t *q)
{
        /* Lockless list check -- see the barrier requirements above. */
        return !list_empty(&q->task_list);
}
139
extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

/*
 * NOTE(review): the double-underscore helpers below manipulate the list
 * without taking wait_queue_head_t::lock -- by kernel convention the
 * caller is expected to hold it (confirm against kernel/sched/wait.c).
 */
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
        list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void
__add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}

/* Add @new at the tail of the queue. */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
                                         wait_queue_t *new)
{
        list_add_tail(&new->task_list, &head->task_list);
}

/* Tail insertion plus WQ_FLAG_EXCLUSIVE -- wake-one waiter at the tail. */
static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(q, wait);
}

/* Unlink @old from @head. */
static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
        list_del(&old->task_list);
}
177
typedef int wait_bit_action_f(struct wait_bit_key *, int mode);

/* Core wakeup / bit-wait primitives, implemented in the scheduler. */
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
void wake_up_bit(void *, int);
void wake_up_atomic_t(atomic_t *);
int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_bit_timeout(void *, int, wait_bit_action_f *, unsigned, unsigned long);
int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

/*
 * The third argument of __wake_up() limits how many exclusive waiters
 * are woken; the _all variants pass 0, i.e. no limit.
 */
#define wake_up(x)                      __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)               __wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)                  __wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)               __wake_up_locked((x), TASK_NORMAL, 1)
#define wake_up_all_locked(x)           __wake_up_locked((x), TASK_NORMAL, 0)

#define wake_up_interruptible(x)        __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)    __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)   __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 * The event mask @m is passed as the @key cookie to the waiters'
 * wakeup callbacks.
 */
#define wake_up_poll(x, m)                                              \
        __wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)                                       \
        __wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)                                \
        __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)                           \
        __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))
217
/*
 * Wrap @condition for the timeout variants.  __ret (the remaining
 * jiffies, in scope at the expansion site) is forced to 1 when the
 * condition turns true exactly as the timeout expires, so callers can
 * still distinguish "condition met" (>= 1) from "timed out" (0).
 */
#define ___wait_cond_timeout(condition)                                 \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})
225
/*
 * True when a wait in @state can be interrupted by a signal; for a
 * non-constant @state we conservatively assume it can be.
 *
 * The stray trailing '\' after the closing parenthesis was removed: it
 * silently continued the macro onto the following (blank) line, and
 * would swallow any non-blank line ever added there.
 */
#define ___wait_is_interruptible(state)                                 \
        (!__builtin_constant_p(state) ||                                \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
229
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 *
 * On signal delivery in an interruptible @state, __ret takes the
 * (negative) value from prepare_to_wait_event(); an exclusive waiter
 * additionally runs abort_exclusive_wait() and skips finish_wait()
 * via the __out label.
 */

#define ___wait_event(wq, condition, state, exclusive, ret, cmd)        \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_t __wait;                                            \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        INIT_LIST_HEAD(&__wait.task_list);                              \
        if (exclusive)                                                  \
                __wait.flags = WQ_FLAG_EXCLUSIVE;                       \
        else                                                            \
                __wait.flags = 0;                                       \
                                                                        \
        for (;;) {                                                      \
                long __int = prepare_to_wait_event(&wq, &__wait, state);\
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        if (exclusive) {                                \
                                abort_exclusive_wait(&wq, &__wait,      \
                                                     state, NULL);      \
                                goto __out;                             \
                        }                                               \
                        break;                                          \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq, &__wait);                                      \
__out:  __ret;                                                          \
})
275
#define __wait_event(wq, condition)                                     \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            schedule())

/**
 * wait_event - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)                                       \
do {                                                                    \
        might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __wait_event(wq, condition);                                    \
} while (0)

#define __io_wait_event(wq, condition)                                  \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            io_schedule())

/*
 * io_wait_event() -- like wait_event() but with io_schedule(), so the
 * sleep is accounted as I/O wait.
 */
#define io_wait_event(wq, condition)                                    \
do {                                                                    \
        might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __io_wait_event(wq, condition);                                 \
} while (0)
314
#define __wait_event_freezable(wq, condition)                           \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                            schedule(); try_to_freeze())

/**
 * wait_event_freezable - sleep (or freeze) until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE -- so as not to contribute
 * to system load) until the @condition evaluates to true. The
 * @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_freezable(wq, condition)                             \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_freezable(wq, condition);          \
        __ret;                                                          \
})
339
#define __wait_event_timeout(wq, condition, timeout)                    \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_UNINTERRUPTIBLE, 0, timeout,                 \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)                      \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_timeout(wq, condition, timeout);   \
        __ret;                                                          \
})

#define __wait_event_freezable_timeout(wq, condition, timeout)          \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      __ret = schedule_timeout(__ret); try_to_freeze())

/*
 * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid
 * increasing load and is freezable.  Return values are as for
 * wait_event_timeout(), or a negative error (e.g. -ERESTARTSYS) if
 * interrupted by a signal.
 */
#define wait_event_freezable_timeout(wq, condition, timeout)            \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
        __ret;                                                          \
})
390
#define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)           \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0,  \
                            cmd1; schedule(); cmd2)
/*
 * Just like wait_event_cmd(), except it sets exclusive flag
 *
 * NOTE(review): unlike wait_event(), neither this nor wait_event_cmd()
 * calls might_sleep() -- confirm whether that is intentional.
 */
#define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2)             \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2);          \
} while (0)

#define __wait_event_cmd(wq, condition, cmd1, cmd2)                     \
        (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
                            cmd1; schedule(); cmd2)

/**
 * wait_event_cmd - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @cmd1: the command will be executed before sleep
 * @cmd2: the command will be executed after sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_cmd(wq, condition, cmd1, cmd2)                       \
do {                                                                    \
        if (condition)                                                  \
                break;                                                  \
        __wait_event_cmd(wq, condition, cmd1, cmd2);                    \
} while (0)
428
/* A negative return (e.g. -ERESTARTSYS) comes from prepare_to_wait_event(). */
#define __wait_event_interruptible(wq, condition)                       \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
                      schedule())

/**
 * wait_event_interruptible - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible(wq, condition);      \
        __ret;                                                          \
})

#define __wait_event_interruptible_timeout(wq, condition, timeout)      \
        ___wait_event(wq, ___wait_cond_timeout(condition),              \
                      TASK_INTERRUPTIBLE, 0, timeout,                   \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed, or -%ERESTARTSYS if it was
 * interrupted by a signal.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)        \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout(condition))                           \
                __ret = __wait_event_interruptible_timeout(wq,          \
                                                condition, timeout);    \
        __ret;                                                          \
})
491
/*
 * Common implementation of the hrtimeout waits: arm a high-resolution
 * timer (unless @timeout is KTIME_MAX, i.e. wait forever) and sleep in
 * @state.  __t.task is presumably cleared when the timer fires (see
 * hrtimer_init_sleeper() -- confirm), which the wait loop maps to -ETIME.
 */
#define __wait_event_hrtimeout(wq, condition, timeout, state)           \
({                                                                      \
        int __ret = 0;                                                  \
        struct hrtimer_sleeper __t;                                     \
                                                                        \
        hrtimer_init_on_stack(&__t.timer, CLOCK_MONOTONIC,              \
                              HRTIMER_MODE_REL);                        \
        hrtimer_init_sleeper(&__t, current);                            \
        if ((timeout).tv64 != KTIME_MAX)                                \
                hrtimer_start_range_ns(&__t.timer, timeout,             \
                                       current->timer_slack_ns,         \
                                       HRTIMER_MODE_REL);               \
                                                                        \
        __ret = ___wait_event(wq, condition, state, 0, 0,               \
                if (!__t.task) {                                        \
                        __ret = -ETIME;                                 \
                        break;                                          \
                }                                                       \
                schedule());                                            \
                                                                        \
        hrtimer_cancel(&__t.timer);                                     \
        destroy_hrtimer_on_stack(&__t.timer);                           \
        __ret;                                                          \
})
516
/**
 * wait_event_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true.  Being uninterruptible, the sleep is
 * not broken by signals.  The @condition is checked each time the
 * waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, or -ETIME if the timeout
 * elapsed.
 */
#define wait_event_hrtimeout(wq, condition, timeout)                    \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_UNINTERRUPTIBLE);   \
        __ret;                                                          \
})

/**
 * wait_event_interruptible_hrtimeout - sleep until a condition gets true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, as a ktime_t
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if @condition became true, -ERESTARTSYS if it was
 * interrupted by a signal, or -ETIME if the timeout elapsed.
 */
#define wait_event_interruptible_hrtimeout(wq, condition, timeout)      \
({                                                                      \
        long __ret = 0;                                                 \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_hrtimeout(wq, condition, timeout,  \
                                               TASK_INTERRUPTIBLE);     \
        __ret;                                                          \
})
568
#define __wait_event_interruptible_exclusive(wq, condition)             \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
                      schedule())

/*
 * Like wait_event_interruptible(), but the waiter is queued with
 * WQ_FLAG_EXCLUSIVE and so participates in wake-one semantics.
 */
#define wait_event_interruptible_exclusive(wq, condition)               \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_interruptible_exclusive(wq, condition);\
        __ret;                                                          \
})


#define __wait_event_freezable_exclusive(wq, condition)                 \
        ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0,          \
                        schedule(); try_to_freeze())

/*
 * Exclusive variant of wait_event_freezable(): interruptible,
 * freezable, and queued with WQ_FLAG_EXCLUSIVE.
 */
#define wait_event_freezable_exclusive(wq, condition)                   \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_event_freezable_exclusive(wq, condition);\
        __ret;                                                          \
})
595
596
/*
 * Common wait loop behind the wait_event_interruptible*locked* macros.
 *
 * Must be entered with wq.lock held.  The lock is dropped only around
 * schedule() — with spin_unlock_irq()/spin_lock_irq() when @irq is
 * non-zero, plain spin_unlock()/spin_lock() otherwise — and is held
 * again both when @condition is evaluated and when the macro returns.
 *
 * A non-zero @exclusive queues the waiter with WQ_FLAG_EXCLUSIVE set,
 * so a wake-up stops after waking this waiter.
 *
 * Evaluates to 0 once @condition became true, or -ERESTARTSYS if a
 * signal arrived first.  On the signal path the loop breaks before
 * dropping the lock, so the lock is never released in that case.
 */
#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({                                                                      \
	int __ret = 0;                                                  \
	DEFINE_WAIT(__wait);                                            \
	if (exclusive)                                                  \
		__wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
	do {                                                            \
		/* autoremove_wake_function may have unlinked us; requeue */ \
		if (likely(list_empty(&__wait.task_list)))              \
			__add_wait_queue_tail(&(wq), &__wait);          \
		set_current_state(TASK_INTERRUPTIBLE);                  \
		if (signal_pending(current)) {                          \
			__ret = -ERESTARTSYS;                           \
			break;                                          \
		}                                                       \
		if (irq)                                                \
			spin_unlock_irq(&(wq).lock);                    \
		else                                                    \
			spin_unlock(&(wq).lock);                        \
		schedule();                                             \
		if (irq)                                                \
			spin_lock_irq(&(wq).lock);                      \
		else                                                    \
			spin_lock(&(wq).lock);                          \
	} while (!(condition));                                         \
	__remove_wait_queue(&(wq), &__wait);                            \
	__set_current_state(TASK_RUNNING);                              \
	__ret;                                                          \
})
625
626
/**
 * wait_event_interruptible_locked - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)                  \
	((condition)                                                    \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
653
/**
 * wait_event_interruptible_locked_irq - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)              \
	((condition)                                                    \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
680
/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, further exclusive waiters
 * queued behind it on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)        \
	((condition)                                                    \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
711
/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock being held.  This spinlock is
 * unlocked while sleeping but @condition testing is done while lock
 * is held and when this macro exits the lock is held.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq()
 * functions which must match the way they are locked/unlocked outside
 * of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken up, further exclusive waiters
 * queued behind it on the list are not considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)    \
	((condition)                                                    \
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
742
743
/* Sleep in TASK_KILLABLE: only fatal signals interrupt the wait. */
#define __wait_event_killable(wq, condition)                            \
	___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
746
/**
 * wait_event_killable - sleep until a condition gets true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)                              \
({                                                                      \
	int __ret = 0;                                                  \
	might_sleep();                                                  \
	if (!(condition))                                               \
		__ret = __wait_event_killable(wq, condition);           \
	__ret;                                                          \
})
770
771
/*
 * Uninterruptible wait with @lock dropped (spin_unlock_irq) around
 * @cmd and schedule(), and reacquired before @condition is rechecked.
 * The ___wait_event() result is discarded — this never fails.
 */
#define __wait_event_lock_irq(wq, condition, lock, cmd)                 \
	(void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0,  \
			    spin_unlock_irq(&lock);                     \
			    cmd;                                        \
			    schedule();                                 \
			    spin_lock_irq(&lock))
778
/**
 * wait_event_lock_irq_cmd - sleep until a condition gets true. The
 *                           condition is checked under the lock. This
 *                           is expected to be called with the lock
 *                           taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd
 *        and schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * This macro has no return value; use the _interruptible variants when
 * a result is needed.
 */
#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)               \
do {                                                                    \
	if (condition)                                                  \
		break;                                                  \
	__wait_event_lock_irq(wq, condition, lock, cmd);                \
} while (0)
808
/**
 * wait_event_lock_irq - sleep until a condition gets true. The
 *                       condition is checked under the lock. This
 *                       is expected to be called with the lock
 *                       taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * This macro has no return value; use the _interruptible variants when
 * a result is needed.
 */
#define wait_event_lock_irq(wq, condition, lock)                        \
do {                                                                    \
	if (condition)                                                  \
		break;                                                  \
	__wait_event_lock_irq(wq, condition, lock, );                   \
} while (0)
835
836
/*
 * Interruptible variant of __wait_event_lock_irq(): same lock handling
 * around @cmd and schedule(), but sleeps in TASK_INTERRUPTIBLE and
 * evaluates to ___wait_event()'s result (0 or -ERESTARTSYS).
 */
#define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd)   \
	___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0,          \
		      spin_unlock_irq(&lock);                           \
		      cmd;                                              \
		      schedule();                                       \
		      spin_lock_irq(&lock))
843
/**
 * wait_event_interruptible_lock_irq_cmd - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected to
 *              be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before cmd and
 *        schedule() and reacquired afterwards.
 * @cmd: a command which is invoked outside the critical section before
 *       sleep, on every iteration of the wait loop
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before invoking the cmd and going to sleep and is reacquired
 * afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
({                                                                      \
	int __ret = 0;                                                  \
	if (!(condition))                                               \
		__ret = __wait_event_interruptible_lock_irq(wq,         \
						condition, lock, cmd);  \
	__ret;                                                          \
})
877
/**
 * wait_event_interruptible_lock_irq - sleep until a condition gets true.
 *              The condition is checked under the lock. This is expected
 *              to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro will return -ERESTARTSYS if it was interrupted by a signal
 * and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_lock_irq(wq, condition, lock)          \
({                                                                      \
	int __ret = 0;                                                  \
	if (!(condition))                                               \
		__ret = __wait_event_interruptible_lock_irq(wq,         \
						condition, lock,);      \
	__ret;                                                          \
})
908
/*
 * Timeout-aware worker for wait_event_interruptible_lock_irq_timeout().
 * Drops @lock (spin_unlock_irq) around schedule_timeout() and reacquires
 * it before rechecking @condition; the remaining jiffies are threaded
 * through ___wait_event()'s internal __ret.
 *
 * Note: no trailing semicolon — this macro is used in expression
 * context (`__ret = __wait_event_...(...)`), and a stray `;` would
 * inject an empty statement at every use site, breaking constructs
 * such as `if (...) x = macro(...); else ...`.
 */
#define __wait_event_interruptible_lock_irq_timeout(wq, condition,      \
						    lock, timeout)      \
	___wait_event(wq, ___wait_cond_timeout(condition),              \
		      TASK_INTERRUPTIBLE, 0, timeout,                   \
		      spin_unlock_irq(&lock);                           \
		      __ret = schedule_timeout(__ret);                  \
		      spin_lock_irq(&lock))
916
/**
 * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets
 *              true or a timeout elapses. The condition is checked under
 *              the lock. This is expected to be called with the lock taken.
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @lock: a locked spinlock_t, which will be released before schedule()
 *        and reacquired afterwards.
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or signal is received. The @condition is
 * checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * This is supposed to be called while holding the lock. The lock is
 * dropped before going to sleep and is reacquired afterwards.
 *
 * The macro returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies otherwise
 * if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,  \
						  timeout)              \
({                                                                      \
	long __ret = timeout;                                           \
	if (!___wait_cond_timeout(condition))                           \
		__ret = __wait_event_interruptible_lock_irq_timeout(    \
					wq, condition, lock, timeout);  \
	__ret;                                                          \
})
950
/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
/* Queue/dequeue helpers used by DEFINE_WAIT()-style waiters. */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
/* Wake callbacks installable as wait_queue_t::func. */
int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
963
/*
 * Declare an on-stack wait_queue_t for current with @function as the
 * wake callback.  The list head is self-initialised, so list_empty()
 * on task_list tells whether the entry is currently queued.
 */
#define DEFINE_WAIT_FUNC(name, function)                                \
	wait_queue_t name = {                                           \
		.private        = current,                              \
		.func           = function,                             \
		.task_list      = LIST_HEAD_INIT((name).task_list),     \
	}
970
/* On-stack wait entry using the default autoremove wake callback. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
972
/*
 * Declare an on-stack wait_bit_queue for current, keyed on bit @bit of
 * @word, with wake_bit_function() as the wake callback.
 */
#define DEFINE_WAIT_BIT(name, word, bit)                                \
	struct wait_bit_queue name = {                                  \
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),           \
		.wait   = {                                             \
			.private        = current,                      \
			.func           = wake_bit_function,            \
			.task_list      =                               \
				LIST_HEAD_INIT((name).wait.task_list),  \
		},                                                      \
	}
983
/*
 * (Re)initialise a preallocated/embedded wait_queue_t for current:
 * installs autoremove_wake_function, resets the list linkage and
 * clears flags (including WQ_FLAG_EXCLUSIVE).
 */
#define init_wait(wait)                                                 \
	do {                                                            \
		(wait)->private = current;                              \
		(wait)->func = autoremove_wake_function;                \
		INIT_LIST_HEAD(&(wait)->task_list);                     \
		(wait)->flags = 0;                                      \
	} while (0)
991
992
/* Stock @action implementations for the wait_on_bit*() helpers below. */
extern int bit_wait(struct wait_bit_key *, int);
extern int bit_wait_io(struct wait_bit_key *, int);
extern int bit_wait_timeout(struct wait_bit_key *, int);
extern int bit_wait_io_timeout(struct wait_bit_key *, int);
997
998 /**
999  * wait_on_bit - wait for a bit to be cleared
1000  * @word: the word being waited on, a kernel virtual address
1001  * @bit: the bit of the word being waited on
1002  * @mode: the task state to sleep in
1003  *
1004  * There is a standard hashed waitqueue table for generic use. This
1005  * is the part of the hashtable's accessor API that waits on a bit.
1006  * For instance, if one were to have waiters on a bitflag, one would
1007  * call wait_on_bit() in threads waiting for the bit to clear.
1008  * One uses wait_on_bit() where one is waiting for the bit to clear,
1009  * but has no intention of setting it.
1010  * Returned value will be zero if the bit was cleared, or non-zero
1011  * if the process received a signal and the mode permitted wakeup
1012  * on that signal.
1013  */
1014 static inline int
1015 wait_on_bit(unsigned long *word, int bit, unsigned mode)
1016 {
1017         might_sleep();
1018         if (!test_bit(bit, word))
1019                 return 0;
1020         return out_of_line_wait_on_bit(word, bit,
1021                                        bit_wait,
1022                                        mode);
1023 }
1024
1025 /**
1026  * wait_on_bit_io - wait for a bit to be cleared
1027  * @word: the word being waited on, a kernel virtual address
1028  * @bit: the bit of the word being waited on
1029  * @mode: the task state to sleep in
1030  *
1031  * Use the standard hashed waitqueue table to wait for a bit
1032  * to be cleared.  This is similar to wait_on_bit(), but calls
1033  * io_schedule() instead of schedule() for the actual waiting.
1034  *
1035  * Returned value will be zero if the bit was cleared, or non-zero
1036  * if the process received a signal and the mode permitted wakeup
1037  * on that signal.
1038  */
1039 static inline int
1040 wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
1041 {
1042         might_sleep();
1043         if (!test_bit(bit, word))
1044                 return 0;
1045         return out_of_line_wait_on_bit(word, bit,
1046                                        bit_wait_io,
1047                                        mode);
1048 }
1049
1050 /**
1051  * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
1052  * @word: the word being waited on, a kernel virtual address
1053  * @bit: the bit of the word being waited on
1054  * @mode: the task state to sleep in
1055  * @timeout: timeout, in jiffies
1056  *
1057  * Use the standard hashed waitqueue table to wait for a bit
1058  * to be cleared. This is similar to wait_on_bit(), except also takes a
1059  * timeout parameter.
1060  *
1061  * Returned value will be zero if the bit was cleared before the
1062  * @timeout elapsed, or non-zero if the @timeout elapsed or process
1063  * received a signal and the mode permitted wakeup on that signal.
1064  */
1065 static inline int
1066 wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
1067                     unsigned long timeout)
1068 {
1069         might_sleep();
1070         if (!test_bit(bit, word))
1071                 return 0;
1072         return out_of_line_wait_on_bit_timeout(word, bit,
1073                                                bit_wait_timeout,
1074                                                mode, timeout);
1075 }
1076
1077 /**
1078  * wait_on_bit_action - wait for a bit to be cleared
1079  * @word: the word being waited on, a kernel virtual address
1080  * @bit: the bit of the word being waited on
1081  * @action: the function used to sleep, which may take special actions
1082  * @mode: the task state to sleep in
1083  *
1084  * Use the standard hashed waitqueue table to wait for a bit
1085  * to be cleared, and allow the waiting action to be specified.
1086  * This is like wait_on_bit() but allows fine control of how the waiting
1087  * is done.
1088  *
1089  * Returned value will be zero if the bit was cleared, or non-zero
1090  * if the process received a signal and the mode permitted wakeup
1091  * on that signal.
1092  */
1093 static inline int
1094 wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
1095                    unsigned mode)
1096 {
1097         might_sleep();
1098         if (!test_bit(bit, word))
1099                 return 0;
1100         return out_of_line_wait_on_bit(word, bit, action, mode);
1101 }
1102
1103 /**
1104  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
1105  * @word: the word being waited on, a kernel virtual address
1106  * @bit: the bit of the word being waited on
1107  * @mode: the task state to sleep in
1108  *
1109  * There is a standard hashed waitqueue table for generic use. This
1110  * is the part of the hashtable's accessor API that waits on a bit
1111  * when one intends to set it, for instance, trying to lock bitflags.
1112  * For instance, if one were to have waiters trying to set bitflag
1113  * and waiting for it to clear before setting it, one would call
1114  * wait_on_bit() in threads waiting to be able to set the bit.
1115  * One uses wait_on_bit_lock() where one is waiting for the bit to
1116  * clear with the intention of setting it, and when done, clearing it.
1117  *
1118  * Returns zero if the bit was (eventually) found to be clear and was
1119  * set.  Returns non-zero if a signal was delivered to the process and
1120  * the @mode allows that signal to wake the process.
1121  */
1122 static inline int
1123 wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
1124 {
1125         might_sleep();
1126         if (!test_and_set_bit(bit, word))
1127                 return 0;
1128         return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
1129 }
1130
1131 /**
1132  * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
1133  * @word: the word being waited on, a kernel virtual address
1134  * @bit: the bit of the word being waited on
1135  * @mode: the task state to sleep in
1136  *
1137  * Use the standard hashed waitqueue table to wait for a bit
1138  * to be cleared and then to atomically set it.  This is similar
1139  * to wait_on_bit(), but calls io_schedule() instead of schedule()
1140  * for the actual waiting.
1141  *
1142  * Returns zero if the bit was (eventually) found to be clear and was
1143  * set.  Returns non-zero if a signal was delivered to the process and
1144  * the @mode allows that signal to wake the process.
1145  */
1146 static inline int
1147 wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
1148 {
1149         might_sleep();
1150         if (!test_and_set_bit(bit, word))
1151                 return 0;
1152         return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
1153 }
1154
1155 /**
1156  * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
1157  * @word: the word being waited on, a kernel virtual address
1158  * @bit: the bit of the word being waited on
1159  * @action: the function used to sleep, which may take special actions
1160  * @mode: the task state to sleep in
1161  *
1162  * Use the standard hashed waitqueue table to wait for a bit
1163  * to be cleared and then to set it, and allow the waiting action
1164  * to be specified.
1165  * This is like wait_on_bit() but allows fine control of how the waiting
1166  * is done.
1167  *
1168  * Returns zero if the bit was (eventually) found to be clear and was
1169  * set.  Returns non-zero if a signal was delivered to the process and
1170  * the @mode allows that signal to wake the process.
1171  */
1172 static inline int
1173 wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
1174                         unsigned mode)
1175 {
1176         might_sleep();
1177         if (!test_and_set_bit(bit, word))
1178                 return 0;
1179         return out_of_line_wait_on_bit_lock(word, bit, action, mode);
1180 }
1181
1182 /**
1183  * wait_on_atomic_t - Wait for an atomic_t to become 0
1184  * @val: The atomic value being waited on, a kernel virtual address
1185  * @action: the function used to sleep, which may take special actions
1186  * @mode: the task state to sleep in
1187  *
1188  * Wait for an atomic_t to become 0.  We abuse the bit-wait waitqueue table for
1189  * the purpose of getting a waitqueue, but we set the key to a bit number
1190  * outside of the target 'word'.
1191  */
1192 static inline
1193 int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
1194 {
1195         might_sleep();
1196         if (atomic_read(val) == 0)
1197                 return 0;
1198         return out_of_line_wait_on_atomic_t(val, action, mode);
1199 }
1200
1201 #endif /* _LINUX_WAIT_H */