From bb1f30cb7d3ba21098f0ee7e0382160ba2599a43 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 28 Jun 2013 09:53:57 +1000 Subject: [PATCH] wait: introduce wait_event_common(wq, condition, state, timeout) 1. 4c663cfc "fix false timeouts when using wait_event_timeout()" is not enough, wait_event_timeout(wq, true, 0) still returns zero. __wait_event_timeout() was already fixed but we need the same logic in wait_event_timeout() if the fast-path check succeeds. 2. The wait_event_timeout/__wait_event_timeout interfaces do not match wait_event(), you can't use __wait_event_timeout() instead of wait_event_timeout() if you do not need the fast-path check. Same for wait_event_interruptible/__wait_event_interruptible, so this patch cleans up rtlx.c, ip_vs_sync.c, and af_irda.c: - __wait_event_interruptible(wq, cond, ret); + ret = __wait_event_interruptible(wq, cond); 3. The wait_event_* macros duplicate the same code. This patch adds a single helper wait_event_common() which hopefully does everything right. The compiler optimizes out the "dead" code when we do not need signal_pending/schedule_timeout. "size vmlinux" reports: text data bss dec hex filename - 4978601 2935080 10104832 18018513 112f0d1 vmlinux + 4977769 2930984 10104832 18013585 112dd91 vmlinux but I think this depends on gcc/config. In particular, wait_event_timeout(true, non_const_timeout) should generate more code in the non-void context because the patch adds the additional code to fix the 1st problem.
Signed-off-by: Oleg Nesterov Reviewed-by: Tejun Heo Cc: Daniel Vetter Cc: Imre Deak Cc: Lukas Czerner Cc: Samuel Ortiz Cc: Wensong Zhang Cc: Simon Horman Cc: Julian Anastasov Cc: Ralf Baechle Signed-off-by: Andrew Morton --- arch/mips/kernel/rtlx.c | 17 ++- include/linux/wait.h | 181 ++++++++++++-------------------- net/irda/af_irda.c | 5 +- net/netfilter/ipvs/ip_vs_sync.c | 5 +- 4 files changed, 81 insertions(+), 127 deletions(-) diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index d763f11e35e2..580d8b65a092 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -172,8 +172,8 @@ int rtlx_open(int index, int can_sleep) if (rtlx == NULL) { if( (p = vpe_get_shared(tclimit)) == NULL) { if (can_sleep) { - __wait_event_interruptible(channel_wqs[index].lx_queue, - (p = vpe_get_shared(tclimit)), ret); + ret = __wait_event_interruptible(channel_wqs[index].lx_queue, + (p = vpe_get_shared(tclimit))); if (ret) goto out_fail; } else { @@ -263,11 +263,11 @@ unsigned int rtlx_read_poll(int index, int can_sleep) /* data available to read? */ if (chan->lx_read == chan->lx_write) { if (can_sleep) { - int ret = 0; + int ret; - __wait_event_interruptible(channel_wqs[index].lx_queue, + ret = __wait_event_interruptible(channel_wqs[index].lx_queue, (chan->lx_read != chan->lx_write) || - sp_stopping, ret); + sp_stopping); if (ret) return ret; @@ -440,14 +440,13 @@ static ssize_t file_write(struct file *file, const char __user * buffer, /* any space left... 
*/ if (!rtlx_write_poll(minor)) { - int ret = 0; + int ret; if (file->f_flags & O_NONBLOCK) return -EAGAIN; - __wait_event_interruptible(channel_wqs[minor].rt_queue, - rtlx_write_poll(minor), - ret); + ret = __wait_event_interruptible(channel_wqs[minor].rt_queue, + rtlx_write_poll(minor)); if (ret) return ret; } diff --git a/include/linux/wait.h b/include/linux/wait.h index f487a4750b7f..5c6dd19efc82 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -179,18 +179,52 @@ wait_queue_head_t *bit_waitqueue(void *, int); #define wake_up_interruptible_sync_poll(x, m) \ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) -#define __wait_event(wq, condition) \ -do { \ +#define __wait_no_timeout(tout) \ + (__builtin_constant_p(tout) && (tout) == MAX_SCHEDULE_TIMEOUT) + +/* uglified signal_pending_state() optimized for constant state */ +#define __wait_signal_pending(state) \ + ((state == TASK_INTERRUPTIBLE) ? signal_pending(current) : \ + (state == TASK_KILLABLE) ? fatal_signal_pending(current) : \ + 0) + +#define __wait_event_common(wq, condition, state, tout) \ +({ \ DEFINE_WAIT(__wait); \ + long __ret = 0, __tout = tout; \ \ for (;;) { \ - prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ - if (condition) \ + prepare_to_wait(&wq, &__wait, state); \ + if (condition) { \ + __ret = __wait_no_timeout(tout) ?: __tout ?: 1; \ + break; \ + } \ + \ + if (__wait_signal_pending(state)) { \ + __ret = -ERESTARTSYS; \ + break; \ + } \ + \ + if (__wait_no_timeout(tout)) \ + schedule(); \ + else if (__tout) \ + __tout = schedule_timeout(__tout); \ + else \ break; \ - schedule(); \ } \ finish_wait(&wq, &__wait); \ -} while (0) + __ret; \ +}) + +#define wait_event_common(wq, condition, state, tout) \ +({ \ + long __ret; \ + if (condition) \ + __ret = __wait_no_timeout(tout) ?: (tout) ?: 1; \ + else \ + __ret = __wait_event_common(wq, condition, state, tout);\ + __ret; \ +}) /** * wait_event - sleep until a condition gets true @@ -204,29 +238,13 @@ do { \ * 
wake_up() has to be called after changing any variable that could * change the result of the wait condition. */ -#define wait_event(wq, condition) \ -do { \ - if (condition) \ - break; \ - __wait_event(wq, condition); \ -} while (0) +#define __wait_event(wq, condition) \ + __wait_event_common(wq, condition, \ + TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) \ -#define __wait_event_timeout(wq, condition, ret) \ -do { \ - DEFINE_WAIT(__wait); \ - \ - for (;;) { \ - prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \ - if (condition) \ - break; \ - ret = schedule_timeout(ret); \ - if (!ret) \ - break; \ - } \ - if (!ret && (condition)) \ - ret = 1; \ - finish_wait(&wq, &__wait); \ -} while (0) +#define wait_event(wq, condition) \ + wait_event_common(wq, condition, \ + TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) \ /** * wait_event_timeout - sleep until a condition gets true or a timeout elapses @@ -245,31 +263,13 @@ do { \ * jiffies (at least 1) if the @condition evaluated to %true before * the @timeout elapsed. 
*/ -#define wait_event_timeout(wq, condition, timeout) \ -({ \ - long __ret = timeout; \ - if (!(condition)) \ - __wait_event_timeout(wq, condition, __ret); \ - __ret; \ -}) +#define __wait_event_timeout(wq, condition, timeout) \ + __wait_event_common(wq, condition, \ + TASK_UNINTERRUPTIBLE, timeout) -#define __wait_event_interruptible(wq, condition, ret) \ -do { \ - DEFINE_WAIT(__wait); \ - \ - for (;;) { \ - prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ - if (condition) \ - break; \ - if (!signal_pending(current)) { \ - schedule(); \ - continue; \ - } \ - ret = -ERESTARTSYS; \ - break; \ - } \ - finish_wait(&wq, &__wait); \ -} while (0) +#define wait_event_timeout(wq, condition, timeout) \ + wait_event_common(wq, condition, \ + TASK_UNINTERRUPTIBLE, timeout) /** * wait_event_interruptible - sleep until a condition gets true @@ -286,35 +286,13 @@ do { \ * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. */ -#define wait_event_interruptible(wq, condition) \ -({ \ - int __ret = 0; \ - if (!(condition)) \ - __wait_event_interruptible(wq, condition, __ret); \ - __ret; \ -}) +#define __wait_event_interruptible(wq, condition) \ + __wait_event_common(wq, condition, \ + TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) -#define __wait_event_interruptible_timeout(wq, condition, ret) \ -do { \ - DEFINE_WAIT(__wait); \ - \ - for (;;) { \ - prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ - if (condition) \ - break; \ - if (!signal_pending(current)) { \ - ret = schedule_timeout(ret); \ - if (!ret) \ - break; \ - continue; \ - } \ - ret = -ERESTARTSYS; \ - break; \ - } \ - if (!ret && (condition)) \ - ret = 1; \ - finish_wait(&wq, &__wait); \ -} while (0) +#define wait_event_interruptible(wq, condition) \ + wait_event_common(wq, condition, \ + TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT) /** * wait_event_interruptible_timeout - sleep until a condition gets true or a timeout elapses @@ -334,13 +312,13 @@ do { \ * a 
signal, or the remaining jiffies (at least 1) if the @condition * evaluated to %true before the @timeout elapsed. */ +#define __wait_event_interruptible_timeout(wq, condition, timeout) \ + __wait_event_common(wq, condition, \ + TASK_INTERRUPTIBLE, timeout) + #define wait_event_interruptible_timeout(wq, condition, timeout) \ -({ \ - long __ret = timeout; \ - if (!(condition)) \ - __wait_event_interruptible_timeout(wq, condition, __ret); \ - __ret; \ -}) + wait_event_common(wq, condition, \ + TASK_INTERRUPTIBLE, timeout) #define __wait_event_hrtimeout(wq, condition, timeout, state) \ ({ \ @@ -607,24 +585,6 @@ do { \ -#define __wait_event_killable(wq, condition, ret) \ -do { \ - DEFINE_WAIT(__wait); \ - \ - for (;;) { \ - prepare_to_wait(&wq, &__wait, TASK_KILLABLE); \ - if (condition) \ - break; \ - if (!fatal_signal_pending(current)) { \ - schedule(); \ - continue; \ - } \ - ret = -ERESTARTSYS; \ - break; \ - } \ - finish_wait(&wq, &__wait); \ -} while (0) - /** * wait_event_killable - sleep until a condition gets true * @wq: the waitqueue to wait on @@ -640,14 +600,13 @@ do { \ * The function will return -ERESTARTSYS if it was interrupted by a * signal and 0 if @condition evaluated to true. 
*/ -#define wait_event_killable(wq, condition) \ -({ \ - int __ret = 0; \ - if (!(condition)) \ - __wait_event_killable(wq, condition, __ret); \ - __ret; \ -}) +#define __wait_event_killable(wq, condition) \ + __wait_event_common(wq, condition, \ + TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT) +#define wait_event_killable(wq, condition) \ + wait_event_common(wq, condition, \ + TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT) #define __wait_event_lock_irq(wq, condition, lock, cmd) \ do { \ diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 0578d4fa00a9..0f676908d15b 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -2563,9 +2563,8 @@ bed: jiffies + msecs_to_jiffies(val)); /* Wait for IR-LMP to call us back */ - __wait_event_interruptible(self->query_wait, - (self->cachedaddr != 0 || self->errno == -ETIME), - err); + err = __wait_event_interruptible(self->query_wait, + (self->cachedaddr != 0 || self->errno == -ETIME)); /* If watchdog is still activated, kill it! */ del_timer(&(self->watchdog)); diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index f6046d9af8d3..ce4239ea9ac9 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -1624,12 +1624,9 @@ static int sync_thread_master(void *data) continue; } while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { - int ret = 0; - __wait_event_interruptible(*sk_sleep(sk), sock_writeable(sk) || - kthread_should_stop(), - ret); + kthread_should_stop()); if (unlikely(kthread_should_stop())) goto done; } -- 2.39.5