sched/headers: fix up header file dependency on <linux/sched/signal.h>
author		Linus Torvalds <torvalds@linux-foundation.org>
		Tue, 7 Mar 2017 23:33:14 +0000 (15:33 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Wed, 8 Mar 2017 18:36:03 +0000 (10:36 -0800)
The scheduler header file split and cleanups ended up exposing a few
nasty header file dependencies, and in particular it showed how we in
<linux/wait.h> ended up depending on "signal_pending()", which now comes
from <linux/sched/signal.h>.

That's a very subtle and annoying dependency, which already caused a
semantic merge conflict (see commit e58bc927835a "Pull overlayfs updates
from Miklos Szeredi", which added that fixup in the merge commit).

It turns out that we can avoid this dependency _and_ improve code
generation by moving the guts of the fairly nasty helper #define
__wait_event_interruptible_locked() to out-of-line code.  The code that
includes the signal_pending() check is all in the slow-path where we
actually go to sleep waiting for the event anyway, so using a helper
function is the right thing to do.

Using a helper function is also what we already did for the non-locked
versions, see the "__wait_event*()" macros and the "prepare_to_wait*()"
set of helper functions.

We might want to try to unify all these macro games; we have a _lot_ of
subtly different wait-event loops.  But this is the minimal patch to fix
the annoying header dependency.

Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
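
As context for the diff below, here is a hypothetical caller-side sketch of how
the locked wait-event macros are typically used.  The example_dev, wq and
data_ready names are illustrative only and not part of this patch; the point is
that the condition is tested with wq.lock held, and the lock is dropped only
inside the new out-of-line helpers while the task actually sleeps.

	#include <linux/types.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>

	/* Illustrative device structure; not from this patch. */
	struct example_dev {
		wait_queue_head_t wq;	/* wq.lock also guards data_ready */
		bool data_ready;
	};

	static int example_wait_for_data(struct example_dev *dev)
	{
		int ret;

		spin_lock(&dev->wq.lock);
		/* condition is rechecked under wq.lock after every wakeup */
		ret = wait_event_interruptible_locked(dev->wq, dev->data_ready);
		spin_unlock(&dev->wq.lock);

		return ret;	/* 0, or -ERESTARTSYS if a signal arrived */
	}

	/*
	 * The producer side would set data_ready and call wake_up_locked()
	 * while holding the same wq.lock.
	 */
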
include/linux/wait.h
kernel/sched/wait.c

diff --git a/include/linux/wait.h b/include/linux/wait.h
index aacb1282d19a38d7b633bf7aedcc40fa258584f8..db076ca7f11da03f474be67f792e1189b96425eb 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -620,30 +620,19 @@ do {                                                                      \
        __ret;                                                          \
 })
 
+extern int do_wait_intr(wait_queue_head_t *, wait_queue_t *);
+extern int do_wait_intr_irq(wait_queue_head_t *, wait_queue_t *);
 
-#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
+#define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \
 ({                                                                     \
-       int __ret = 0;                                                  \
+       int __ret;                                                      \
        DEFINE_WAIT(__wait);                                            \
        if (exclusive)                                                  \
                __wait.flags |= WQ_FLAG_EXCLUSIVE;                      \
        do {                                                            \
-               if (likely(list_empty(&__wait.task_list)))              \
-                       __add_wait_queue_tail(&(wq), &__wait);          \
-               set_current_state(TASK_INTERRUPTIBLE);                  \
-               if (signal_pending(current)) {                          \
-                       __ret = -ERESTARTSYS;                           \
+               __ret = fn(&(wq), &__wait);                             \
+               if (__ret)                                              \
                        break;                                          \
-               }                                                       \
-               if (irq)                                                \
-                       spin_unlock_irq(&(wq).lock);                    \
-               else                                                    \
-                       spin_unlock(&(wq).lock);                        \
-               schedule();                                             \
-               if (irq)                                                \
-                       spin_lock_irq(&(wq).lock);                      \
-               else                                                    \
-                       spin_lock(&(wq).lock);                          \
        } while (!(condition));                                         \
        __remove_wait_queue(&(wq), &__wait);                            \
        __set_current_state(TASK_RUNNING);                              \
@@ -676,7 +665,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_locked(wq, condition)                 \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr))
 
 /**
  * wait_event_interruptible_locked_irq - sleep until a condition gets true
@@ -703,7 +692,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_locked_irq(wq, condition)             \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq))
 
 /**
  * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition gets true
@@ -734,7 +723,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_exclusive_locked(wq, condition)       \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr))
 
 /**
  * wait_event_interruptible_exclusive_locked_irq - sleep until a condition gets true
@@ -765,7 +754,7 @@ do {                                                                        \
  */
 #define wait_event_interruptible_exclusive_locked_irq(wq, condition)   \
        ((condition)                                                    \
-        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
+        ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq))
 
 
 #define __wait_event_killable(wq, condition)                           \
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 4d2ea6f255683811f6eefb5d95fb18eb9e7c7192..b8c84c6dee64bd31ca28b4cfe7283a55945aa596 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -242,6 +242,45 @@ long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state)
 }
 EXPORT_SYMBOL(prepare_to_wait_event);
 
+/*
+ * Note! These two wait functions are entered with the
+ * wait-queue lock held (and interrupts off in the _irq
+ * case), so there is no race with testing the wakeup
+ * condition in the caller before they add the wait
+ * entry to the wake queue.
+ */
+int do_wait_intr(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+       if (likely(list_empty(&wait->task_list)))
+               __add_wait_queue_tail(wq, wait);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       if (signal_pending(current))
+               return -ERESTARTSYS;
+
+       spin_unlock(&wq->lock);
+       schedule();
+       spin_lock(&wq->lock);
+       return 0;
+}
+EXPORT_SYMBOL(do_wait_intr);
+
+int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_t *wait)
+{
+       if (likely(list_empty(&wait->task_list)))
+               __add_wait_queue_tail(wq, wait);
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       if (signal_pending(current))
+               return -ERESTARTSYS;
+
+       spin_unlock_irq(&wq->lock);
+       schedule();
+       spin_lock_irq(&wq->lock);
+       return 0;
+}
+EXPORT_SYMBOL(do_wait_intr_irq);
+
 /**
  * finish_wait - clean up after waiting in a queue
  * @q: waitqueue waited on