bcache: Drop some closure stuff
author		Kent Overstreet <kmo@daterainc.com>
		Thu, 25 Jul 2013 00:26:22 +0000 (17:26 -0700)
committer	Kent Overstreet <kmo@daterainc.com>
		Wed, 11 Sep 2013 01:49:13 +0000 (18:49 -0700)
With the recent bcache refactoring, some of the closure code isn't
needed anymore.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
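
For context (not part of the patch): with CLOSURE_BLOCKING gone, closure_wait_event()
always behaves like the old closure_wait_event_sync(), sleeping until the condition is
true, so the _sync/_async variants can be dropped. A minimal caller-side sketch follows,
mirroring the mca_reap() hunk below; the includes and the helper name are illustrative
assumptions, not part of this commit.

	#include "btree.h"	/* assumed to declare struct btree and its io member */
	#include "closure.h"

	/* Illustrative helper (not in the patch): wait for any in-flight
	 * btree node write, as mca_reap() now does. */
	static void example_wait_for_btree_write(struct btree *b)
	{
		struct closure cl;

		closure_init_stack(&cl);

		/* After this commit, closure_wait_event() always puts the
		 * stack closure on the wait list and sleeps until the
		 * condition holds. */
		closure_wait_event(&b->io.wait, &cl,
				   atomic_read(&b->io.cl.remaining) == -1);
	}
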
drivers/md/bcache/btree.c
drivers/md/bcache/closure.c
drivers/md/bcache/closure.h

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 2d4ec53b71198979c9132f376854c0174a5253ae..876ef88851163302b2e56e63a6664b2347c4238e 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -407,7 +407,7 @@ static void do_btree_node_write(struct btree *b)
        b->bio = bch_bbio_alloc(b->c);
 
        b->bio->bi_end_io       = btree_node_write_endio;
-       b->bio->bi_private      = &b->io.cl;
+       b->bio->bi_private      = cl;
        b->bio->bi_rw           = REQ_META|WRITE_SYNC|REQ_FUA;
        b->bio->bi_size         = set_blocks(i, b->c) * block_bytes(b->c);
        bch_bio_map(b->bio, i);
@@ -672,8 +672,8 @@ static int mca_reap(struct btree *b, unsigned min_order, bool flush)
        }
 
        /* wait for any in flight btree write */
-       closure_wait_event_sync(&b->io.wait, &cl,
-               atomic_read(&b->io.cl.remaining) == -1);
+       closure_wait_event(&b->io.wait, &cl,
+                          atomic_read(&b->io.cl.remaining) == -1);
 
        return 0;
 }
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 9aba2017f0d1685ac5858ccf716c1f829681c853..a9b5b93ad48fa327464b9b57692ba15e1194912d 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
 
 #include "closure.h"
 
-void closure_queue(struct closure *cl)
-{
-       struct workqueue_struct *wq = cl->wq;
-       if (wq) {
-               INIT_WORK(&cl->work, cl->work.func);
-               BUG_ON(!queue_work(wq, &cl->work));
-       } else
-               cl->fn(cl);
-}
-EXPORT_SYMBOL_GPL(closure_queue);
-
 #define CL_FIELD(type, field)                                  \
        case TYPE_ ## type:                                     \
        return &container_of(cl, struct type, cl)->field
@@ -51,7 +40,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
        int r = flags & CLOSURE_REMAINING_MASK;
 
        BUG_ON(flags & CLOSURE_GUARD_MASK);
-       BUG_ON(!r && (flags & ~(CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING)));
+       BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
 
        /* Must deliver precisely one wakeup */
        if (r == 1 && (flags & CLOSURE_SLEEPING))
@@ -59,7 +48,6 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 
        if (!r) {
                if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
-                       /* CLOSURE_BLOCKING might be set - clear it */
                        atomic_set(&cl->remaining,
                                   CLOSURE_REMAINING_INITIALIZER);
                        closure_queue(cl);
@@ -183,13 +171,13 @@ bool closure_trylock(struct closure *cl, struct closure *parent)
                           CLOSURE_REMAINING_INITIALIZER) != -1)
                return false;
 
-       closure_set_ret_ip(cl);
-
        smp_mb();
+
        cl->parent = parent;
        if (parent)
                closure_get(parent);
 
+       closure_set_ret_ip(cl);
        closure_debug_create(cl);
        return true;
 }
@@ -205,8 +193,8 @@ void __closure_lock(struct closure *cl, struct closure *parent,
                if (closure_trylock(cl, parent))
                        return;
 
-               closure_wait_event_sync(wait_list, &wait,
-                                       atomic_read(&cl->remaining) == -1);
+               closure_wait_event(wait_list, &wait,
+                                  atomic_read(&cl->remaining) == -1);
        }
 }
 EXPORT_SYMBOL_GPL(__closure_lock);
@@ -304,11 +292,10 @@ static int debug_seq_show(struct seq_file *f, void *data)
                           cl, (void *) cl->ip, cl->fn, cl->parent,
                           r & CLOSURE_REMAINING_MASK);
 
-               seq_printf(f, "%s%s%s%s%s%s\n",
+               seq_printf(f, "%s%s%s%s%s\n",
                           test_bit(WORK_STRUCT_PENDING,
                                    work_data_bits(&cl->work)) ? "Q" : "",
                           r & CLOSURE_RUNNING  ? "R" : "",
-                          r & CLOSURE_BLOCKING ? "B" : "",
                           r & CLOSURE_STACK    ? "S" : "",
                           r & CLOSURE_SLEEPING ? "Sl" : "",
                           r & CLOSURE_TIMER    ? "T" : "");
diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
index ab011f03801f63d89d68cf18b6681612bfcdfeee..8d580599de137569e1776e59a00a8c7aab0b55e8 100644
--- a/drivers/md/bcache/closure.h
+++ b/drivers/md/bcache/closure.h
@@ -189,9 +189,6 @@ enum closure_type {
 
 enum closure_state {
        /*
-        * CLOSURE_BLOCKING: Causes closure_wait_event() to block, instead of
-        * waiting asynchronously
-        *
         * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
         * the thread that owns the closure, and cleared by the thread that's
         * waking up the closure.
@@ -218,9 +215,8 @@ enum closure_state {
         * closure with this flag set
         */
 
-       CLOSURE_BITS_START      = (1 << 19),
-       CLOSURE_DESTRUCTOR      = (1 << 19),
-       CLOSURE_BLOCKING        = (1 << 21),
+       CLOSURE_BITS_START      = (1 << 21),
+       CLOSURE_DESTRUCTOR      = (1 << 21),
        CLOSURE_WAITING         = (1 << 23),
        CLOSURE_SLEEPING        = (1 << 25),
        CLOSURE_TIMER           = (1 << 27),
@@ -229,8 +225,8 @@ enum closure_state {
 };
 
 #define CLOSURE_GUARD_MASK                                     \
-       ((CLOSURE_DESTRUCTOR|CLOSURE_BLOCKING|CLOSURE_WAITING|  \
-         CLOSURE_SLEEPING|CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
+       ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_SLEEPING|  \
+         CLOSURE_TIMER|CLOSURE_RUNNING|CLOSURE_STACK) << 1)
 
 #define CLOSURE_REMAINING_MASK         (CLOSURE_BITS_START - 1)
 #define CLOSURE_REMAINING_INITIALIZER  (1|CLOSURE_RUNNING)
@@ -296,7 +292,6 @@ extern unsigned invalid_closure_type(void);
 
 void closure_sub(struct closure *cl, int v);
 void closure_put(struct closure *cl);
-void closure_queue(struct closure *cl);
 void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void closure_sync(struct closure *cl);
@@ -354,11 +349,6 @@ static inline void closure_set_stopped(struct closure *cl)
        atomic_sub(CLOSURE_RUNNING, &cl->remaining);
 }
 
-static inline bool closure_is_stopped(struct closure *cl)
-{
-       return !(atomic_read(&cl->remaining) & CLOSURE_RUNNING);
-}
-
 static inline bool closure_is_unlocked(struct closure *cl)
 {
        return atomic_read(&cl->remaining) == -1;
@@ -429,8 +419,7 @@ do {                                                                \
 static inline void closure_init_stack(struct closure *cl)
 {
        memset(cl, 0, sizeof(struct closure));
-       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|
-                  CLOSURE_BLOCKING|CLOSURE_STACK);
+       atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER|CLOSURE_STACK);
 }
 
 /**
@@ -497,40 +486,6 @@ static inline void __closure_start_sleep(struct closure *cl)
                atomic_add(CLOSURE_SLEEPING, &cl->remaining);
 }
 
-/**
- * closure_blocking() - returns true if the closure is in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- */
-static inline bool closure_blocking(struct closure *cl)
-{
-       return atomic_read(&cl->remaining) & CLOSURE_BLOCKING;
-}
-
-/**
- * set_closure_blocking() - put a closure in blocking mode.
- *
- * If a closure is in blocking mode, closure_wait_event() will sleep until the
- * condition is true instead of waiting asynchronously.
- *
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void set_closure_blocking(struct closure *cl)
-{
-       if (!closure_blocking(cl))
-               atomic_add(CLOSURE_BLOCKING, &cl->remaining);
-}
-
-/*
- * Not thread safe - can only be called by the thread running the closure.
- */
-static inline void clear_closure_blocking(struct closure *cl)
-{
-       if (closure_blocking(cl))
-               atomic_sub(CLOSURE_BLOCKING, &cl->remaining);
-}
-
 /**
  * closure_wake_up() - wake up all closures on a wait list.
  */
@@ -561,63 +516,36 @@ static inline void closure_wake_up(struct closure_waitlist *list)
  * refcount on our closure. If this was a stack allocated closure, that would be
  * bad.
  */
-#define __closure_wait_event(list, cl, condition, _block)              \
+#define closure_wait_event(list, cl, condition)                                \
 ({                                                                     \
-       bool block = _block;                                            \
        typeof(condition) ret;                                          \
                                                                        \
        while (1) {                                                     \
                ret = (condition);                                      \
                if (ret) {                                              \
                        __closure_wake_up(list);                        \
-                       if (block)                                      \
-                               closure_sync(cl);                       \
-                                                                       \
+                       closure_sync(cl);                               \
                        break;                                          \
                }                                                       \
                                                                        \
-               if (block)                                              \
-                       __closure_start_sleep(cl);                      \
-                                                                       \
-               if (!closure_wait(list, cl)) {                          \
-                       if (!block)                                     \
-                               break;                                  \
+               __closure_start_sleep(cl);                              \
                                                                        \
+               if (!closure_wait(list, cl))                            \
                        schedule();                                     \
-               }                                                       \
        }                                                               \
                                                                        \
        ret;                                                            \
 })
 
-/**
- * closure_wait_event() - wait on a condition, synchronously or asynchronously.
- * @list:      the wait list to wait on
- * @cl:                the closure that is doing the waiting
- * @condition: a C expression for the event to wait for
- *
- * If the closure is in blocking mode, sleeps until the @condition evaluates to
- * true - exactly like wait_event().
- *
- * If the closure is not in blocking mode, waits asynchronously; if the
- * condition is currently false the @cl is put onto @list and returns. @list
- * owns a refcount on @cl; closure_sync() or continue_at() may be used later to
- * wait for another thread to wake up @list, which drops the refcount on @cl.
- *
- * Returns the value of @condition; @cl will be on @list iff @condition was
- * false.
- *
- * closure_wake_up(@list) must be called after changing any variable that could
- * cause @condition to become true.
- */
-#define closure_wait_event(list, cl, condition)                                \
-       __closure_wait_event(list, cl, condition, closure_blocking(cl))
-
-#define closure_wait_event_async(list, cl, condition)                  \
-       __closure_wait_event(list, cl, condition, false)
-
-#define closure_wait_event_sync(list, cl, condition)                   \
-       __closure_wait_event(list, cl, condition, true)
+static inline void closure_queue(struct closure *cl)
+{
+       struct workqueue_struct *wq = cl->wq;
+       if (wq) {
+               INIT_WORK(&cl->work, cl->work.func);
+               BUG_ON(!queue_work(wq, &cl->work));
+       } else
+               cl->fn(cl);
+}
 
 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
                                  struct workqueue_struct *wq)