bcache: Kill op->cl
author Kent Overstreet <kmo@daterainc.com>
Thu, 25 Jul 2013 01:04:18 +0000 (18:04 -0700)
committer Kent Overstreet <kmo@daterainc.com>
Wed, 11 Sep 2013 01:49:09 +0000 (18:49 -0700)
op->cl isn't used for waiting asynchronously anymore, so killing it is a
fairly trivial refactoring: the few remaining synchronous waits switch to
closures on the caller's stack.
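
For reference, a minimal sketch of the pattern the hunks below apply; it is
illustrative only, with `b` and `op` standing in for whatever btree node and
btree_op the caller already holds:

    /* Before: wait on the closure embedded in struct btree_op */
    bch_btree_node_write(b, &op->cl);
    closure_sync(&op->cl);

    /* After: struct btree_op no longer embeds a closure; callers that
     * still need to wait declare one on their own stack.
     */
    struct closure cl;

    closure_init_stack(&cl);
    bch_btree_node_write(b, &cl);
    closure_sync(&cl);

    /* Initialization changes to match: the new inline helper only zeroes
     * the op and sets op->lock, since there is no closure to set up.
     */
    bch_btree_op_init(&op, SHRT_MAX);  /* was: bch_btree_op_init_stack(&op); op.lock = SHRT_MAX; */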

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
drivers/md/bcache/bset.c
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/writeback.c

index af96cce99eeaf654433c09c780547d14d7a92e7e..8ac55fc818df92b7ae964f1a414f984b36a35f35 100644 (file)
@@ -1196,7 +1196,7 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
        int ret;
 
        memset(&t, 0, sizeof(struct bset_stats));
-       bch_btree_op_init_stack(&t.op);
+       bch_btree_op_init(&t.op, -1);
 
        ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
        if (ret < 0)
index 38d95afe96dde1cc395c92c910c568ab905a3bd6..2d4ec53b71198979c9132f376854c0174a5253ae 100644 (file)
@@ -115,13 +115,6 @@ enum {
 
 static struct workqueue_struct *btree_io_wq;
 
-void bch_btree_op_init_stack(struct btree_op *op)
-{
-       memset(op, 0, sizeof(struct btree_op));
-       closure_init_stack(&op->cl);
-       op->lock = -1;
-}
-
 static inline bool should_split(struct btree *b)
 {
        struct bset *i = write_block(b);
@@ -956,8 +949,7 @@ err:
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and running under generic_make_request, returns -EAGAIN.
  *
  * The btree node will have either a read or a write lock held, depending on
  * level and op->lock.
@@ -1251,6 +1243,9 @@ static void btree_gc_coalesce(struct btree *b, struct gc_stat *gc,
 {
        unsigned nodes = 0, keys = 0, blocks;
        int i;
+       struct closure cl;
+
+       closure_init_stack(&cl);
 
        while (nodes < GC_MERGE_NODES && r[nodes].b)
                keys += r[nodes++].keys;
@@ -1344,9 +1339,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 {
        void write(struct btree *r)
        {
-               if (!r->written)
-                       bch_btree_node_write(r, &op->cl);
-               else if (btree_node_dirty(r))
+               if (!r->written || btree_node_dirty(r))
                        bch_btree_node_write(r, writes);
 
                up_write(&r->lock);
@@ -1422,6 +1415,9 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
        struct btree *n = NULL;
        unsigned keys = 0;
        int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+       struct closure cl;
+
+       closure_init_stack(&cl);
 
        if (b->level || stale > 10)
                n = btree_node_alloc_replacement(b);
@@ -1433,11 +1429,11 @@ static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
                ret = btree_gc_recurse(b, op, writes, gc);
 
        if (!b->written || btree_node_dirty(b)) {
-               bch_btree_node_write(b, n ? &op->cl : NULL);
+               bch_btree_node_write(b, n ? &cl : NULL);
        }
 
        if (!IS_ERR_OR_NULL(n)) {
-               closure_sync(&op->cl);
+               closure_sync(&cl);
                bch_btree_set_root(b);
                btree_node_free(n);
                rw_unlock(true, b);
@@ -1536,15 +1532,13 @@ static void bch_btree_gc(struct cache_set *c)
 
        memset(&stats, 0, sizeof(struct gc_stat));
        closure_init_stack(&writes);
-       bch_btree_op_init_stack(&op);
-       op.lock = SHRT_MAX;
+       bch_btree_op_init(&op, SHRT_MAX);
 
        btree_gc_start(c);
 
        atomic_inc(&c->prio_blocked);
 
        ret = btree_root(gc_root, c, &op, &writes, &stats);
-       closure_sync(&op.cl);
        closure_sync(&writes);
 
        if (ret) {
@@ -1553,8 +1547,8 @@ static void bch_btree_gc(struct cache_set *c)
        }
 
        /* Possibly wait for new UUIDs or whatever to hit disk */
-       bch_journal_meta(c, &op.cl);
-       closure_sync(&op.cl);
+       bch_journal_meta(c, &writes);
+       closure_sync(&writes);
 
        available = bch_btree_gc_finish(c);
 
@@ -1662,8 +1656,7 @@ int bch_btree_check(struct cache_set *c)
        struct btree_op op;
 
        memset(seen, 0, sizeof(seen));
-       bch_btree_op_init_stack(&op);
-       op.lock = SHRT_MAX;
+       bch_btree_op_init(&op, SHRT_MAX);
 
        for (i = 0; c->cache[i]; i++) {
                size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1968,6 +1961,9 @@ static int btree_split(struct btree *b, struct btree_op *op,
        bool split;
        struct btree *n1, *n2 = NULL, *n3 = NULL;
        uint64_t start_time = local_clock();
+       struct closure cl;
+
+       closure_init_stack(&cl);
 
        n1 = btree_node_alloc_replacement(b);
        if (IS_ERR(n1))
@@ -2013,7 +2009,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                bkey_copy_key(&n2->key, &b->key);
 
                bch_keylist_add(parent_keys, &n2->key);
-               bch_btree_node_write(n2, &op->cl);
+               bch_btree_node_write(n2, &cl);
                rw_unlock(true, n2);
        } else {
                trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
@@ -2022,23 +2018,23 @@ static int btree_split(struct btree *b, struct btree_op *op,
        }
 
        bch_keylist_add(parent_keys, &n1->key);
-       bch_btree_node_write(n1, &op->cl);
+       bch_btree_node_write(n1, &cl);
 
        if (n3) {
                /* Depth increases, make a new root */
 
                bkey_copy_key(&n3->key, &MAX_KEY);
                bch_btree_insert_keys(n3, op, parent_keys);
-               bch_btree_node_write(n3, &op->cl);
+               bch_btree_node_write(n3, &cl);
 
-               closure_sync(&op->cl);
+               closure_sync(&cl);
                bch_btree_set_root(n3);
                rw_unlock(true, n3);
        } else if (!b->parent) {
                /* Root filled up but didn't need to be split */
 
                bch_keylist_reset(parent_keys);
-               closure_sync(&op->cl);
+               closure_sync(&cl);
                bch_btree_set_root(n1);
        } else {
                unsigned i;
@@ -2053,7 +2049,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                }
 
                bch_keylist_push(parent_keys);
-               closure_sync(&op->cl);
+               closure_sync(&cl);
                atomic_inc(&b->c->prio_blocked);
        }
 
@@ -2114,10 +2110,15 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
                        BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
                        if (bch_btree_insert_keys(b, op, insert_keys)) {
-                               if (!b->level)
+                               if (!b->level) {
                                        bch_btree_leaf_dirty(b, journal_ref);
-                               else
-                                       bch_btree_node_write(b, &op->cl);
+                               } else {
+                                       struct closure cl;
+
+                                       closure_init_stack(&cl);
+                                       bch_btree_node_write(b, &cl);
+                                       closure_sync(&cl);
+                               }
                        }
                }
        } while (!bch_keylist_empty(&split_keys));
@@ -2192,12 +2193,6 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 {
        int ret = 0;
 
-       /*
-        * Don't want to block with the btree locked unless we have to,
-        * otherwise we get deadlocks with try_harder and between split/gc
-        */
-       clear_closure_blocking(&op->cl);
-
        BUG_ON(bch_keylist_empty(keys));
 
        while (!bch_keylist_empty(keys)) {
@@ -2205,8 +2200,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
                ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
                if (ret == -EAGAIN) {
+                       BUG();
                        ret = 0;
-                       closure_sync(&op->cl);
                } else if (ret) {
                        struct bkey *k;
 
@@ -2280,10 +2275,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
                          struct bkey *from, btree_map_nodes_fn *fn, int flags)
 {
-       int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
-       if (closure_blocking(&op->cl))
-               closure_sync(&op->cl);
-       return ret;
+       return btree_root(map_nodes_recurse, c, op, from, fn, flags);
 }
 
 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
@@ -2316,10 +2308,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
                       struct bkey *from, btree_map_keys_fn *fn, int flags)
 {
-       int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
-       if (closure_blocking(&op->cl))
-               closure_sync(&op->cl);
-       return ret;
+       return btree_root(map_keys_recurse, c, op, from, fn, flags);
 }
 
 /* Keybuf code */
@@ -2397,7 +2386,7 @@ void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
 
        cond_resched();
 
-       bch_btree_op_init_stack(&refill.op);
+       bch_btree_op_init(&refill.op, -1);
        refill.buf = buf;
        refill.end = end;
        refill.pred = pred;
index 3f820b67150c46c3bc7f8f2643aadc08f15e8066..34ee5359b262a198d600da02ec4838d577f67576 100644 (file)
@@ -237,8 +237,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 /* Recursing down the btree */
 
 struct btree_op {
-       struct closure          cl;
-
        /* Btree level at which we start taking write locks */
        short                   lock;
 
@@ -253,7 +251,11 @@ struct btree_op {
        BKEY_PADDED(replace);
 };
 
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+       memset(op, 0, sizeof(struct btree_op));
+       op->lock = write_lock_level;
+}
 
 static inline void rw_lock(bool w, struct btree *b, int level)
 {
index 211b51a7d8f59df38d7c8fcbfc55f906494adb18..33991a2c460e4b0138f9d65fccffef9acfec0381 100644 (file)
@@ -305,8 +305,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
        struct btree_op op;
 
        bch_keylist_init(&keylist);
-       bch_btree_op_init_stack(&op);
-       op.lock = SHRT_MAX;
+       bch_btree_op_init(&op, SHRT_MAX);
 
        list_for_each_entry(i, list, list) {
                BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -341,14 +340,13 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 
        pr_info("journal replay done, %i keys in %i entries, seq %llu",
                keys, entries, end);
-
+err:
        while (!list_empty(list)) {
                i = list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kfree(i);
        }
-err:
-       closure_sync(&op.cl);
+
        return ret;
 }
 
index c91f173cb09813e85a0093cbee8b8d18960f8099..36ac7db9a53cbc8b6a73c7b359d8247486caccb2 100644 (file)
@@ -108,8 +108,8 @@ static void write_moving(struct closure *cl)
                s->op.type = BTREE_REPLACE;
                bkey_copy(&s->op.replace, &io->w->key);
 
-               closure_init(&s->op.cl, cl);
-               bch_data_insert(&s->op.cl);
+               closure_init(&s->btree, cl);
+               bch_data_insert(&s->btree);
        }
 
        continue_at(cl, write_moving_finish, NULL);
index 4e7d66f225615b4a42c4705510d7f78cc2d2b32d..82a7047b9c8e634b3d737e74eb641975e5f474e4 100644 (file)
@@ -215,8 +215,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
 
 static void bch_data_insert_keys(struct closure *cl)
 {
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
        atomic_t *journal_ref = NULL;
 
        /*
@@ -236,7 +235,7 @@ static void bch_data_insert_keys(struct closure *cl)
                                          s->flush_journal
                                          ? &s->cl : NULL);
 
-       if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
+       if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
                s->error                = -ENOMEM;
                s->insert_data_done     = true;
        }
@@ -433,8 +432,7 @@ static bool bch_alloc_sectors(struct bkey *k, unsigned sectors,
 
 static void bch_data_invalidate(struct closure *cl)
 {
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
        struct bio *bio = s->cache_bio;
 
        pr_debug("invalidating %i sectors from %llu",
@@ -461,8 +459,7 @@ out:
 
 static void bch_data_insert_error(struct closure *cl)
 {
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
 
        /*
         * Our data write just errored, which means we've got a bunch of keys to
@@ -493,8 +490,7 @@ static void bch_data_insert_error(struct closure *cl)
 static void bch_data_insert_endio(struct bio *bio, int error)
 {
        struct closure *cl = bio->bi_private;
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
 
        if (error) {
                /* TODO: We could try to recover from this. */
@@ -511,8 +507,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 
 static void bch_data_insert_start(struct closure *cl)
 {
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
        struct bio *bio = s->cache_bio, *n;
 
        if (s->bypass)
@@ -630,8 +625,7 @@ err:
  */
 void bch_data_insert(struct closure *cl)
 {
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
 
        bch_keylist_init(&s->insert_keys);
        bio_get(s->cache_bio);
@@ -731,11 +725,10 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 
 static void cache_lookup(struct closure *cl)
 {
-       struct btree_op *op = container_of(cl, struct btree_op, cl);
-       struct search *s = container_of(op, struct search, op);
+       struct search *s = container_of(cl, struct search, btree);
        struct bio *bio = &s->bio.bio;
 
-       int ret = bch_btree_map_keys(op, s->c,
+       int ret = bch_btree_map_keys(&s->op, s->c,
                                     &KEY(s->inode, bio->bi_sector, 0),
                                     cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN)
@@ -1063,7 +1056,7 @@ static void cached_dev_read_done(struct closure *cl)
        if (s->cache_bio &&
            !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
                s->op.type = BTREE_REPLACE;
-               closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+               closure_call(&s->btree, bch_data_insert, NULL, cl);
        }
 
        continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -1156,7 +1149,7 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
 {
        struct closure *cl = &s->cl;
 
-       closure_call(&s->op.cl, cache_lookup, NULL, cl);
+       closure_call(&s->btree, cache_lookup, NULL, cl);
        continue_at(cl, cached_dev_read_done_bh, NULL);
 }
 
@@ -1237,7 +1230,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                closure_bio_submit(bio, cl, s->d);
        }
 
-       closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+       closure_call(&s->btree, bch_data_insert, NULL, cl);
        continue_at(cl, cached_dev_write_complete, NULL);
 }
 
@@ -1416,9 +1409,9 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                s->writeback    = true;
                s->cache_bio    = bio;
 
-               closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+               closure_call(&s->btree, bch_data_insert, NULL, cl);
        } else {
-               closure_call(&s->op.cl, cache_lookup, NULL, cl);
+               closure_call(&s->btree, cache_lookup, NULL, cl);
        }
 
        continue_at(cl, search_free, NULL);
index 0f79177c4f335a27113ccded335a6e5600a0c1a4..ed578aa53ee2ec599de3f272e9d28cd7181c31a1 100644 (file)
@@ -6,6 +6,7 @@
 struct search {
        /* Stack frame for bio_complete */
        struct closure          cl;
+       struct closure          btree;
 
        struct bcache_device    *d;
        struct cache_set        *c;
index b58c2bc91e3f038a62e1cca411bdb86a5c750ff5..d0968e8938f75be0572d20cf4f1ba94caf18fba7 100644 (file)
@@ -143,7 +143,7 @@ static void write_dirty_finish(struct closure *cl)
                struct btree_op op;
                struct keylist keys;
 
-               bch_btree_op_init_stack(&op);
+               bch_btree_op_init(&op, -1);
                bch_keylist_init(&keys);
 
                op.type = BTREE_REPLACE;
@@ -156,7 +156,6 @@ static void write_dirty_finish(struct closure *cl)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
                bch_btree_insert(&op, dc->disk.c, &keys, NULL);
-               closure_sync(&op.cl);
 
                if (op.insert_collision)
                        trace_bcache_writeback_collision(&w->key);
@@ -457,7 +456,7 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
 {
        struct sectors_dirty_init op;
 
-       bch_btree_op_init_stack(&op.op);
+       bch_btree_op_init(&op.op, -1);
        op.inode = dc->disk.id;
 
        bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),