bcache: Add btree_map() functions
author Kent Overstreet <kmo@daterainc.com>
Wed, 11 Sep 2013 01:48:51 +0000 (18:48 -0700)
committer Kent Overstreet <kmo@daterainc.com>
Wed, 11 Sep 2013 01:48:51 +0000 (18:48 -0700)
Lots of stuff has been open coding its own btree traversal - which is
generally pretty simple code, but there are a few subtleties.

This adds two new functions, bch_btree_map_nodes() and
bch_btree_map_keys(), which do the traversal for you. Everything that's
open coding btree traversal now (with the exception of garbage
collection) is slowly going to be converted to these two functions;
being able to write other code at a higher level of abstraction is a
big improvement w.r.t. overall code quality.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
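
As a rough sketch of the intended calling convention (modelled on the
refill_keybuf_fn() and sectors_dirty_init_fn() conversions in this patch -
struct key_count, count_keys_fn() and count_keys() are invented here purely
for illustration and are not part of the patch):

/*
 * Hypothetical example only: the names below are made up; everything they
 * call comes from the bcache headers (bcache.h, btree.h) as modified by
 * this patch.
 */
struct key_count {
        struct btree_op op;     /* embedded so the callback can recover our state */
        uint64_t        inode;
        size_t          found;
};

static int count_keys_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
        struct key_count *count = container_of(op, struct key_count, op);

        if (KEY_INODE(k) > count->inode)
                return MAP_DONE;        /* past the inode we care about - stop */

        if (KEY_INODE(k) == count->inode)
                count->found++;

        return MAP_CONTINUE;            /* keep walking leaf keys */
}

static size_t count_keys(struct cache_set *c, uint64_t inode)
{
        struct key_count count;

        bch_btree_op_init_stack(&count.op);
        count.inode = inode;
        count.found = 0;

        /* visit every leaf key >= KEY(inode, 0, 0) until the callback says stop */
        bch_btree_map_keys(&count.op, c, &KEY(inode, 0, 0), count_keys_fn, 0);

        return count.found;
}

bch_btree_map_keys() recurses through the interior nodes itself, so the
callback only ever sees leaf keys and steers the walk by returning
MAP_CONTINUE or MAP_DONE.
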
drivers/md/bcache/bcache.h
drivers/md/bcache/bset.c
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/writeback.c

diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 674e2f42e7789f3f9d8a1bcc361b9295147c835e..20fe96c121d928b971321fc27fbdedf873db7706 100644
@@ -384,8 +384,6 @@ struct keybuf_key {
        void                    *private;
 };
 
-typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
-
 struct keybuf {
        struct bkey             last_scanned;
        spinlock_t              lock;
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 1e7d946485ae120c3425aec7e525c33bf4929b07..52d30f86903dfa6ecf233652cda0b505039cd15b 100644
@@ -842,6 +842,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 /* Btree iterator */
 
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
 static inline bool btree_iter_cmp(struct btree_iter_set l,
                                  struct btree_iter_set r)
 {
@@ -1146,16 +1153,16 @@ out:
 /* Sysfs stuff */
 
 struct bset_stats {
+       struct btree_op op;
        size_t nodes;
        size_t sets_written, sets_unwritten;
        size_t bytes_written, bytes_unwritten;
        size_t floats, failed;
 };
 
-static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
-                           struct bset_stats *stats)
+static int btree_bset_stats(struct btree_op *op, struct btree *b)
 {
-       struct bkey *k;
+       struct bset_stats *stats = container_of(op, struct bset_stats, op);
        unsigned i;
 
        stats->nodes++;
@@ -1180,30 +1187,20 @@ static int bch_btree_bset_stats(struct btree *b, struct btree_op *op,
                }
        }
 
-       if (b->level) {
-               struct btree_iter iter;
-
-               for_each_key_filter(b, k, &iter, bch_ptr_bad) {
-                       int ret = btree(bset_stats, k, b, op, stats);
-                       if (ret)
-                               return ret;
-               }
-       }
-
-       return 0;
+       return MAP_CONTINUE;
 }
 
 int bch_bset_print_stats(struct cache_set *c, char *buf)
 {
-       struct btree_op op;
        struct bset_stats t;
        int ret;
 
-       bch_btree_op_init_stack(&op);
        memset(&t, 0, sizeof(struct bset_stats));
+       bch_btree_op_init_stack(&t.op);
+       t.op.c = c;
 
-       ret = btree_root(bset_stats, c, &op, &t);
-       if (ret)
+       ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
+       if (ret < 0)
                return ret;
 
        return snprintf(buf, PAGE_SIZE,
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index f16a756a24256bd5df232418aca14481ff6b7333..91ed3ead7365dd956ae6af2803598b654d8f2a21 100644
@@ -2284,6 +2284,82 @@ int bch_btree_search_recurse(struct btree *b, struct btree_op *op)
        return ret;
 }
 
+/* Map across nodes or keys */
+
+static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
+                                      struct bkey *from,
+                                      btree_map_nodes_fn *fn, int flags)
+{
+       int ret = MAP_CONTINUE;
+
+       if (b->level) {
+               struct bkey *k;
+               struct btree_iter iter;
+
+               bch_btree_iter_init(b, &iter, from);
+
+               while ((k = bch_btree_iter_next_filter(&iter, b,
+                                                      bch_ptr_bad))) {
+                       ret = btree(map_nodes_recurse, k, b,
+                                   op, from, fn, flags);
+                       from = NULL;
+
+                       if (ret != MAP_CONTINUE)
+                               return ret;
+               }
+       }
+
+       if (!b->level || flags == MAP_ALL_NODES)
+               ret = fn(op, b);
+
+       return ret;
+}
+
+int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+                         struct bkey *from, btree_map_nodes_fn *fn, int flags)
+{
+       int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
+       if (closure_blocking(&op->cl))
+               closure_sync(&op->cl);
+       return ret;
+}
+
+static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
+                                     struct bkey *from, btree_map_keys_fn *fn,
+                                     int flags)
+{
+       int ret = MAP_CONTINUE;
+       struct bkey *k;
+       struct btree_iter iter;
+
+       bch_btree_iter_init(b, &iter, from);
+
+       while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad))) {
+               ret = !b->level
+                       ? fn(op, b, k)
+                       : btree(map_keys_recurse, k, b, op, from, fn, flags);
+               from = NULL;
+
+               if (ret != MAP_CONTINUE)
+                       return ret;
+       }
+
+       if (!b->level && (flags & MAP_END_KEY))
+               ret = fn(op, b, &KEY(KEY_INODE(&b->key),
+                                    KEY_OFFSET(&b->key), 0));
+
+       return ret;
+}
+
+int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
+                      struct bkey *from, btree_map_keys_fn *fn, int flags)
+{
+       int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
+       if (closure_blocking(&op->cl))
+               closure_sync(&op->cl);
+       return ret;
+}
+
 /* Keybuf code */
 
 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
@@ -2302,74 +2378,70 @@ static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
        return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
 }
 
-static int bch_btree_refill_keybuf(struct btree *b, struct btree_op *op,
-                                  struct keybuf *buf, struct bkey *end,
-                                  keybuf_pred_fn *pred)
-{
-       struct btree_iter iter;
-       bch_btree_iter_init(b, &iter, &buf->last_scanned);
-
-       while (!array_freelist_empty(&buf->freelist)) {
-               struct bkey *k = bch_btree_iter_next_filter(&iter, b,
-                                                           bch_ptr_bad);
-
-               if (!b->level) {
-                       if (!k) {
-                               buf->last_scanned = b->key;
-                               break;
-                       }
+struct refill {
+       struct btree_op op;
+       struct keybuf   *buf;
+       struct bkey     *end;
+       keybuf_pred_fn  *pred;
+};
 
-                       buf->last_scanned = *k;
-                       if (bkey_cmp(&buf->last_scanned, end) >= 0)
-                               break;
+static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
+                           struct bkey *k)
+{
+       struct refill *refill = container_of(op, struct refill, op);
+       struct keybuf *buf = refill->buf;
+       int ret = MAP_CONTINUE;
 
-                       if (pred(buf, k)) {
-                               struct keybuf_key *w;
+       if (bkey_cmp(k, refill->end) >= 0) {
+               ret = MAP_DONE;
+               goto out;
+       }
 
-                               spin_lock(&buf->lock);
+       if (!KEY_SIZE(k)) /* end key */
+               goto out;
 
-                               w = array_alloc(&buf->freelist);
+       if (refill->pred(buf, k)) {
+               struct keybuf_key *w;
 
-                               w->private = NULL;
-                               bkey_copy(&w->key, k);
+               spin_lock(&buf->lock);
 
-                               if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
-                                       array_free(&buf->freelist, w);
+               w = array_alloc(&buf->freelist);
+               if (!w) {
+                       spin_unlock(&buf->lock);
+                       return MAP_DONE;
+               }
 
-                               spin_unlock(&buf->lock);
-                       }
-               } else {
-                       if (!k)
-                               break;
+               w->private = NULL;
+               bkey_copy(&w->key, k);
 
-                       btree(refill_keybuf, k, b, op, buf, end, pred);
-                       /*
-                        * Might get an error here, but can't really do anything
-                        * and it'll get logged elsewhere. Just read what we
-                        * can.
-                        */
+               if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
+                       array_free(&buf->freelist, w);
 
-                       if (bkey_cmp(&buf->last_scanned, end) >= 0)
-                               break;
+               if (array_freelist_empty(&buf->freelist))
+                       ret = MAP_DONE;
 
-                       cond_resched();
-               }
+               spin_unlock(&buf->lock);
        }
-
-       return 0;
+out:
+       buf->last_scanned = *k;
+       return ret;
 }
 
 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
                       struct bkey *end, keybuf_pred_fn *pred)
 {
        struct bkey start = buf->last_scanned;
-       struct btree_op op;
-       bch_btree_op_init_stack(&op);
+       struct refill refill;
 
        cond_resched();
 
-       btree_root(refill_keybuf, c, &op, buf, end, pred);
-       closure_sync(&op.cl);
+       bch_btree_op_init_stack(&refill.op);
+       refill.buf = buf;
+       refill.end = end;
+       refill.pred = pred;
+
+       bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
+                          refill_keybuf_fn, MAP_END_KEY);
 
        pr_debug("found %s keys from %llu:%llu to %llu:%llu",
                 RB_EMPTY_ROOT(&buf->keys) ? "no" :
@@ -2453,9 +2525,9 @@ struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
 }
 
 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
-                                            struct keybuf *buf,
-                                            struct bkey *end,
-                                            keybuf_pred_fn *pred)
+                                         struct keybuf *buf,
+                                         struct bkey *end,
+                                         keybuf_pred_fn *pred)
 {
        struct keybuf_key *ret;
 
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index fa9641aaed39bf761156dda2101e1d6a80c0cb48..cafdeb01e219dc2a6def1ae018aad3afbca51432 100644
@@ -400,9 +400,42 @@ static inline void wake_up_gc(struct cache_set *c)
                wake_up_process(c->gc_thread);
 }
 
+#define MAP_DONE       0
+#define MAP_CONTINUE   1
+
+#define MAP_ALL_NODES  0
+#define MAP_LEAF_NODES 1
+
+#define MAP_END_KEY    1
+
+typedef int (btree_map_nodes_fn)(struct btree_op *, struct btree *);
+int __bch_btree_map_nodes(struct btree_op *, struct cache_set *,
+                         struct bkey *, btree_map_nodes_fn *, int);
+
+static inline int bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
+                                     struct bkey *from, btree_map_nodes_fn *fn)
+{
+       return __bch_btree_map_nodes(op, c, from, fn, MAP_ALL_NODES);
+}
+
+static inline int bch_btree_map_leaf_nodes(struct btree_op *op,
+                                          struct cache_set *c,
+                                          struct bkey *from,
+                                          btree_map_nodes_fn *fn)
+{
+       return __bch_btree_map_nodes(op, c, from, fn, MAP_LEAF_NODES);
+}
+
+typedef int (btree_map_keys_fn)(struct btree_op *, struct btree *,
+                               struct bkey *);
+int bch_btree_map_keys(struct btree_op *, struct cache_set *,
+                      struct bkey *, btree_map_keys_fn *, int);
+
+typedef bool (keybuf_pred_fn)(struct keybuf *, struct bkey *);
+
 void bch_keybuf_init(struct keybuf *);
-void bch_refill_keybuf(struct cache_set *, struct keybuf *, struct bkey *,
-                      keybuf_pred_fn *);
+void bch_refill_keybuf(struct cache_set *, struct keybuf *,
+                      struct bkey *, keybuf_pred_fn *);
 bool bch_keybuf_check_overlapping(struct keybuf *, struct bkey *,
                                  struct bkey *);
 void bch_keybuf_del(struct keybuf *, struct keybuf_key *);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 4392f3f38d623357608f014254543b8c74a73fbb..c68de9f126189803b8c8954db5d1cf2332865d1d 100644
@@ -433,31 +433,17 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 
-static int bch_btree_sectors_dirty_init(struct btree *b, struct btree_op *op,
-                                       struct cached_dev *dc)
+static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
+                                struct bkey *k)
 {
-       struct bkey *k;
-       struct btree_iter iter;
-
-       bch_btree_iter_init(b, &iter, &KEY(dc->disk.id, 0, 0));
-       while ((k = bch_btree_iter_next_filter(&iter, b, bch_ptr_bad)))
-               if (!b->level) {
-                       if (KEY_INODE(k) > dc->disk.id)
-                               break;
-
-                       if (KEY_DIRTY(k))
-                               bcache_dev_sectors_dirty_add(b->c, dc->disk.id,
-                                                            KEY_START(k),
-                                                            KEY_SIZE(k));
-               } else {
-                       btree(sectors_dirty_init, k, b, op, dc);
-                       if (KEY_INODE(k) > dc->disk.id)
-                               break;
-
-                       cond_resched();
-               }
+       if (KEY_INODE(k) > op->inode)
+               return MAP_DONE;
 
-       return 0;
+       if (KEY_DIRTY(k))
+               bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
+                                            KEY_START(k), KEY_SIZE(k));
+
+       return MAP_CONTINUE;
 }
 
 void bch_sectors_dirty_init(struct cached_dev *dc)
@@ -465,7 +451,10 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
        struct btree_op op;
 
        bch_btree_op_init_stack(&op);
-       btree_root(sectors_dirty_init, dc->disk.c, &op, dc);
+       op.inode = dc->disk.id;
+
+       bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
+                          sectors_dirty_init_fn, 0);
 }
 
 int bch_cached_dev_writeback_init(struct cached_dev *dc)