bcache: Kill op->replace
author    Kent Overstreet <kmo@daterainc.com>
          Wed, 11 Sep 2013 01:52:54 +0000 (18:52 -0700)
committer Kent Overstreet <kmo@daterainc.com>
          Wed, 11 Sep 2013 01:52:54 +0000 (18:52 -0700)
This is prep work for converting bch_btree_insert to
bch_btree_map_leaf_nodes() - we have to convert everything it currently
takes implicitly via struct btree_op into actual function arguments.
Bunch of churn, but should be straightforward.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
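
The net API change, condensed from the write_dirty_finish() hunk in
writeback.c below (a sketch only; setup and error handling omitted):

        /* Before: the replace key rode along inside struct btree_op */
        op.type = BTREE_REPLACE;
        bkey_copy(&op.replace, &w->key);
        bch_btree_insert(&op, dc->disk.c, &keys, NULL);

        /* After: the replace key is an explicit argument; NULL means a plain insert */
        bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);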
drivers/md/bcache/btree.c
drivers/md/bcache/btree.h
drivers/md/bcache/journal.c
drivers/md/bcache/movinggc.c
drivers/md/bcache/request.c
drivers/md/bcache/request.h
drivers/md/bcache/writeback.c

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 876ef88851163302b2e56e63a6664b2347c4238e..1ada9dfe234fecb880ff312c1edebfc138fd160b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
  * Test module load/unload
  */
 
-static const char * const op_types[] = {
-       "insert", "replace"
-};
-
-static const char *op_type(struct btree_op *op)
-{
-       return op_types[op->type];
-}
-
 enum {
        BTREE_INSERT_STATUS_INSERT,
        BTREE_INSERT_STATUS_BACK_MERGE,
@@ -1690,10 +1681,9 @@ static void shift_keys(struct btree *b, struct bkey *where, struct bkey *insert)
        bch_bset_fix_lookup_table(b, where);
 }
 
-static bool fix_overlapping_extents(struct btree *b,
-                                   struct bkey *insert,
+static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
                                    struct btree_iter *iter,
-                                   struct btree_op *op)
+                                   struct bkey *replace_key)
 {
        void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
        {
@@ -1721,39 +1711,38 @@ static bool fix_overlapping_extents(struct btree *b,
                 * We might overlap with 0 size extents; we can't skip these
                 * because if they're in the set we're inserting to we have to
                 * adjust them so they don't overlap with the key we're
-                * inserting. But we don't want to check them for BTREE_REPLACE
+                * inserting. But we don't want to check them for replace
                 * operations.
                 */
 
-               if (op->type == BTREE_REPLACE &&
-                   KEY_SIZE(k)) {
+               if (replace_key && KEY_SIZE(k)) {
                        /*
                         * k might have been split since we inserted/found the
                         * key we're replacing
                         */
                        unsigned i;
                        uint64_t offset = KEY_START(k) -
-                               KEY_START(&op->replace);
+                               KEY_START(replace_key);
 
                        /* But it must be a subset of the replace key */
-                       if (KEY_START(k) < KEY_START(&op->replace) ||
-                           KEY_OFFSET(k) > KEY_OFFSET(&op->replace))
+                       if (KEY_START(k) < KEY_START(replace_key) ||
+                           KEY_OFFSET(k) > KEY_OFFSET(replace_key))
                                goto check_failed;
 
                        /* We didn't find a key that we were supposed to */
                        if (KEY_START(k) > KEY_START(insert) + sectors_found)
                                goto check_failed;
 
-                       if (KEY_PTRS(&op->replace) != KEY_PTRS(k))
+                       if (KEY_PTRS(replace_key) != KEY_PTRS(k))
                                goto check_failed;
 
                        /* skip past gen */
                        offset <<= 8;
 
-                       BUG_ON(!KEY_PTRS(&op->replace));
+                       BUG_ON(!KEY_PTRS(replace_key));
 
-                       for (i = 0; i < KEY_PTRS(&op->replace); i++)
-                               if (k->ptr[i] != op->replace.ptr[i] + offset)
+                       for (i = 0; i < KEY_PTRS(replace_key); i++)
+                               if (k->ptr[i] != replace_key->ptr[i] + offset)
                                        goto check_failed;
 
                        sectors_found = KEY_OFFSET(k) - KEY_START(insert);
@@ -1821,9 +1810,8 @@ static bool fix_overlapping_extents(struct btree *b,
        }
 
 check_failed:
-       if (op->type == BTREE_REPLACE) {
+       if (replace_key) {
                if (!sectors_found) {
-                       op->insert_collision = true;
                        return true;
                } else if (sectors_found < KEY_SIZE(insert)) {
                        SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
@@ -1836,7 +1824,7 @@ check_failed:
 }
 
 static bool btree_insert_key(struct btree *b, struct btree_op *op,
-                            struct bkey *k)
+                            struct bkey *k, struct bkey *replace_key)
 {
        struct bset *i = b->sets[b->nsets].data;
        struct bkey *m, *prev;
@@ -1862,8 +1850,10 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
                prev = NULL;
                m = bch_btree_iter_init(b, &iter, &search);
 
-               if (fix_overlapping_extents(b, k, &iter, op))
+               if (fix_overlapping_extents(b, k, &iter, replace_key)) {
+                       op->insert_collision = true;
                        return false;
+               }
 
                while (m != end(i) &&
                       bkey_cmp(k, &START_KEY(m)) > 0)
@@ -1887,8 +1877,10 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
                if (m != end(i) &&
                    bch_bkey_try_merge(b, k, m))
                        goto copy;
-       } else
+       } else {
+               BUG_ON(replace_key);
                m = bch_bset_search(b, &b->sets[b->nsets], k);
+       }
 
 insert:        shift_keys(b, m, k);
 copy:  bkey_copy(m, k);
@@ -1897,18 +1889,20 @@ merged:
                bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
                                             KEY_START(k), KEY_SIZE(k));
 
-       bch_check_keys(b, "%u for %s", status, op_type(op));
+       bch_check_keys(b, "%u for %s", status,
+                      replace_key ? "replace" : "insert");
 
        if (b->level && !KEY_OFFSET(k))
                btree_current_write(b)->prio_blocked++;
 
-       trace_bcache_btree_insert_key(b, k, op->type, status);
+       trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
 
        return true;
 }
 
 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
-                                 struct keylist *insert_keys)
+                                 struct keylist *insert_keys,
+                                 struct bkey *replace_key)
 {
        bool ret = false;
        unsigned oldsize = bch_count_data(b);
@@ -1924,11 +1918,11 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                if (bkey_cmp(k, &b->key) <= 0) {
                        bkey_put(b->c, k, b->level);
 
-                       ret |= btree_insert_key(b, op, k);
+                       ret |= btree_insert_key(b, op, k, replace_key);
                        bch_keylist_pop_front(insert_keys);
                } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 #if 0
-                       if (op->type == BTREE_REPLACE) {
+                       if (replace_key) {
                                bkey_put(b->c, k, b->level);
                                bch_keylist_pop_front(insert_keys);
                                op->insert_collision = true;
@@ -1941,7 +1935,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
                        bch_cut_back(&b->key, &temp.key);
                        bch_cut_front(&b->key, insert_keys->keys);
 
-                       ret |= btree_insert_key(b, op, &temp.key);
+                       ret |= btree_insert_key(b, op, &temp.key, replace_key);
                        break;
                } else {
                        break;
@@ -1956,7 +1950,8 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 
 static int btree_split(struct btree *b, struct btree_op *op,
                       struct keylist *insert_keys,
-                      struct keylist *parent_keys)
+                      struct keylist *parent_keys,
+                      struct bkey *replace_key)
 {
        bool split;
        struct btree *n1, *n2 = NULL, *n3 = NULL;
@@ -1986,7 +1981,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                                goto err_free2;
                }
 
-               bch_btree_insert_keys(n1, op, insert_keys);
+               bch_btree_insert_keys(n1, op, insert_keys, replace_key);
 
                /*
                 * Has to be a linear search because we don't have an auxiliary
@@ -2014,7 +2009,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
        } else {
                trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
 
-               bch_btree_insert_keys(n1, op, insert_keys);
+               bch_btree_insert_keys(n1, op, insert_keys, replace_key);
        }
 
        bch_keylist_add(parent_keys, &n1->key);
@@ -2024,7 +2019,7 @@ static int btree_split(struct btree *b, struct btree_op *op,
                /* Depth increases, make a new root */
 
                bkey_copy_key(&n3->key, &MAX_KEY);
-               bch_btree_insert_keys(n3, op, parent_keys);
+               bch_btree_insert_keys(n3, op, parent_keys, NULL);
                bch_btree_node_write(n3, &cl);
 
                closure_sync(&cl);
@@ -2079,7 +2074,8 @@ err:
 
 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
                                 struct keylist *insert_keys,
-                                atomic_t *journal_ref)
+                                atomic_t *journal_ref,
+                                struct bkey *replace_key)
 {
        int ret = 0;
        struct keylist split_keys;
@@ -2089,6 +2085,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
        BUG_ON(b->level);
 
        do {
+               BUG_ON(b->level && replace_key);
+
                if (should_split(b)) {
                        if (current->bio_list) {
                                op->lock = b->c->root->level + 1;
@@ -2100,8 +2098,9 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
                                struct btree *parent = b->parent;
 
                                ret = btree_split(b, op, insert_keys,
-                                                 &split_keys);
+                                                 &split_keys, replace_key);
                                insert_keys = &split_keys;
+                               replace_key = NULL;
                                b = parent;
                                if (!ret)
                                        ret = -EINTR;
@@ -2109,7 +2108,8 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
                } else {
                        BUG_ON(write_block(b) != b->sets[b->nsets].data);
 
-                       if (bch_btree_insert_keys(b, op, insert_keys)) {
+                       if (bch_btree_insert_keys(b, op, insert_keys,
+                                                 replace_key)) {
                                if (!b->level) {
                                        bch_btree_leaf_dirty(b, journal_ref);
                                } else {
@@ -2153,9 +2153,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 
        bch_keylist_add(&insert, check_key);
 
-       BUG_ON(op->type != BTREE_INSERT);
-
-       ret = bch_btree_insert_node(b, op, &insert, NULL);
+       ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
 
        BUG_ON(!ret && !bch_keylist_empty(&insert));
 out:
@@ -2165,7 +2163,8 @@ out:
 }
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-                                   struct keylist *keys, atomic_t *journal_ref)
+                                   struct keylist *keys, atomic_t *journal_ref,
+                                   struct bkey *replace_key)
 {
        if (bch_keylist_empty(keys))
                return 0;
@@ -2182,14 +2181,17 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
                        return -EIO;
                }
 
-               return btree(insert_recurse, k, b, op, keys, journal_ref);
+               return btree(insert_recurse, k, b, op, keys,
+                            journal_ref, replace_key);
        } else {
-               return bch_btree_insert_node(b, op, keys, journal_ref);
+               return bch_btree_insert_node(b, op, keys,
+                                            journal_ref, replace_key);
        }
 }
 
 int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-                    struct keylist *keys, atomic_t *journal_ref)
+                    struct keylist *keys, atomic_t *journal_ref,
+                    struct bkey *replace_key)
 {
        int ret = 0;
 
@@ -2197,7 +2199,8 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 
        while (!bch_keylist_empty(keys)) {
                op->lock = 0;
-               ret = btree_root(insert_recurse, c, op, keys, journal_ref);
+               ret = btree_root(insert_recurse, c, op, keys,
+                                journal_ref, replace_key);
 
                if (ret == -EAGAIN) {
                        BUG();
@@ -2205,8 +2208,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
                } else if (ret) {
                        struct bkey *k;
 
-                       pr_err("error %i trying to insert key for %s",
-                              ret, op_type(op));
+                       pr_err("error %i", ret);
 
                        while ((k = bch_keylist_pop(keys)))
                                bkey_put(c, k, 0);
diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
index 34ee5359b262a198d600da02ec4838d577f67576..6ff08be3e0c97313ade26c775aa3500115261e07 100644
--- a/drivers/md/bcache/btree.h
+++ b/drivers/md/bcache/btree.h
@@ -240,15 +240,7 @@ struct btree_op {
        /* Btree level at which we start taking write locks */
        short                   lock;
 
-       /* Btree insertion type */
-       enum {
-               BTREE_INSERT,
-               BTREE_REPLACE
-       } type:8;
-
        unsigned                insert_collision:1;
-
-       BKEY_PADDED(replace);
 };
 
 static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
@@ -290,7 +282,7 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
                               struct bkey *);
 int bch_btree_insert(struct btree_op *, struct cache_set *,
-                    struct keylist *, atomic_t *);
+                    struct keylist *, atomic_t *, struct bkey *);
 
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 33991a2c460e4b0138f9d65fccffef9acfec0381..bdbca6416ef3f734c5ce0c2890a2662636bfb484 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -322,7 +322,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
                        bkey_copy(keylist.top, k);
                        bch_keylist_push(&keylist);
 
-                       ret = bch_btree_insert(&op, s, &keylist, i->pin);
+                       ret = bch_btree_insert(&op, s, &keylist, i->pin, NULL);
                        if (ret)
                                goto err;
 
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 36ac7db9a53cbc8b6a73c7b359d8247486caccb2..d7404e449b4f3c27551b8dd594ad2707a933b3eb 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -105,8 +105,8 @@ static void write_moving(struct closure *cl)
                s->writeback            = KEY_DIRTY(&io->w->key);
                s->csum                 = KEY_CSUM(&io->w->key);
 
-               s->op.type = BTREE_REPLACE;
-               bkey_copy(&s->op.replace, &io->w->key);
+               bkey_copy(&s->replace_key, &io->w->key);
+               s->replace = true;
 
                closure_init(&s->btree, cl);
                bch_data_insert(&s->btree);
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 82a7047b9c8e634b3d737e74eb641975e5f474e4..0607ba496ea4426c69b45fa8b86f359aaacecc3a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -217,6 +217,7 @@ static void bch_data_insert_keys(struct closure *cl)
 {
        struct search *s = container_of(cl, struct search, btree);
        atomic_t *journal_ref = NULL;
+       struct bkey *replace_key = s->replace ? &s->replace_key : NULL;
 
        /*
         * If we're looping, might already be waiting on
@@ -235,7 +236,8 @@ static void bch_data_insert_keys(struct closure *cl)
                                          s->flush_journal
                                          ? &s->cl : NULL);
 
-       if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
+       if (bch_btree_insert(&s->op, s->c, &s->insert_keys,
+                            journal_ref, replace_key)) {
                s->error                = -ENOMEM;
                s->insert_data_done     = true;
        }
@@ -1055,7 +1057,7 @@ static void cached_dev_read_done(struct closure *cl)
 
        if (s->cache_bio &&
            !test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
-               s->op.type = BTREE_REPLACE;
+               BUG_ON(!s->replace);
                closure_call(&s->btree, bch_data_insert, NULL, cl);
        }
 
@@ -1100,14 +1102,16 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
        s->cache_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
-       s->op.replace = KEY(s->inode, bio->bi_sector +
-                           s->cache_bio_sectors,
-                           s->cache_bio_sectors);
+       s->replace_key = KEY(s->inode, bio->bi_sector +
+                            s->cache_bio_sectors,
+                            s->cache_bio_sectors);
 
-       ret = bch_btree_insert_check_key(b, &s->op, &s->op.replace);
+       ret = bch_btree_insert_check_key(b, &s->op, &s->replace_key);
        if (ret)
                return ret;
 
+       s->replace = true;
+
        miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
        /* btree_search_recurse()'s btree iterator is no good anymore */
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index ed578aa53ee2ec599de3f272e9d28cd7181c31a1..5ea7a0e6fca039f610a1c43c18dac40fa0123b99 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -33,6 +33,7 @@ struct search {
        unsigned                flush_journal:1;
 
        unsigned                insert_data_done:1;
+       unsigned                replace:1;
 
        uint16_t                write_prio;
 
@@ -44,6 +45,7 @@ struct search {
 
        /* Anything past this point won't get zeroed in search_alloc() */
        struct keylist          insert_keys;
+       BKEY_PADDED(replace_key);
 };
 
 unsigned bch_get_congested(struct cache_set *);
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index d0968e8938f75be0572d20cf4f1ba94caf18fba7..346a5341facac202927fc8f952dcf6197e4c53f8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -146,16 +146,14 @@ static void write_dirty_finish(struct closure *cl)
                bch_btree_op_init(&op, -1);
                bch_keylist_init(&keys);
 
-               op.type = BTREE_REPLACE;
-               bkey_copy(&op.replace, &w->key);
-
-               SET_KEY_DIRTY(&w->key, false);
-               bch_keylist_add(&keys, &w->key);
+               bkey_copy(keys.top, &w->key);
+               SET_KEY_DIRTY(keys.top, false);
+               bch_keylist_push(&keys);
 
                for (i = 0; i < KEY_PTRS(&w->key); i++)
                        atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-               bch_btree_insert(&op, dc->disk.c, &keys, NULL);
+               bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);
 
                if (op.insert_collision)
                        trace_bcache_writeback_collision(&w->key);