int ret;
memset(&t, 0, sizeof(struct bset_stats));
- bch_btree_op_init_stack(&t.op);
+ bch_btree_op_init(&t.op, -1);
ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
if (ret < 0)
static struct workqueue_struct *btree_io_wq;
-void bch_btree_op_init_stack(struct btree_op *op)
-{
- memset(op, 0, sizeof(struct btree_op));
- closure_init_stack(&op->cl);
- op->lock = -1;
-}
-
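With the embedded closure gone, a caller that needs to wait on a btree node write now uses an on-stack closure instead. A minimal sketch of the pattern the hunks below switch to (b here is whichever node is being written):

    struct closure cl;

    closure_init_stack(&cl);

    bch_btree_node_write(b, &cl);   /* the write is hung off cl */
    closure_sync(&cl);              /* so this blocks until it completes */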
static inline bool should_split(struct btree *b)
{
struct bset *i = write_block(b);
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
* in from disk if necessary.
*
- * If IO is necessary, it uses the closure embedded in struct btree_op to wait;
- * if that closure is in non blocking mode, will return -EAGAIN.
+ * If IO is necessary and we are running under generic_make_request(), returns -EAGAIN.
*
* The btree node will have either a read or a write lock held, depending on
* level and op->lock.
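Callers that can run under generic_make_request() therefore have to handle -EAGAIN themselves, typically by bouncing to process context and retrying. A hedged sketch, modelled on the cache_lookup() hunk further down (the continue_at()/bcache_wq retry is not shown in these hunks and is assumed here):

    int ret = bch_btree_map_keys(&s->op, s->c,
                                 &KEY(s->inode, bio->bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);

    if (ret == -EAGAIN)
        /* assumed: requeue on a workqueue so the retry can block on the node read */
        continue_at(cl, cache_lookup, bcache_wq);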
{
unsigned nodes = 0, keys = 0, blocks;
int i;
+ struct closure cl;
+
+ closure_init_stack(&cl);
while (nodes < GC_MERGE_NODES && r[nodes].b)
keys += r[nodes++].keys;
{
void write(struct btree *r)
{
- if (!r->written)
- bch_btree_node_write(r, &op->cl);
- else if (btree_node_dirty(r))
+ if (!r->written || btree_node_dirty(r))
bch_btree_node_write(r, writes);
up_write(&r->lock);
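Note on the hunk above: the never-written case and the dirty case are folded together, and both now write via the writes closure. bch_btree_gc() still does closure_sync(&writes) after the root call (see the hunk below that drops closure_sync(&op.cl)), so the write is still waited on before GC finishes; only the separate op->cl sync goes away.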
struct btree *n = NULL;
unsigned keys = 0;
int ret = 0, stale = btree_gc_mark_node(b, &keys, gc);
+ struct closure cl;
+
+ closure_init_stack(&cl);
if (b->level || stale > 10)
n = btree_node_alloc_replacement(b);
ret = btree_gc_recurse(b, op, writes, gc);
if (!b->written || btree_node_dirty(b)) {
- bch_btree_node_write(b, n ? &op->cl : NULL);
+ bch_btree_node_write(b, n ? &cl : NULL);
}
if (!IS_ERR_OR_NULL(n)) {
- closure_sync(&op->cl);
+ closure_sync(&cl);
bch_btree_set_root(b);
btree_node_free(n);
rw_unlock(true, b);
memset(&stats, 0, sizeof(struct gc_stat));
closure_init_stack(&writes);
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ bch_btree_op_init(&op, SHRT_MAX);
btree_gc_start(c);
atomic_inc(&c->prio_blocked);
ret = btree_root(gc_root, c, &op, &writes, &stats);
- closure_sync(&op.cl);
closure_sync(&writes);
if (ret) {
}
/* Possibly wait for new UUIDs or whatever to hit disk */
- bch_journal_meta(c, &op.cl);
- closure_sync(&op.cl);
+ bch_journal_meta(c, &writes);
+ closure_sync(&writes);
available = bch_btree_gc_finish(c);
struct btree_op op;
memset(seen, 0, sizeof(seen));
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ bch_btree_op_init(&op, SHRT_MAX);
for (i = 0; c->cache[i]; i++) {
size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
bool split;
struct btree *n1, *n2 = NULL, *n3 = NULL;
uint64_t start_time = local_clock();
+ struct closure cl;
+
+ closure_init_stack(&cl);
n1 = btree_node_alloc_replacement(b);
if (IS_ERR(n1))
bkey_copy_key(&n2->key, &b->key);
bch_keylist_add(parent_keys, &n2->key);
- bch_btree_node_write(n2, &op->cl);
+ bch_btree_node_write(n2, &cl);
rw_unlock(true, n2);
} else {
trace_bcache_btree_node_compact(b, n1->sets[0].data->keys);
}
bch_keylist_add(parent_keys, &n1->key);
- bch_btree_node_write(n1, &op->cl);
+ bch_btree_node_write(n1, &cl);
if (n3) {
/* Depth increases, make a new root */
bkey_copy_key(&n3->key, &MAX_KEY);
bch_btree_insert_keys(n3, op, parent_keys);
- bch_btree_node_write(n3, &op->cl);
+ bch_btree_node_write(n3, &cl);
- closure_sync(&op->cl);
+ closure_sync(&cl);
bch_btree_set_root(n3);
rw_unlock(true, n3);
} else if (!b->parent) {
/* Root filled up but didn't need to be split */
bch_keylist_reset(parent_keys);
- closure_sync(&op->cl);
+ closure_sync(&cl);
bch_btree_set_root(n1);
} else {
unsigned i;
}
bch_keylist_push(parent_keys);
- closure_sync(&op->cl);
+ closure_sync(&cl);
atomic_inc(&b->c->prio_blocked);
}
BUG_ON(write_block(b) != b->sets[b->nsets].data);
if (bch_btree_insert_keys(b, op, insert_keys)) {
- if (!b->level)
+ if (!b->level) {
bch_btree_leaf_dirty(b, journal_ref);
- else
- bch_btree_node_write(b, &op->cl);
+ } else {
+ struct closure cl;
+
+ closure_init_stack(&cl);
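+ /*
+ * Leaf updates are only marked dirty and are covered by the journal;
+ * interior-node updates are written out and waited on right here.
+ */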
+ bch_btree_node_write(b, &cl);
+ closure_sync(&cl);
+ }
}
}
} while (!bch_keylist_empty(&split_keys));
{
int ret = 0;
- /*
- * Don't want to block with the btree locked unless we have to,
- * otherwise we get deadlocks with try_harder and between split/gc
- */
- clear_closure_blocking(&op->cl);
-
BUG_ON(bch_keylist_empty(keys));
while (!bch_keylist_empty(keys)) {
ret = btree_root(insert_recurse, c, op, keys, journal_ref);
if (ret == -EAGAIN) {
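+ /*
+ * -EAGAIN would mean we were called under generic_make_request()
+ * (see bch_btree_node_get()), which the insert path never should be.
+ */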
+ BUG();
ret = 0;
- closure_sync(&op->cl);
} else if (ret) {
struct bkey *k;
int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
- int ret = btree_root(map_nodes_recurse, c, op, from, fn, flags);
- if (closure_blocking(&op->cl))
- closure_sync(&op->cl);
- return ret;
+ return btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
struct bkey *from, btree_map_keys_fn *fn, int flags)
{
- int ret = btree_root(map_keys_recurse, c, op, from, fn, flags);
- if (closure_blocking(&op->cl))
- closure_sync(&op->cl);
- return ret;
+ return btree_root(map_keys_recurse, c, op, from, fn, flags);
}
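For reference, a sketch of driving one of these walks now that there is no closure to sync (c being the cache_set, as in the bset_stats hunk above). The callback signature and the MAP_CONTINUE/MAP_DONE return convention are assumed from the btree_map_keys_fn users elsewhere in this patch (keybuf refill, sectors_dirty_init); count_keys_fn and struct count_op are invented for the example:

    struct count_op {
        struct btree_op op;
        unsigned        keys;
    };

    static int count_keys_fn(struct btree_op *b_op, struct btree *b,
                             struct bkey *k)
    {
        struct count_op *op = container_of(b_op, struct count_op, op);

        op->keys++;
        return MAP_CONTINUE;    /* MAP_DONE would end the walk early */
    }

    /* caller, from process context */
    struct count_op op = { .keys = 0 };

    bch_btree_op_init(&op.op, -1);  /* -1: take read locks only */
    bch_btree_map_keys(&op.op, c, &ZERO_KEY, count_keys_fn, 0);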
/* Keybuf code */
cond_resched();
- bch_btree_op_init_stack(&refill.op);
+ bch_btree_op_init(&refill.op, -1);
refill.buf = buf;
refill.end = end;
refill.pred = pred;
/* Recursing down the btree */
struct btree_op {
- struct closure cl;
-
/* Btree level at which we start taking write locks */
short lock;
BKEY_PADDED(replace);
};
-void bch_btree_op_init_stack(struct btree_op *);
+static inline void bch_btree_op_init(struct btree_op *op, int write_lock_level)
+{
+ memset(op, 0, sizeof(struct btree_op));
+ op->lock = write_lock_level;
+}
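The write_lock_level argument is just the op->lock value documented above: the btree level at which the traversal starts taking write locks. The two values used throughout this patch:

    struct btree_op op;

    bch_btree_op_init(&op, -1);         /* walks that only need read locks */

    bch_btree_op_init(&op, SHRT_MAX);   /* GC, btree check, journal replay:
                                           write locks at every level */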
static inline void rw_lock(bool w, struct btree *b, int level)
{
struct btree_op op;
bch_keylist_init(&keylist);
- bch_btree_op_init_stack(&op);
- op.lock = SHRT_MAX;
+ bch_btree_op_init(&op, SHRT_MAX);
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
pr_info("journal replay done, %i keys in %i entries, seq %llu",
keys, entries, end);
-
+err:
while (!list_empty(list)) {
i = list_first_entry(list, struct journal_replay, list);
list_del(&i->list);
kfree(i);
}
-err:
- closure_sync(&op.cl);
+
return ret;
}
s->op.type = BTREE_REPLACE;
bkey_copy(&s->op.replace, &io->w->key);
- closure_init(&s->op.cl, cl);
- bch_data_insert(&s->op.cl);
+ closure_init(&s->btree, cl);
+ bch_data_insert(&s->btree);
}
continue_at(cl, write_moving_finish, NULL);
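Aside: open-coding closure_init() followed by a direct call, as above, appears equivalent to the form the request paths below use; with a NULL workqueue, closure_call() also just invokes the function immediately. (That reading of closure_call() is an assumption, not something these hunks show.)

    closure_call(&s->btree, bch_data_insert, NULL, cl);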
static void bch_data_insert_keys(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
atomic_t *journal_ref = NULL;
/*
s->flush_journal
? &s->cl : NULL);
- if (bch_btree_insert(op, s->c, &s->insert_keys, journal_ref)) {
+ if (bch_btree_insert(&s->op, s->c, &s->insert_keys, journal_ref)) {
s->error = -ENOMEM;
s->insert_data_done = true;
}
static void bch_data_invalidate(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
struct bio *bio = s->cache_bio;
pr_debug("invalidating %i sectors from %llu",
static void bch_data_insert_error(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
/*
* Our data write just errored, which means we've got a bunch of keys to
static void bch_data_insert_endio(struct bio *bio, int error)
{
struct closure *cl = bio->bi_private;
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
if (error) {
/* TODO: We could try to recover from this. */
static void bch_data_insert_start(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
struct bio *bio = s->cache_bio, *n;
if (s->bypass)
*/
void bch_data_insert(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
bch_keylist_init(&s->insert_keys);
bio_get(s->cache_bio);
static void cache_lookup(struct closure *cl)
{
- struct btree_op *op = container_of(cl, struct btree_op, cl);
- struct search *s = container_of(op, struct search, op);
+ struct search *s = container_of(cl, struct search, btree);
struct bio *bio = &s->bio.bio;
- int ret = bch_btree_map_keys(op, s->c,
+ int ret = bch_btree_map_keys(&s->op, s->c,
&KEY(s->inode, bio->bi_sector, 0),
cache_lookup_fn, MAP_END_KEY);
if (ret == -EAGAIN)
if (s->cache_bio &&
!test_bit(CACHE_SET_STOPPING, &s->c->flags)) {
s->op.type = BTREE_REPLACE;
- closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+ closure_call(&s->btree, bch_data_insert, NULL, cl);
}
continue_at(cl, cached_dev_cache_miss_done, NULL);
{
struct closure *cl = &s->cl;
- closure_call(&s->op.cl, cache_lookup, NULL, cl);
+ closure_call(&s->btree, cache_lookup, NULL, cl);
continue_at(cl, cached_dev_read_done_bh, NULL);
}
closure_bio_submit(bio, cl, s->d);
}
- closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+ closure_call(&s->btree, bch_data_insert, NULL, cl);
continue_at(cl, cached_dev_write_complete, NULL);
}
s->writeback = true;
s->cache_bio = bio;
- closure_call(&s->op.cl, bch_data_insert, NULL, cl);
+ closure_call(&s->btree, bch_data_insert, NULL, cl);
} else {
- closure_call(&s->op.cl, cache_lookup, NULL, cl);
+ closure_call(&s->btree, cache_lookup, NULL, cl);
}
continue_at(cl, search_free, NULL);
struct search {
/* Stack frame for bio_complete */
struct closure cl;
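+ /* btree traversal / data insert closure (replaces the closure that used to be embedded in struct btree_op) */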
+ struct closure btree;
struct bcache_device *d;
struct cache_set *c;
struct btree_op op;
struct keylist keys;
- bch_btree_op_init_stack(&op);
+ bch_btree_op_init(&op, -1);
bch_keylist_init(&keys);
op.type = BTREE_REPLACE;
atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
bch_btree_insert(&op, dc->disk.c, &keys, NULL);
- closure_sync(&op.cl);
if (op.insert_collision)
trace_bcache_writeback_collision(&w->key);
{
struct sectors_dirty_init op;
- bch_btree_op_init_stack(&op.op);
+ bch_btree_op_init(&op.op, -1);
op.inode = dc->disk.id;
bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),