struct journal_replay *i =
list_entry(list->prev, struct journal_replay, list);
uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
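+ /* Journal replay now carries its own keylist on the stack instead
+ * of using the keylist embedded in struct btree_op. */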
+ struct keylist keylist;
+
+ bch_keylist_init(&keylist);
list_for_each_entry(i, list, list) {
BUG_ON(i->pin && atomic_read(i->pin) != 1);
for (k = i->j.start;
k < end(&i->j);
k = bkey_next(k)) {
trace_bcache_journal_replay_key(k);
- bkey_copy(op->keys.top, k);
- bch_keylist_push(&op->keys);
+ bkey_copy(keylist.top, k);
+ bch_keylist_push(&keylist);
op->journal = i->pin;
- ret = bch_btree_insert(op, s, &op->keys);
+ ret = bch_btree_insert(op, s, &keylist);
if (ret)
goto err;
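+ /* A successful bch_btree_insert() consumes the whole keylist */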
- BUG_ON(!bch_keylist_empty(&op->keys));
+ BUG_ON(!bch_keylist_empty(&keylist));
keys++;
cond_resched();
#endif
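+ /* The keys to journal and insert now live in the search */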
if (s->write)
- op->journal = bch_journal(op->c, &op->keys,
+ op->journal = bch_journal(op->c, &s->insert_keys,
op->flush_journal
? &s->cl : NULL);
- if (bch_btree_insert(op, op->c, &op->keys)) {
+ if (bch_btree_insert(op, op->c, &s->insert_keys)) {
s->error = -ENOMEM;
op->insert_data_done = true;
}
if (!op->insert_data_done)
continue_at(cl, bch_data_insert_start, bcache_wq);
- bch_keylist_free(&op->keys);
+ bch_keylist_free(&s->insert_keys);
closure_return(cl);
}
static void bch_data_invalidate(struct closure *cl)
{
struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct search *s = container_of(op, struct search, op);
struct bio *bio = op->cache_bio;
pr_debug("invalidating %i sectors from %llu",
while (bio_sectors(bio)) {
unsigned len = min(bio_sectors(bio), 1U << 14);
- if (bch_keylist_realloc(&op->keys, 0, op->c))
+ if (bch_keylist_realloc(&s->insert_keys, 0, op->c))
goto out;
bio->bi_sector += len;
bio->bi_size -= len << 9;
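+ /* A key with no pointers invalidates the range it covers */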
- bch_keylist_add(&op->keys, &KEY(op->inode,
- bio->bi_sector, len));
+ bch_keylist_add(&s->insert_keys,
+ &KEY(op->inode, bio->bi_sector, len));
}
op->insert_data_done = true;
static void bch_data_insert_error(struct closure *cl)
{
struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct search *s = container_of(op, struct search, op);
/*
* Our data write just errored, which means we've got a bunch of keys to
* insert that point to data that wasn't successfully written.
*
* We don't have to insert those keys but we still have to invalidate
* that region of the cache - so, if we just strip off all the pointers
* from the keys we'll accomplish just that.
*/
- struct bkey *src = op->keys.keys, *dst = op->keys.keys;
+ struct bkey *src = s->insert_keys.keys, *dst = s->insert_keys.keys;
- while (src != op->keys.top) {
+ while (src != s->insert_keys.top) {
struct bkey *n = bkey_next(src);
SET_KEY_PTRS(src, 0);
memmove(dst, src, bkey_bytes(src));
dst = bkey_next(dst);
src = n;
}
- op->keys.top = dst;
+ s->insert_keys.top = dst;
bch_data_insert_keys(cl);
}
? s->d->bio_split : op->c->bio_split;
/* 1 for the device pointer and 1 for the chksum */
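+ /* No room left in the keylist: flush the accumulated keys and
+ * pick the rest of the insert back up afterwards */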
- if (bch_keylist_realloc(&op->keys,
+ if (bch_keylist_realloc(&s->insert_keys,
1 + (op->csum ? 1 : 0),
op->c))
continue_at(cl, bch_data_insert_keys, bcache_wq);
- k = op->keys.top;
+ k = s->insert_keys.top;
bkey_init(k);
SET_KEY_INODE(k, op->inode);
SET_KEY_OFFSET(k, bio->bi_sector);
bio_csum(n, k);
trace_bcache_cache_insert(k);
- bch_keylist_push(&op->keys);
+ bch_keylist_push(&s->insert_keys);
n->bi_rw |= REQ_WRITE;
bch_submit_bbio(n, op->c, k, 0);
op->insert_data_done = true;
bio_put(bio);
- if (!bch_keylist_empty(&op->keys))
+ if (!bch_keylist_empty(&s->insert_keys))
continue_at(cl, bch_data_insert_keys, bcache_wq);
else
closure_return(cl);
void bch_data_insert(struct closure *cl)
{
struct btree_op *op = container_of(cl, struct btree_op, cl);
+ struct search *s = container_of(op, struct search, op);
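+ /* Set up the keylist that bch_data_insert_start() will fill */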
- bch_keylist_init(&op->keys);
+ bch_keylist_init(&s->insert_keys);
bio_get(op->cache_bio);
bch_data_insert_start(cl);
}
static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
struct bio_vec *bv;
- struct search *s = mempool_alloc(d->c->search, GFP_NOIO);
- memset(s, 0, offsetof(struct search, op.keys));
+ struct search *s;
+
+ s = mempool_alloc(d->c->search, GFP_NOIO);
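+ /* Only zero up to insert_keys - the keylist is initialized
+ * separately, in bch_data_insert() */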
+ memset(s, 0, offsetof(struct search, insert_keys));
__closure_init(&s->cl, NULL);