--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ ... @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 		}
 	}
 
+	if (op->insert_collision)
+		return -ESRCH;
+
 	return ret;
 }
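With this hunk, bch_btree_insert() folds the collision state into its return value: -ESRCH now means the replace key no longer matched (a collision), while any other nonzero return is still a real failure. A minimal caller sketch of the new convention, assuming the argument order shown in the hunks below; insert_one is a hypothetical helper, not part of the patch:

	/* Hypothetical caller: separate a replace collision from a hard failure. */
	static int insert_one(struct btree_op *op, struct cache_set *c,
			      struct keylist *keys, struct bkey *replace_key)
	{
		int ret = bch_btree_insert(op, c, keys, NULL, replace_key);

		if (ret == -ESRCH)
			return 0;	/* collision: the key to replace is gone; not fatal */

		return ret;		/* 0 on success, nonzero on a real failure */
	}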
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ ... @@ static void write_moving_finish(struct closure *cl)
 	bio_for_each_segment_all(bv, bio, i)
 		__free_page(bv->bv_page);
 
-	if (io->s.op.insert_collision)
+	if (io->s.insert_collision)
 		trace_bcache_gc_copy_collision(&io->w->key);
 
 	bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
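write_moving_finish() now reads the flag one level up, off the embedded struct search, instead of reaching into its inner btree_op. A sketch of the containment this accessor relies on, inferred from io->w and io->s above (the exact field set is an assumption, not quoted from the patch):

	/* Assumed shape of the moving-GC request, per the accessors above. */
	struct moving_io {
		struct keybuf_key	*w;	/* dirty key being copied */
		struct search		s;	/* now carries insert_collision */
		struct bbio		bio;	/* the data I/O itself */
	};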
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ ... @@ static void bch_data_insert_keys(struct closure *cl)
 	struct search *s = container_of(cl, struct search, btree);
 	atomic_t *journal_ref = NULL;
 	struct bkey *replace_key = s->replace ? &s->replace_key : NULL;
+	int ret;
 
 	/*
 	 * If we're looping, might already be waiting on
@@ ... @@ static void bch_data_insert_keys(struct closure *cl)
 					  s->flush_journal
 					  ? &s->cl : NULL);
 
-	if (bch_btree_insert(&s->op, s->c, &s->insert_keys,
-			     journal_ref, replace_key)) {
+	ret = bch_btree_insert(&s->op, s->c, &s->insert_keys,
+			       journal_ref, replace_key);
+	if (ret == -ESRCH) {
+		s->insert_collision = true;
+	} else if (ret) {
 		s->error		= -ENOMEM;
 		s->insert_data_done	= true;
 	}
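Note the asymmetry in the new error handling: -ESRCH only latches s->insert_collision for later accounting and leaves s->error untouched, so a collision is not surfaced as an I/O failure; any other nonzero return fails the request with -ENOMEM and ends data insertion, as before.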
@@ ... @@ static void cached_dev_cache_miss_done(struct closure *cl)
 {
 	struct search *s = container_of(cl, struct search, cl);
 
-	if (s->op.insert_collision)
+	if (s->insert_collision)
 		bch_mark_cache_miss_collision(s);
 
 	if (s->cache_bio) {
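This is where the latched bit is consumed: when a cache miss completes, a collision is folded into the cache_miss_collisions statistics via bch_mark_cache_miss_collision(), now keyed off struct search directly.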
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ ... @@ struct search {
 	unsigned		insert_data_done:1;
 	unsigned		replace:1;
+	unsigned		insert_collision:1;
 
 	uint16_t		write_prio;
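The bit itself lands here, packed with the other per-request state flags in struct search. Note that btree_op keeps its own insert_collision member, which bch_btree_insert() still uses internally before translating it to -ESRCH; callers just no longer read it.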
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ ... @@ static void write_dirty_finish(struct closure *cl)
 		unsigned i;
 		struct btree_op op;
 		struct keylist keys;
+		int ret;
 
 		bch_btree_op_init(&op, -1);
 		bch_keylist_init(&keys);
@@ ... @@ static void write_dirty_finish(struct closure *cl)
 		for (i = 0; i < KEY_PTRS(&w->key); i++)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-		bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);
+		ret = bch_btree_insert(&op, dc->disk.c, &keys, NULL, &w->key);
 
-		if (op.insert_collision)
+		if (ret)
 			trace_bcache_writeback_collision(&w->key);
 
-		atomic_long_inc(op.insert_collision
+		atomic_long_inc(ret
 				? &dc->disk.c->writeback_keys_failed
 				: &dc->disk.c->writeback_keys_done);
 	}
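One behavioral nuance in the writeback hunk: since bch_btree_insert() can fail for reasons other than a collision, any nonzero return now both fires trace_bcache_writeback_collision() and counts the key in writeback_keys_failed; only a fully successful insert increments writeback_keys_done.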