/* Btree key manipulation */
-void __bkey_put(struct cache_set *c, struct bkey *k)
+void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
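/*
 * Note: bkey_put() (formerly __bkey_put()) just drops the per-bucket pin on
 * each pointer in the key.  The level-aware wrapper below goes away; callers
 * that insert btree node pointers skip the put themselves (see the !b->level
 * check further down in this patch).
 */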
-static void bkey_put(struct cache_set *c, struct bkey *k, int level)
-{
- if ((level && KEY_OFFSET(k)) || !level)
- __bkey_put(c, k);
-}
-
/* Btree IO */
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
if (__bch_bucket_alloc_set(c, WATERMARK_METADATA, &k.key, 1, true))
goto err;
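	/*
	 * The allocator pins the freshly allocated bucket; for a btree node that
	 * pin is presumably redundant (the node is protected by the btree locks),
	 * so it is dropped immediately below rather than in every later exit path.
	 */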
+ bkey_put(c, &k.key);
SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
b = mca_alloc(c, &k.key, level);
if (!b) {
cache_bug(c,
"Tried to allocate bucket that was in btree cache");
- __bkey_put(c, &k.key);
goto retry;
}
return b;
err_free:
bch_bucket_free(c, &k.key);
- __bkey_put(c, &k.key);
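	/*
	 * The error paths no longer drop a pin either: it was already released
	 * right after __bch_bucket_alloc_set() above.
	 */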
err:
mutex_unlock(&c->bucket_lock);
if (!IS_ERR_OR_NULL(n)) {
swap(b, n);
- __bkey_put(b->c, &b->key);
memcpy(k->ptr, b->key.ptr,
sizeof(uint64_t) * KEY_PTRS(&b->key));
break;
if (bkey_cmp(k, &b->key) <= 0) {
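			/*
			 * Only keys headed for a leaf still carry the allocator's pin
			 * (they point at data buckets); btree node pointers now drop
			 * theirs at allocation time, hence the !b->level check below.
			 */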
- bkey_put(b->c, k, b->level);
+ if (!b->level)
+ bkey_put(b->c, k);
ret |= btree_insert_key(b, op, k, replace_key);
bch_keylist_pop_front(insert_keys);
} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
-#if 0
- if (replace_key) {
- bkey_put(b->c, k, b->level);
- bch_keylist_pop_front(insert_keys);
- op->insert_collision = true;
- break;
- }
-#endif
BKEY_PADDED(key) temp;
bkey_copy(&temp.key, insert_keys->keys);
return 0;
err_free2:
- __bkey_put(n2->c, &n2->key);
btree_node_free(n2);
rw_unlock(true, n2);
err_free1:
- __bkey_put(n1->c, &n1->key);
btree_node_free(n1);
rw_unlock(true, n1);
err:
pr_err("error %i", ret);
while ((k = bch_keylist_pop(keys)))
- bkey_put(c, k, 0);
+ bkey_put(c, k);
} else if (op.op.insert_collision)
ret = -ESRCH;
mutex_unlock(&b->c->bucket_lock);
b->c->root = b;
- __bkey_put(b->c, &b->key);
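	/*
	 * No ref to drop on the new root's key: it was already put right after
	 * allocation, so setting the root is just the pointer assignment plus a
	 * journal write.
	 */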
bch_journal_meta(b->c, &cl);
closure_sync(&cl);