reserve += min_t(unsigned, 1,
c->btree_roots[i].b->level) * 8;
- c->btree_cache_reserve = reserve;
+ c->btree_cache.reserve = reserve;
}
-#define mca_can_free(c) \
- max_t(int, 0, c->btree_cache_used - c->btree_cache_reserve)
+/*
+ * Number of cached btree nodes the shrinker is allowed to free: nodes held
+ * beyond the configured reserve. Computed with a signed max_t so that
+ * used < reserve yields 0 instead of wrapping to a huge unsigned value.
+ */
+static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+{
+ return max_t(int, 0, bc->used - bc->reserve);
+}
-static void __mca_data_free(struct bch_fs *c, struct btree *b)
+static void __btree_node_data_free(struct bch_fs *c, struct btree *b)
{
EBUG_ON(btree_node_write_in_flight(b));
- free_pages((unsigned long) b->data, btree_page_order(c));
+ kvpfree(b->data, btree_bytes(c));
b->data = NULL;
bch2_btree_keys_free(b);
}
-static void mca_data_free(struct bch_fs *c, struct btree *b)
+static void btree_node_data_free(struct bch_fs *c, struct btree *b)
{
- __mca_data_free(c, b);
- c->btree_cache_used--;
- list_move(&b->list, &c->btree_cache_freed);
-}
+ struct btree_cache *bc = &c->btree_cache;
-#define PTR_HASH(_k) (bkey_i_to_extent_c(_k)->v._data[0])
+ __btree_node_data_free(c, b);
+ bc->used--;
+ list_move(&b->list, &bc->freed);
+}
static const struct rhashtable_params bch_btree_cache_params = {
.head_offset = offsetof(struct btree, hash),
.key_len = sizeof(struct bch_extent_ptr),
};
-static void mca_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
{
- unsigned order = ilog2(btree_pages(c));
+ struct btree_cache *bc = &c->btree_cache;
- b->data = (void *) __get_free_pages(gfp, order);
+ b->data = kvpmalloc(btree_bytes(c), gfp);
if (!b->data)
goto err;
- if (bch2_btree_keys_alloc(b, order, gfp))
+ if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp))
goto err;
- c->btree_cache_used++;
- list_move(&b->list, &c->btree_cache_freeable);
+ bc->used++;
+ list_move(&b->list, &bc->freeable);
return;
err:
- free_pages((unsigned long) b->data, order);
+ kvpfree(b->data, btree_bytes(c));
b->data = NULL;
- list_move(&b->list, &c->btree_cache_freed);
+ list_move(&b->list, &bc->freed);
}
-static struct btree *mca_bucket_alloc(struct bch_fs *c, gfp_t gfp)
+static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
{
struct btree *b = kzalloc(sizeof(struct btree), gfp);
if (!b)
six_lock_init(&b->lock);
INIT_LIST_HEAD(&b->list);
INIT_LIST_HEAD(&b->write_blocked);
- INIT_LIST_HEAD(&b->reachable);
- mca_data_alloc(c, b, gfp);
+ btree_node_data_alloc(c, b, gfp);
return b->data ? b : NULL;
}
/* Btree in memory cache - hash table */
-void bch2_btree_node_hash_remove(struct bch_fs *c, struct btree *b)
+void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
{
- BUG_ON(btree_node_dirty(b));
-
- b->nsets = 0;
-
- rhashtable_remove_fast(&c->btree_cache_table, &b->hash,
- bch_btree_cache_params);
+ rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
/* Cause future lookups for this node to fail: */
bkey_i_to_extent(&b->key)->v._data[0] = 0;
}
-int bch2_btree_node_hash_insert(struct bch_fs *c, struct btree *b,
- unsigned level, enum btree_id id)
+/*
+ * Insert @b into the btree node hash table, keyed by the node's pointer
+ * (see bch_btree_cache_params). Returns 0 on success or a negative error
+ * from rhashtable (e.g. if a node with the same key is already hashed).
+ * Callers such as bch2_btree_node_hash_insert() take bc->lock around this.
+ */
+int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
+{
+ return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
+ bch_btree_cache_params);
+}
+
+int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
+ unsigned level, enum btree_id id)
{
int ret;
+
b->level = level;
b->btree_id = id;
- ret = rhashtable_lookup_insert_fast(&c->btree_cache_table, &b->hash,
- bch_btree_cache_params);
- if (ret)
- return ret;
-
- mutex_lock(&c->btree_cache_lock);
- list_add(&b->list, &c->btree_cache);
- mutex_unlock(&c->btree_cache_lock);
+ mutex_lock(&bc->lock);
+ ret = __bch2_btree_node_hash_insert(bc, b);
+ if (!ret)
+ list_add(&b->list, &bc->live);
+ mutex_unlock(&bc->lock);
- return 0;
+ return ret;
}
__flatten
-static inline struct btree *mca_find(struct bch_fs *c,
+static inline struct btree *btree_cache_find(struct btree_cache *bc,
const struct bkey_i *k)
{
- return rhashtable_lookup_fast(&c->btree_cache_table, &PTR_HASH(k),
+ return rhashtable_lookup_fast(&bc->table, &PTR_HASH(k),
bch_btree_cache_params);
}
*/
static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
{
+ struct btree_cache *bc = &c->btree_cache;
int ret = 0;
- lockdep_assert_held(&c->btree_cache_lock);
+ lockdep_assert_held(&bc->lock);
if (!six_trylock_intent(&b->lock))
return -ENOMEM;
if (!six_trylock_write(&b->lock))
goto out_unlock_intent;
- if (btree_node_write_error(b) ||
- btree_node_noevict(b))
+ if (btree_node_noevict(b))
goto out_unlock;
if (!btree_node_may_write(b))
btree_node_wait_on_io(b);
}
out:
- if (PTR_HASH(&b->key))
- trace_btree_node_reap(c, b, ret);
+ if (PTR_HASH(&b->key) && !ret)
+ trace_btree_node_reap(c, b);
return ret;
out_unlock:
six_unlock_write(&b->lock);
return __btree_node_reclaim(c, b, true);
}
-static unsigned long bch2_mca_scan(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct bch_fs *c = container_of(shrink, struct bch_fs,
- btree_cache_shrink);
+ btree_cache.shrink);
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
unsigned long can_free;
if (btree_shrinker_disabled(c))
return SHRINK_STOP;
- if (c->btree_cache_alloc_lock)
- return SHRINK_STOP;
-
/* Return -1 if we can't do anything right now */
if (sc->gfp_mask & __GFP_IO)
- mutex_lock(&c->btree_cache_lock);
- else if (!mutex_trylock(&c->btree_cache_lock))
+ mutex_lock(&bc->lock);
+ else if (!mutex_trylock(&bc->lock))
return -1;
/*
* IO can always make forward progress:
*/
nr /= btree_pages(c);
- can_free = mca_can_free(c);
+ can_free = btree_cache_can_free(bc);
nr = min_t(unsigned long, nr, can_free);
i = 0;
- list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
+ list_for_each_entry_safe(b, t, &bc->freeable, list) {
touched++;
if (freed >= nr)
if (++i > 3 &&
!btree_node_reclaim(c, b)) {
- mca_data_free(c, b);
+ btree_node_data_free(c, b);
six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
freed++;
}
}
restart:
- list_for_each_entry_safe(b, t, &c->btree_cache, list) {
+ list_for_each_entry_safe(b, t, &bc->live, list) {
touched++;
if (freed >= nr) {
/* Save position */
- if (&t->list != &c->btree_cache)
- list_move_tail(&c->btree_cache, &t->list);
+ if (&t->list != &bc->live)
+ list_move_tail(&bc->live, &t->list);
break;
}
if (!btree_node_accessed(b) &&
!btree_node_reclaim(c, b)) {
- /* can't call bch2_btree_node_hash_remove under btree_cache_lock */
+ /* can't call bch2_btree_node_hash_remove under lock */
freed++;
- if (&t->list != &c->btree_cache)
- list_move_tail(&c->btree_cache, &t->list);
+ if (&t->list != &bc->live)
+ list_move_tail(&bc->live, &t->list);
- mca_data_free(c, b);
- mutex_unlock(&c->btree_cache_lock);
+ btree_node_data_free(c, b);
+ mutex_unlock(&bc->lock);
- bch2_btree_node_hash_remove(c, b);
+ bch2_btree_node_hash_remove(bc, b);
six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
goto out;
if (sc->gfp_mask & __GFP_IO)
- mutex_lock(&c->btree_cache_lock);
- else if (!mutex_trylock(&c->btree_cache_lock))
+ mutex_lock(&bc->lock);
+ else if (!mutex_trylock(&bc->lock))
goto out;
goto restart;
} else
clear_btree_node_accessed(b);
}
- mutex_unlock(&c->btree_cache_lock);
+ mutex_unlock(&bc->lock);
out:
return (unsigned long) freed * btree_pages(c);
}
-static unsigned long bch2_mca_count(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct bch_fs *c = container_of(shrink, struct bch_fs,
- btree_cache_shrink);
+ btree_cache.shrink);
+ struct btree_cache *bc = &c->btree_cache;
if (btree_shrinker_disabled(c))
return 0;
- if (c->btree_cache_alloc_lock)
- return 0;
-
- return mca_can_free(c) * btree_pages(c);
+ return btree_cache_can_free(bc) * btree_pages(c);
}
-void bch2_fs_btree_exit(struct bch_fs *c)
+void bch2_fs_btree_cache_exit(struct bch_fs *c)
{
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b;
unsigned i;
- if (c->btree_cache_shrink.list.next)
- unregister_shrinker(&c->btree_cache_shrink);
+ if (bc->shrink.list.next)
+ unregister_shrinker(&bc->shrink);
- mutex_lock(&c->btree_cache_lock);
+ mutex_lock(&bc->lock);
#ifdef CONFIG_BCACHEFS_DEBUG
if (c->verify_data)
- list_move(&c->verify_data->list, &c->btree_cache);
+ list_move(&c->verify_data->list, &bc->live);
- free_pages((unsigned long) c->verify_ondisk, ilog2(btree_pages(c)));
+ kvpfree(c->verify_ondisk, btree_bytes(c));
#endif
for (i = 0; i < BTREE_ID_NR; i++)
if (c->btree_roots[i].b)
- list_add(&c->btree_roots[i].b->list, &c->btree_cache);
+ list_add(&c->btree_roots[i].b->list, &bc->live);
- list_splice(&c->btree_cache_freeable,
- &c->btree_cache);
+ list_splice(&bc->freeable, &bc->live);
- while (!list_empty(&c->btree_cache)) {
- b = list_first_entry(&c->btree_cache, struct btree, list);
+ while (!list_empty(&bc->live)) {
+ b = list_first_entry(&bc->live, struct btree, list);
if (btree_node_dirty(b))
bch2_btree_complete_write(c, b, btree_current_write(b));
clear_btree_node_dirty(b);
- mca_data_free(c, b);
+ btree_node_data_free(c, b);
}
- while (!list_empty(&c->btree_cache_freed)) {
- b = list_first_entry(&c->btree_cache_freed,
- struct btree, list);
+ while (!list_empty(&bc->freed)) {
+ b = list_first_entry(&bc->freed, struct btree, list);
list_del(&b->list);
kfree(b);
}
- mutex_unlock(&c->btree_cache_lock);
+ mutex_unlock(&bc->lock);
- if (c->btree_cache_table_init_done)
- rhashtable_destroy(&c->btree_cache_table);
+ if (bc->table_init_done)
+ rhashtable_destroy(&bc->table);
}
-int bch2_fs_btree_init(struct bch_fs *c)
+int bch2_fs_btree_cache_init(struct bch_fs *c)
{
+ struct btree_cache *bc = &c->btree_cache;
unsigned i;
int ret;
- ret = rhashtable_init(&c->btree_cache_table, &bch_btree_cache_params);
+ ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
if (ret)
return ret;
- c->btree_cache_table_init_done = true;
+ bc->table_init_done = true;
bch2_recalc_btree_reserve(c);
- for (i = 0; i < c->btree_cache_reserve; i++)
- if (!mca_bucket_alloc(c, GFP_KERNEL))
+ for (i = 0; i < bc->reserve; i++)
+ if (!btree_node_mem_alloc(c, GFP_KERNEL))
return -ENOMEM;
- list_splice_init(&c->btree_cache,
- &c->btree_cache_freeable);
+ list_splice_init(&bc->live, &bc->freeable);
#ifdef CONFIG_BCACHEFS_DEBUG
mutex_init(&c->verify_lock);
- c->verify_ondisk = (void *)
- __get_free_pages(GFP_KERNEL, ilog2(btree_pages(c)));
+ c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL);
if (!c->verify_ondisk)
return -ENOMEM;
- c->verify_data = mca_bucket_alloc(c, GFP_KERNEL);
+ c->verify_data = btree_node_mem_alloc(c, GFP_KERNEL);
if (!c->verify_data)
return -ENOMEM;
list_del_init(&c->verify_data->list);
#endif
- c->btree_cache_shrink.count_objects = bch2_mca_count;
- c->btree_cache_shrink.scan_objects = bch2_mca_scan;
- c->btree_cache_shrink.seeks = 4;
- c->btree_cache_shrink.batch = btree_pages(c) * 2;
- register_shrinker(&c->btree_cache_shrink);
+ bc->shrink.count_objects = bch2_btree_cache_count;
+ bc->shrink.scan_objects = bch2_btree_cache_scan;
+ bc->shrink.seeks = 4;
+ bc->shrink.batch = btree_pages(c) * 2;
+ register_shrinker(&bc->shrink);
return 0;
}
+/*
+ * Early (allocation-free) initialization of the btree cache: set up the lock
+ * and the three node lists so bch2_fs_btree_cache_exit() is safe to call even
+ * if full initialization (bch2_fs_btree_cache_init()) never ran or failed.
+ */
+void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
+{
+ mutex_init(&bc->lock);
+ INIT_LIST_HEAD(&bc->live);
+ INIT_LIST_HEAD(&bc->freeable);
+ INIT_LIST_HEAD(&bc->freed);
+}
+
/*
* We can only have one thread cannibalizing other cached btree nodes at a time,
* or we'll deadlock. We use an open coded mutex to ensure that, which a
* cannibalize_bucket() will take. This means every time we unlock the root of
* the btree, we need to release this lock if we have it held.
*/
-void bch2_btree_node_cannibalize_unlock(struct bch_fs *c)
+void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
{
- if (c->btree_cache_alloc_lock == current) {
+ struct btree_cache *bc = &c->btree_cache;
+
+ if (bc->alloc_lock == current) {
trace_btree_node_cannibalize_unlock(c);
- c->btree_cache_alloc_lock = NULL;
- closure_wake_up(&c->mca_wait);
+ bc->alloc_lock = NULL;
+ closure_wake_up(&bc->alloc_wait);
}
}
-int bch2_btree_node_cannibalize_lock(struct bch_fs *c, struct closure *cl)
+int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
{
+ struct btree_cache *bc = &c->btree_cache;
struct task_struct *old;
- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
+ old = cmpxchg(&bc->alloc_lock, NULL, current);
if (old == NULL || old == current)
goto success;
return -ENOMEM;
}
- closure_wait(&c->mca_wait, cl);
+ closure_wait(&bc->alloc_wait, cl);
/* Try again, after adding ourselves to waitlist */
- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
+ old = cmpxchg(&bc->alloc_lock, NULL, current);
if (old == NULL || old == current) {
/* We raced */
- closure_wake_up(&c->mca_wait);
+ closure_wake_up(&bc->alloc_wait);
goto success;
}
return 0;
}
-static struct btree *mca_cannibalize(struct bch_fs *c)
+static struct btree *btree_node_cannibalize(struct bch_fs *c)
{
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b;
- list_for_each_entry_reverse(b, &c->btree_cache, list)
+ list_for_each_entry_reverse(b, &bc->live, list)
if (!btree_node_reclaim(c, b))
return b;
while (1) {
- list_for_each_entry_reverse(b, &c->btree_cache, list)
+ list_for_each_entry_reverse(b, &bc->live, list)
if (!btree_node_write_and_reclaim(c, b))
return b;
struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
{
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b;
u64 start_time = local_clock();
- mutex_lock(&c->btree_cache_lock);
+ mutex_lock(&bc->lock);
/*
* btree_free() doesn't free memory; it sticks the node on the end of
* the list. Check if there's any freed nodes there:
*/
- list_for_each_entry(b, &c->btree_cache_freeable, list)
+ list_for_each_entry(b, &bc->freeable, list)
if (!btree_node_reclaim(c, b))
goto out_unlock;
* We never free struct btree itself, just the memory that holds the on
* disk node. Check the freed list before allocating a new one:
*/
- list_for_each_entry(b, &c->btree_cache_freed, list)
+ list_for_each_entry(b, &bc->freed, list)
if (!btree_node_reclaim(c, b)) {
- mca_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
+ btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
if (b->data)
goto out_unlock;
goto err;
}
- b = mca_bucket_alloc(c, __GFP_NOWARN|GFP_NOIO);
+ b = btree_node_mem_alloc(c, __GFP_NOWARN|GFP_NOIO);
if (!b)
goto err;
BUG_ON(!six_trylock_intent(&b->lock));
BUG_ON(!six_trylock_write(&b->lock));
out_unlock:
- BUG_ON(bkey_extent_is_data(&b->key.k) && PTR_HASH(&b->key));
+ BUG_ON(btree_node_hashed(b));
BUG_ON(btree_node_write_in_flight(b));
list_del_init(&b->list);
- mutex_unlock(&c->btree_cache_lock);
+ mutex_unlock(&bc->lock);
out:
b->flags = 0;
b->written = 0;
return b;
err:
/* Try to cannibalize another cached btree node: */
- if (c->btree_cache_alloc_lock == current) {
- b = mca_cannibalize(c);
+ if (bc->alloc_lock == current) {
+ b = btree_node_cannibalize(c);
list_del_init(&b->list);
- mutex_unlock(&c->btree_cache_lock);
+ mutex_unlock(&bc->lock);
- bch2_btree_node_hash_remove(c, b);
+ bch2_btree_node_hash_remove(bc, b);
trace_btree_node_cannibalize(c);
goto out;
}
- mutex_unlock(&c->btree_cache_lock);
+ mutex_unlock(&bc->lock);
return ERR_PTR(-ENOMEM);
}
/* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct btree_iter *iter,
+static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
+ struct btree_iter *iter,
const struct bkey_i *k,
unsigned level,
enum six_lock_type lock_type)
{
- struct bch_fs *c = iter->c;
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b;
+ /*
+ * Parent node must be locked, else we could read in a btree node that's
+ * been freed:
+ */
+ BUG_ON(!btree_node_locked(iter, level + 1));
+
b = bch2_btree_node_mem_alloc(c);
if (IS_ERR(b))
return b;
bkey_copy(&b->key, k);
- if (bch2_btree_node_hash_insert(c, b, level, iter->btree_id)) {
+ if (bch2_btree_node_hash_insert(bc, b, level, iter->btree_id)) {
/* raced with another fill: */
/* mark as unhashed... */
bkey_i_to_extent(&b->key)->v._data[0] = 0;
- mutex_lock(&c->btree_cache_lock);
- list_add(&b->list, &c->btree_cache_freeable);
- mutex_unlock(&c->btree_cache_lock);
+ mutex_lock(&bc->lock);
+ list_add(&b->list, &bc->freeable);
+ mutex_unlock(&bc->lock);
six_unlock_write(&b->lock);
six_unlock_intent(&b->lock);
* The btree node will have either a read or a write lock held, depending on
* the @write parameter.
*/
-struct btree *bch2_btree_node_get(struct btree_iter *iter,
+struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
const struct bkey_i *k, unsigned level,
enum six_lock_type lock_type)
{
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b;
struct bset_tree *t;
BUG_ON(level >= BTREE_MAX_DEPTH);
retry:
rcu_read_lock();
- b = mca_find(iter->c, k);
+ b = btree_cache_find(bc, k);
rcu_read_unlock();
if (unlikely(!b)) {
* else we could read in a btree node from disk that's been
* freed:
*/
- b = bch2_btree_node_fill(iter, k, level, lock_type);
+ b = bch2_btree_node_fill(c, iter, k, level, lock_type);
/* We raced and found the btree node in the cache */
if (!b)
return b;
}
-void bch2_btree_node_prefetch(struct btree_iter *iter,
- const struct bkey_i *k, unsigned level)
+/*
+ * Return @b's previous or next sibling node (per @sib) at the same level,
+ * locked with an intent lock.
+ *
+ * Returns NULL if @b has no parent in the iterator or no sibling in that
+ * direction; returns ERR_PTR(-EINTR) if the required locks could not be
+ * (re)taken, after bumping the iterator's locks_want so a retraverse can
+ * succeed. NOTE(review): caller is presumably expected to retry on -EINTR —
+ * confirm against callers.
+ */
+struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
+ struct btree_iter *iter,
+ struct btree *b,
+ enum btree_node_sibling sib)
+{
+ struct btree *parent;
+ struct btree_node_iter node_iter;
+ struct bkey_packed *k;
+ BKEY_PADDED(k) tmp;
+ struct btree *ret;
+ unsigned level = b->level;
+
+ parent = iter->nodes[level + 1];
+ if (!parent)
+ return NULL;
+
+ /* Need the parent locked to walk its keys for the sibling pointer: */
+ if (!bch2_btree_node_relock(iter, level + 1)) {
+ bch2_btree_iter_set_locks_want(iter, level + 2);
+ return ERR_PTR(-EINTR);
+ }
+
+ /* Work on a copy so the iterator's own position is not disturbed: */
+ node_iter = iter->node_iters[parent->level];
+
+ /* The iterator must currently point at @b's key in the parent: */
+ k = bch2_btree_node_iter_peek_all(&node_iter, parent);
+ BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
+
+ /* Step to the prev/next non-deleted key; NULL means no sibling: */
+ do {
+ k = sib == btree_prev_sib
+ ? bch2_btree_node_iter_prev_all(&node_iter, parent)
+ : (bch2_btree_node_iter_advance(&node_iter, parent),
+ bch2_btree_node_iter_peek_all(&node_iter, parent));
+ if (!k)
+ return NULL;
+ } while (bkey_deleted(k));
+
+ bch2_bkey_unpack(parent, &tmp.k, k);
+
+ ret = bch2_btree_node_get(c, iter, &tmp.k, level, SIX_LOCK_intent);
+
+ /*
+ * -EINTR here means lock ordering forced a release; drop our lock on
+ * @b's level and retry the sibling lookup once:
+ */
+ if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
+ btree_node_unlock(iter, level);
+ ret = bch2_btree_node_get(c, iter, &tmp.k, level, SIX_LOCK_intent);
+ }
+
+ /* If we can't re-lock @b itself, give the sibling back and bail: */
+ if (!IS_ERR(ret) && !bch2_btree_node_relock(iter, level)) {
+ six_unlock_intent(&ret->lock);
+ ret = ERR_PTR(-EINTR);
+ }
+
+ return ret;
+}
+
+void bch2_btree_node_prefetch(struct bch_fs *c, const struct bkey_i *k,
+ unsigned level, enum btree_id btree_id)
{
- struct bch_fs *c = iter->c;
+ struct btree_cache *bc = &c->btree_cache;
struct btree *b;
BUG_ON(level >= BTREE_MAX_DEPTH);
rcu_read_lock();
- b = mca_find(c, k);
+ b = btree_cache_find(bc, k);
rcu_read_unlock();
if (b)
return;
bkey_copy(&b->key, k);
- if (bch2_btree_node_hash_insert(c, b, level, iter->btree_id)) {
+ if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
/* raced with another fill: */
/* mark as unhashed... */
bkey_i_to_extent(&b->key)->v._data[0] = 0;
- mutex_lock(&c->btree_cache_lock);
- list_add(&b->list, &c->btree_cache_freeable);
- mutex_unlock(&c->btree_cache_lock);
+ mutex_lock(&bc->lock);
+ list_add(&b->list, &bc->freeable);
+ mutex_unlock(&bc->lock);
goto out;
}