X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_cache.c;h=13c88d9533e5cdf5fa6bf58a397b5d71284494a3;hb=5d507f795b0b679a67e972a48cbd0854c4ad0f02;hp=f8402709190079a6e108f901193880212e0a794a;hpb=1f78fed4693a5361f56508daac59bebd5b556379;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index f840270..13c88d9 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -32,13 +32,15 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
 	unsigned i, reserve = 16;
 
-	if (!c->btree_roots[0].b)
+	if (!c->btree_roots_known[0].b)
 		reserve += 8;
 
-	for (i = 0; i < BTREE_ID_NR; i++)
-		if (c->btree_roots[i].b)
-			reserve += min_t(unsigned, 1,
-					 c->btree_roots[i].b->c.level) * 8;
+	for (i = 0; i < btree_id_nr_alive(c); i++) {
+		struct btree_root *r = bch2_btree_id_root(c, i);
+
+		if (r->b)
+			reserve += min_t(unsigned, 1, r->b->c.level) * 8;
+	}
 
 	c->btree_cache.reserve = reserve;
 }
@@ -128,9 +130,6 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 		return NULL;
 
 	bkey_btree_ptr_init(&b->key);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	lockdep_set_no_check_recursion(&b->c.lock.dep_map);
-#endif
 	INIT_LIST_HEAD(&b->list);
 	INIT_LIST_HEAD(&b->write_blocked);
 	b->byte_order = ilog2(btree_bytes(c));
@@ -449,8 +448,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	struct btree *b;
 	unsigned i, flags;
 
-	if (bc->shrink.list.next)
-		unregister_shrinker(&bc->shrink);
+	unregister_shrinker(&bc->shrink);
 
 	/* vfree() can allocate memory: */
 	flags = memalloc_nofs_save();
@@ -461,9 +459,12 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 
 	kvpfree(c->verify_ondisk, btree_bytes(c));
 
-	for (i = 0; i < BTREE_ID_NR; i++)
-		if (c->btree_roots[i].b)
-			list_add(&c->btree_roots[i].b->list, &bc->live);
+	for (i = 0; i < btree_id_nr_alive(c); i++) {
+		struct btree_root *r = bch2_btree_id_root(c, i);
+
+		if (r->b)
+			list_add(&r->b->list, &bc->live);
+	}
 
 	list_splice(&bc->freeable, &bc->live);
 
@@ -504,21 +505,17 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	unsigned i;
 	int ret = 0;
 
-	pr_verbose_init(c->opts, "");
-
 	ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
 	if (ret)
-		goto out;
+		goto err;
 
 	bc->table_init_done = true;
 
 	bch2_recalc_btree_reserve(c);
 
 	for (i = 0; i < bc->reserve; i++)
-		if (!__bch2_btree_node_mem_alloc(c)) {
-			ret = -BCH_ERR_ENOMEM_fs_btree_cache_init;
-			goto out;
-		}
+		if (!__bch2_btree_node_mem_alloc(c))
+			goto err;
 
 	list_splice_init(&bc->live, &bc->freeable);
 
@@ -529,9 +526,12 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	bc->shrink.to_text = bch2_btree_cache_shrinker_to_text;
 	bc->shrink.seeks = 4;
 	ret = register_shrinker(&bc->shrink, "%s/btree_cache", c->name);
-out:
-	pr_verbose_init(c->opts, "ret %i", ret);
-	return ret;
+	if (ret)
+		goto err;
+
+	return 0;
+err:
+	return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 }
 
 void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
@@ -639,9 +639,10 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 		goto got_node;
 	}
 
-	b = __btree_node_mem_alloc(c, __GFP_NOWARN);
+	b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
 	if (!b) {
 		mutex_unlock(&bc->lock);
+		bch2_trans_unlock(trans);
 		b = __btree_node_mem_alloc(c, GFP_KERNEL);
 		if (!b)
 			goto err;
@@ -670,8 +671,11 @@ got_node:
 
 	mutex_unlock(&bc->lock);
 
-	if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
-		goto err;
+	if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+		bch2_trans_unlock(trans);
+		if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+			goto err;
+	}
 
 	mutex_lock(&bc->lock);
 	bc->used++;
@@ -792,7 +796,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
 	/* Unlock before doing IO: */
 	if (trans && sync)
-		bch2_trans_unlock(trans);
+		bch2_trans_unlock_noassert(trans);
 
 	bch2_btree_node_read(c, b, sync);
 
@@ -821,7 +825,7 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 {
 	struct printbuf buf = PRINTBUF;
 
-	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
 		return;
 
 	prt_printf(&buf,
@@ -864,6 +868,7 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	struct bset_tree *t;
+	bool need_relock = false;
 	int ret;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -877,6 +882,7 @@ retry:
 		 */
 		b = bch2_btree_node_fill(trans, path, k, path->btree_id,
 					 level, lock_type, true);
+		need_relock = true;
 
 		/* We raced and found the btree node in the cache */
 		if (!b)
@@ -915,6 +921,7 @@ retry:
 
 		six_unlock_type(&b->c.lock, lock_type);
 		bch2_trans_unlock(trans);
+		need_relock = true;
 
 		bch2_btree_node_wait_on_read(b);
 
@@ -922,19 +929,19 @@ retry:
 		 * should_be_locked is not set on this path yet, so we need to
 		 * relock it specifically:
 		 */
-		if (trans) {
-			int ret = bch2_trans_relock(trans) ?:
-				bch2_btree_path_relock_intent(trans, path);
-			if (ret) {
-				BUG_ON(!trans->restarted);
-				return ERR_PTR(ret);
-			}
-		}
-
 		if (!six_relock_type(&b->c.lock, lock_type, seq))
 			goto retry;
 	}
 
+	if (unlikely(need_relock)) {
+		int ret = bch2_trans_relock(trans) ?:
+			  bch2_btree_path_relock_intent(trans, path);
+		if (ret) {
+			six_unlock_type(&b->c.lock, lock_type);
+			return ERR_PTR(ret);
+		}
+	}
+
 	prefetch(b->aux_data);
 
 	for_each_bset(b, t) {