X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_cache.c;h=13c88d9533e5cdf5fa6bf58a397b5d71284494a3;hb=5d507f795b0b679a67e972a48cbd0854c4ad0f02;hp=2c9c3c18defecb5445baa1762519c90ae6346be2;hpb=ac0d08877aa87a9cdf493bc6f336c391fb4e04a0;p=bcachefs-tools-debian diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c index 2c9c3c1..13c88d9 100644 --- a/libbcachefs/btree_cache.c +++ b/libbcachefs/btree_cache.c @@ -1,19 +1,29 @@ // SPDX-License-Identifier: GPL-2.0 #include "bcachefs.h" +#include "bkey_buf.h" #include "btree_cache.h" #include "btree_io.h" #include "btree_iter.h" #include "btree_locking.h" #include "debug.h" +#include "errcode.h" +#include "error.h" +#include "trace.h" #include #include -#include +#include -const char * const bch2_btree_ids[] = { -#define x(kwd, val, name) name, - BCH_BTREE_IDS() +#define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \ +do { \ + if (shrinker_counter) \ + bc->not_freed_##counter++; \ +} while (0) + +const char * const bch2_btree_node_flags[] = { +#define x(f) #f, + BTREE_FLAGS() #undef x NULL }; @@ -22,13 +32,15 @@ void bch2_recalc_btree_reserve(struct bch_fs *c) { unsigned i, reserve = 16; - if (!c->btree_roots[0].b) + if (!c->btree_roots_known[0].b) reserve += 8; - for (i = 0; i < BTREE_ID_NR; i++) - if (c->btree_roots[i].b) - reserve += min_t(unsigned, 1, - c->btree_roots[i].b->level) * 8; + for (i = 0; i < btree_id_nr_alive(c); i++) { + struct btree_root *r = bch2_btree_id_root(c, i); + + if (r->b) + reserve += min_t(unsigned, 1, r->b->c.level) * 8; + } c->btree_cache.reserve = reserve; } @@ -38,22 +50,34 @@ static inline unsigned btree_cache_can_free(struct btree_cache *bc) return max_t(int, 0, bc->used - bc->reserve); } -static void __btree_node_data_free(struct bch_fs *c, struct btree *b) +static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b) { - EBUG_ON(btree_node_write_in_flight(b)); - - kvpfree(b->data, btree_bytes(c)); - b->data = NULL; - bch2_btree_keys_free(b); + if (b->c.lock.readers) + list_move(&b->list, &bc->freed_pcpu); + else + list_move(&b->list, &bc->freed_nonpcpu); } static void btree_node_data_free(struct bch_fs *c, struct btree *b) { struct btree_cache *bc = &c->btree_cache; - __btree_node_data_free(c, b); + EBUG_ON(btree_node_write_in_flight(b)); + + clear_btree_node_just_written(b); + + kvpfree(b->data, btree_bytes(c)); + b->data = NULL; +#ifdef __KERNEL__ + kvfree(b->aux_data); +#else + munmap(b->aux_data, btree_aux_data_bytes(b)); +#endif + b->aux_data = NULL; + bc->used--; - list_move(&b->list, &bc->freed); + + btree_node_to_freedlist(bc, b); } static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg, @@ -72,46 +96,74 @@ static const struct rhashtable_params bch_btree_cache_params = { .obj_cmpfn = bch2_btree_cache_cmp_fn, }; -static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) +static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp) { - struct btree_cache *bc = &c->btree_cache; + BUG_ON(b->data || b->aux_data); b->data = kvpmalloc(btree_bytes(c), gfp); if (!b->data) - goto err; - - if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp)) - goto err; + return -BCH_ERR_ENOMEM_btree_node_mem_alloc; +#ifdef __KERNEL__ + b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp); +#else + b->aux_data = mmap(NULL, btree_aux_data_bytes(b), + PROT_READ|PROT_WRITE|PROT_EXEC, + MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); + if (b->aux_data == MAP_FAILED) + b->aux_data = NULL; +#endif + if (!b->aux_data) { + kvpfree(b->data, 
btree_bytes(c)); + b->data = NULL; + return -BCH_ERR_ENOMEM_btree_node_mem_alloc; + } - bc->used++; - list_move(&b->list, &bc->freeable); - return; -err: - kvpfree(b->data, btree_bytes(c)); - b->data = NULL; - list_move(&b->list, &bc->freed); + return 0; } -static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp) +static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp) { - struct btree *b = kzalloc(sizeof(struct btree), gfp); + struct btree *b; + + b = kzalloc(sizeof(struct btree), gfp); if (!b) return NULL; bkey_btree_ptr_init(&b->key); - six_lock_init(&b->lock); INIT_LIST_HEAD(&b->list); INIT_LIST_HEAD(&b->write_blocked); + b->byte_order = ilog2(btree_bytes(c)); + return b; +} - btree_node_data_alloc(c, b, gfp); - return b->data ? b : NULL; +struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c) +{ + struct btree_cache *bc = &c->btree_cache; + struct btree *b; + + b = __btree_node_mem_alloc(c, GFP_KERNEL); + if (!b) + return NULL; + + if (btree_node_data_alloc(c, b, GFP_KERNEL)) { + kfree(b); + return NULL; + } + + bch2_btree_lock_init(&b->c, 0); + + bc->used++; + list_add(&b->list, &bc->freeable); + return b; } /* Btree in memory cache - hash table */ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b) { - rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); + int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params); + + BUG_ON(ret); /* Cause future lookups for this node to fail: */ b->hash_val = 0; @@ -131,13 +183,13 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b, { int ret; - b->level = level; - b->btree_id = id; + b->c.level = level; + b->c.btree_id = id; mutex_lock(&bc->lock); ret = __bch2_btree_node_hash_insert(bc, b); if (!ret) - list_add(&b->list, &bc->live); + list_add_tail(&b->list, &bc->live); mutex_unlock(&bc->lock); return ret; @@ -156,72 +208,111 @@ static inline struct btree *btree_cache_find(struct btree_cache *bc, * this version is for btree nodes that have already been freed (we're not * reaping a real btree node) */ -static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush) +static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush, bool shrinker_counter) { struct btree_cache *bc = &c->btree_cache; int ret = 0; lockdep_assert_held(&bc->lock); +wait_on_io: + if (b->flags & ((1U << BTREE_NODE_dirty)| + (1U << BTREE_NODE_read_in_flight)| + (1U << BTREE_NODE_write_in_flight))) { + if (!flush) { + if (btree_node_dirty(b)) + BTREE_CACHE_NOT_FREED_INCREMENT(dirty); + else if (btree_node_read_in_flight(b)) + BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight); + else if (btree_node_write_in_flight(b)) + BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight); + return -BCH_ERR_ENOMEM_btree_node_reclaim; + } - if (!six_trylock_intent(&b->lock)) - return -ENOMEM; + /* XXX: waiting on IO with btree cache lock held */ + bch2_btree_node_wait_on_read(b); + bch2_btree_node_wait_on_write(b); + } + + if (!six_trylock_intent(&b->c.lock)) { + BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent); + return -BCH_ERR_ENOMEM_btree_node_reclaim; + } - if (!six_trylock_write(&b->lock)) + if (!six_trylock_write(&b->c.lock)) { + BTREE_CACHE_NOT_FREED_INCREMENT(lock_write); goto out_unlock_intent; + } - if (btree_node_noevict(b)) - goto out_unlock; + /* recheck under lock */ + if (b->flags & ((1U << BTREE_NODE_read_in_flight)| + (1U << BTREE_NODE_write_in_flight))) { + if (!flush) { + if (btree_node_read_in_flight(b)) + 
BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight); + else if (btree_node_write_in_flight(b)) + BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight); + goto out_unlock; + } + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + goto wait_on_io; + } - if (!btree_node_may_write(b)) + if (btree_node_noevict(b)) { + BTREE_CACHE_NOT_FREED_INCREMENT(noevict); goto out_unlock; - - if (btree_node_dirty(b) && - test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags)) + } + if (btree_node_write_blocked(b)) { + BTREE_CACHE_NOT_FREED_INCREMENT(write_blocked); goto out_unlock; + } + if (btree_node_will_make_reachable(b)) { + BTREE_CACHE_NOT_FREED_INCREMENT(will_make_reachable); + goto out_unlock; + } - if (btree_node_dirty(b) || - btree_node_write_in_flight(b) || - btree_node_read_in_flight(b)) { - if (!flush) + if (btree_node_dirty(b)) { + if (!flush) { + BTREE_CACHE_NOT_FREED_INCREMENT(dirty); goto out_unlock; - - wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, - TASK_UNINTERRUPTIBLE); - + } /* * Using the underscore version because we don't want to compact * bsets after the write, since this node is about to be evicted * - unless btree verify mode is enabled, since it runs out of * the post write cleanup: */ - if (verify_btree_ondisk(c)) - bch2_btree_node_write(c, b, SIX_LOCK_intent); + if (bch2_verify_btree_ondisk) + bch2_btree_node_write(c, b, SIX_LOCK_intent, + BTREE_WRITE_cache_reclaim); else - __bch2_btree_node_write(c, b, SIX_LOCK_read); + __bch2_btree_node_write(c, b, + BTREE_WRITE_cache_reclaim); - /* wait for any in flight btree write */ - btree_node_wait_on_io(b); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + goto wait_on_io; } out: if (b->hash_val && !ret) - trace_btree_node_reap(c, b); + trace_and_count(c, btree_cache_reap, c, b); return ret; out_unlock: - six_unlock_write(&b->lock); + six_unlock_write(&b->c.lock); out_unlock_intent: - six_unlock_intent(&b->lock); - ret = -ENOMEM; + six_unlock_intent(&b->c.lock); + ret = -BCH_ERR_ENOMEM_btree_node_reclaim; goto out; } -static int btree_node_reclaim(struct bch_fs *c, struct btree *b) +static int btree_node_reclaim(struct bch_fs *c, struct btree *b, bool shrinker_counter) { - return __btree_node_reclaim(c, b, false); + return __btree_node_reclaim(c, b, false, shrinker_counter); } static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b) { - return __btree_node_reclaim(c, b, true); + return __btree_node_reclaim(c, b, true, false); } static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, @@ -232,19 +323,19 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, struct btree_cache *bc = &c->btree_cache; struct btree *b, *t; unsigned long nr = sc->nr_to_scan; - unsigned long can_free; - unsigned long touched = 0; + unsigned long can_free = 0; unsigned long freed = 0; - unsigned i; + unsigned long touched = 0; + unsigned i, flags; + unsigned long ret = SHRINK_STOP; + bool trigger_writes = atomic_read(&bc->dirty) + nr >= + bc->used * 3 / 4; - if (btree_shrinker_disabled(c)) + if (bch2_btree_shrinker_disabled) return SHRINK_STOP; - /* Return -1 if we can't do anything right now */ - if (sc->gfp_mask & __GFP_IO) - mutex_lock(&bc->lock); - else if (!mutex_trylock(&bc->lock)) - return -1; + mutex_lock(&bc->lock); + flags = memalloc_nofs_save(); /* * It's _really_ critical that we don't free too many btree nodes - we @@ -253,65 +344,77 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink, * succeed, so that inserting keys into the btree can always succeed and * IO can 
always make forward progress: */ - nr /= btree_pages(c); can_free = btree_cache_can_free(bc); nr = min_t(unsigned long, nr, can_free); i = 0; list_for_each_entry_safe(b, t, &bc->freeable, list) { + /* + * Leave a few nodes on the freeable list, so that a btree split + * won't have to hit the system allocator: + */ + if (++i <= 3) + continue; + touched++; - if (freed >= nr) - break; + if (touched >= nr) + goto out; - if (++i > 3 && - !btree_node_reclaim(c, b)) { + if (!btree_node_reclaim(c, b, true)) { btree_node_data_free(c, b); - six_unlock_write(&b->lock); - six_unlock_intent(&b->lock); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); freed++; + bc->freed++; } } restart: list_for_each_entry_safe(b, t, &bc->live, list) { touched++; - if (freed >= nr) { - /* Save position */ - if (&t->list != &bc->live) - list_move_tail(&bc->live, &t->list); - break; - } - - if (!btree_node_accessed(b) && - !btree_node_reclaim(c, b)) { - /* can't call bch2_btree_node_hash_remove under lock */ + if (btree_node_accessed(b)) { + clear_btree_node_accessed(b); + bc->not_freed_access_bit++; + } else if (!btree_node_reclaim(c, b, true)) { freed++; - if (&t->list != &bc->live) - list_move_tail(&bc->live, &t->list); - btree_node_data_free(c, b); - mutex_unlock(&bc->lock); + bc->freed++; bch2_btree_node_hash_remove(bc, b); - six_unlock_write(&b->lock); - six_unlock_intent(&b->lock); - - if (freed >= nr) - goto out; - - if (sc->gfp_mask & __GFP_IO) - mutex_lock(&bc->lock); - else if (!mutex_trylock(&bc->lock)) - goto out; + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + + if (freed == nr) + goto out_rotate; + } else if (trigger_writes && + btree_node_dirty(b) && + !btree_node_will_make_reachable(b) && + !btree_node_write_blocked(b) && + six_trylock_read(&b->c.lock)) { + list_move(&bc->live, &b->list); + mutex_unlock(&bc->lock); + __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); + six_unlock_read(&b->c.lock); + if (touched >= nr) + goto out_nounlock; + mutex_lock(&bc->lock); goto restart; - } else - clear_btree_node_accessed(b); - } + } - mutex_unlock(&bc->lock); + if (touched >= nr) + break; + } +out_rotate: + if (&t->list != &bc->live) + list_move_tail(&bc->live, &t->list); out: - return (unsigned long) freed * btree_pages(c); + mutex_unlock(&bc->lock); +out_nounlock: + ret = freed; + memalloc_nofs_restore(flags); + trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret); + return ret; } static unsigned long bch2_btree_cache_count(struct shrinker *shrink, @@ -321,33 +424,47 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink, btree_cache.shrink); struct btree_cache *bc = &c->btree_cache; - if (btree_shrinker_disabled(c)) + if (bch2_btree_shrinker_disabled) return 0; - return btree_cache_can_free(bc) * btree_pages(c); + return btree_cache_can_free(bc); +} + +static void bch2_btree_cache_shrinker_to_text(struct seq_buf *s, struct shrinker *shrink) +{ + struct bch_fs *c = container_of(shrink, struct bch_fs, + btree_cache.shrink); + char *cbuf; + size_t buflen = seq_buf_get_buf(s, &cbuf); + struct printbuf out = PRINTBUF_EXTERN(cbuf, buflen); + + bch2_btree_cache_to_text(&out, &c->btree_cache); + seq_buf_commit(s, out.pos); } void bch2_fs_btree_cache_exit(struct bch_fs *c) { struct btree_cache *bc = &c->btree_cache; struct btree *b; - unsigned i; + unsigned i, flags; - if (bc->shrink.list.next) - unregister_shrinker(&bc->shrink); + unregister_shrinker(&bc->shrink); + /* vfree() can allocate memory: */ + flags = memalloc_nofs_save(); 
mutex_lock(&bc->lock); -#ifdef CONFIG_BCACHEFS_DEBUG if (c->verify_data) list_move(&c->verify_data->list, &bc->live); kvpfree(c->verify_ondisk, btree_bytes(c)); -#endif - for (i = 0; i < BTREE_ID_NR; i++) - if (c->btree_roots[i].b) - list_add(&c->btree_roots[i].b->list, &bc->live); + for (i = 0; i < btree_id_nr_alive(c); i++) { + struct btree_root *r = bch2_btree_id_root(c, i); + + if (r->b) + list_add(&r->b->list, &bc->live); + } list_splice(&bc->freeable, &bc->live); @@ -359,18 +476,24 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c) if (btree_node_dirty(b)) bch2_btree_complete_write(c, b, btree_current_write(b)); - clear_btree_node_dirty(b); + clear_btree_node_dirty_acct(c, b); btree_node_data_free(c, b); } - while (!list_empty(&bc->freed)) { - b = list_first_entry(&bc->freed, struct btree, list); + BUG_ON(atomic_read(&c->btree_cache.dirty)); + + list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu); + + while (!list_empty(&bc->freed_nonpcpu)) { + b = list_first_entry(&bc->freed_nonpcpu, struct btree, list); list_del(&b->list); + six_lock_exit(&b->c.lock); kfree(b); } mutex_unlock(&bc->lock); + memalloc_nofs_restore(flags); if (bc->table_init_done) rhashtable_destroy(&bc->table); @@ -382,50 +505,33 @@ int bch2_fs_btree_cache_init(struct bch_fs *c) unsigned i; int ret = 0; - pr_verbose_init(c->opts, ""); - ret = rhashtable_init(&bc->table, &bch_btree_cache_params); if (ret) - goto out; + goto err; bc->table_init_done = true; bch2_recalc_btree_reserve(c); for (i = 0; i < bc->reserve; i++) - if (!btree_node_mem_alloc(c, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } + if (!__bch2_btree_node_mem_alloc(c)) + goto err; list_splice_init(&bc->live, &bc->freeable); -#ifdef CONFIG_BCACHEFS_DEBUG mutex_init(&c->verify_lock); - c->verify_ondisk = kvpmalloc(btree_bytes(c), GFP_KERNEL); - if (!c->verify_ondisk) { - ret = -ENOMEM; - goto out; - } - - c->verify_data = btree_node_mem_alloc(c, GFP_KERNEL); - if (!c->verify_data) { - ret = -ENOMEM; - goto out; - } - - list_del_init(&c->verify_data->list); -#endif - bc->shrink.count_objects = bch2_btree_cache_count; bc->shrink.scan_objects = bch2_btree_cache_scan; + bc->shrink.to_text = bch2_btree_cache_shrinker_to_text; bc->shrink.seeks = 4; - bc->shrink.batch = btree_pages(c) * 2; - register_shrinker(&bc->shrink); -out: - pr_verbose_init(c->opts, "ret %i", ret); - return ret; + ret = register_shrinker(&bc->shrink, "%s/btree_cache", c->name); + if (ret) + goto err; + + return 0; +err: + return -BCH_ERR_ENOMEM_fs_btree_cache_init; } void bch2_fs_btree_cache_init_early(struct btree_cache *bc) @@ -433,7 +539,8 @@ void bch2_fs_btree_cache_init_early(struct btree_cache *bc) mutex_init(&bc->lock); INIT_LIST_HEAD(&bc->live); INIT_LIST_HEAD(&bc->freeable); - INIT_LIST_HEAD(&bc->freed); + INIT_LIST_HEAD(&bc->freed_pcpu); + INIT_LIST_HEAD(&bc->freed_nonpcpu); } /* @@ -447,7 +554,7 @@ void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c) struct btree_cache *bc = &c->btree_cache; if (bc->alloc_lock == current) { - trace_btree_node_cannibalize_unlock(c); + trace_and_count(c, btree_cache_cannibalize_unlock, c); bc->alloc_lock = NULL; closure_wake_up(&bc->alloc_wait); } @@ -463,8 +570,8 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl) goto success; if (!cl) { - trace_btree_node_cannibalize_lock_fail(c); - return -ENOMEM; + trace_and_count(c, btree_cache_cannibalize_lock_fail, c); + return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock; } closure_wait(&bc->alloc_wait, cl); @@ -477,11 +584,11 @@ int bch2_btree_cache_cannibalize_lock(struct 
bch_fs *c, struct closure *cl) goto success; } - trace_btree_node_cannibalize_lock_fail(c); - return -EAGAIN; + trace_and_count(c, btree_cache_cannibalize_lock_fail, c); + return -BCH_ERR_btree_cache_cannibalize_lock_blocked; success: - trace_btree_node_cannibalize_lock(c); + trace_and_count(c, btree_cache_cannibalize_lock, c); return 0; } @@ -491,7 +598,7 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c) struct btree *b; list_for_each_entry_reverse(b, &bc->live, list) - if (!btree_node_reclaim(c, b)) + if (!btree_node_reclaim(c, b, false)) return b; while (1) { @@ -508,52 +615,76 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c) } } -struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c) +struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks) { + struct bch_fs *c = trans->c; struct btree_cache *bc = &c->btree_cache; - struct btree *b; + struct list_head *freed = pcpu_read_locks + ? &bc->freed_pcpu + : &bc->freed_nonpcpu; + struct btree *b, *b2; u64 start_time = local_clock(); unsigned flags; flags = memalloc_nofs_save(); mutex_lock(&bc->lock); - /* - * btree_free() doesn't free memory; it sticks the node on the end of - * the list. Check if there's any freed nodes there: - */ - list_for_each_entry(b, &bc->freeable, list) - if (!btree_node_reclaim(c, b)) - goto out_unlock; - /* * We never free struct btree itself, just the memory that holds the on * disk node. Check the freed list before allocating a new one: */ - list_for_each_entry(b, &bc->freed, list) - if (!btree_node_reclaim(c, b)) { - btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO); - if (b->data) - goto out_unlock; - - six_unlock_write(&b->lock); - six_unlock_intent(&b->lock); + list_for_each_entry(b, freed, list) + if (!btree_node_reclaim(c, b, false)) { + list_del_init(&b->list); + goto got_node; + } + + b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN); + if (!b) { + mutex_unlock(&bc->lock); + bch2_trans_unlock(trans); + b = __btree_node_mem_alloc(c, GFP_KERNEL); + if (!b) goto err; + mutex_lock(&bc->lock); + } + + bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0); + + BUG_ON(!six_trylock_intent(&b->c.lock)); + BUG_ON(!six_trylock_write(&b->c.lock)); +got_node: + + /* + * btree_free() doesn't free memory; it sticks the node on the end of + * the list. 
Check if there's any freed nodes there: + */ + list_for_each_entry(b2, &bc->freeable, list) + if (!btree_node_reclaim(c, b2, false)) { + swap(b->data, b2->data); + swap(b->aux_data, b2->aux_data); + btree_node_to_freedlist(bc, b2); + six_unlock_write(&b2->c.lock); + six_unlock_intent(&b2->c.lock); + goto got_mem; } - b = btree_node_mem_alloc(c, __GFP_NOWARN|GFP_NOIO); - if (!b) - goto err; + mutex_unlock(&bc->lock); - BUG_ON(!six_trylock_intent(&b->lock)); - BUG_ON(!six_trylock_write(&b->lock)); -out_unlock: - BUG_ON(btree_node_hashed(b)); - BUG_ON(btree_node_write_in_flight(b)); + if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) { + bch2_trans_unlock(trans); + if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN)) + goto err; + } - list_del_init(&b->list); + mutex_lock(&bc->lock); + bc->used++; +got_mem: mutex_unlock(&bc->lock); - memalloc_nofs_restore(flags); + + BUG_ON(btree_node_hashed(b)); + BUG_ON(btree_node_dirty(b)); + BUG_ON(btree_node_write_in_flight(b)); out: b->flags = 0; b->written = 0; @@ -561,53 +692,88 @@ out: b->sib_u64s[0] = 0; b->sib_u64s[1] = 0; b->whiteout_u64s = 0; - bch2_btree_keys_init(b, &c->expensive_debug_checks); + bch2_btree_keys_init(b); + set_btree_node_accessed(b); bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc], start_time); + memalloc_nofs_restore(flags); return b; err: + mutex_lock(&bc->lock); + /* Try to cannibalize another cached btree node: */ if (bc->alloc_lock == current) { - b = btree_node_cannibalize(c); - list_del_init(&b->list); - mutex_unlock(&bc->lock); + b2 = btree_node_cannibalize(c); + clear_btree_node_just_written(b2); + bch2_btree_node_hash_remove(bc, b2); + + if (b) { + swap(b->data, b2->data); + swap(b->aux_data, b2->aux_data); + btree_node_to_freedlist(bc, b2); + six_unlock_write(&b2->c.lock); + six_unlock_intent(&b2->c.lock); + } else { + b = b2; + list_del_init(&b->list); + } - bch2_btree_node_hash_remove(bc, b); + mutex_unlock(&bc->lock); - trace_btree_node_cannibalize(c); + trace_and_count(c, btree_cache_cannibalize, c); goto out; } mutex_unlock(&bc->lock); - return ERR_PTR(-ENOMEM); + memalloc_nofs_restore(flags); + return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc); } /* Slowpath, don't want it inlined into btree_iter_traverse() */ -static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c, - struct btree_iter *iter, +static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans, + struct btree_path *path, const struct bkey_i *k, + enum btree_id btree_id, unsigned level, enum six_lock_type lock_type, bool sync) { + struct bch_fs *c = trans->c; struct btree_cache *bc = &c->btree_cache; struct btree *b; + u32 seq; + BUG_ON(level + 1 >= BTREE_MAX_DEPTH); /* * Parent node must be locked, else we could read in a btree node that's * been freed: */ - BUG_ON(!btree_node_locked(iter, level + 1)); - BUG_ON(level >= BTREE_MAX_DEPTH); + if (path && !bch2_btree_node_relock(trans, path, level + 1)) { + trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path); + return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock)); + } + + b = bch2_btree_node_mem_alloc(trans, level != 0); + + if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) { + trans->memory_allocation_failure = true; + trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path); + return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail)); + } - b = bch2_btree_node_mem_alloc(c); if (IS_ERR(b)) return b; + /* + * Btree nodes read 
in from disk should not have the accessed bit set + * initially, so that linear scans don't thrash the cache: + */ + clear_btree_node_accessed(b); + bkey_copy(&b->key, k); - if (bch2_btree_node_hash_insert(bc, b, level, iter->btree_id)) { + if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) { /* raced with another fill: */ /* mark as unhashed... */ @@ -617,64 +783,94 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c, list_add(&b->list, &bc->freeable); mutex_unlock(&bc->lock); - six_unlock_write(&b->lock); - six_unlock_intent(&b->lock); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); return NULL; } - /* - * If the btree node wasn't cached, we can't drop our lock on - * the parent until after it's added to the cache - because - * otherwise we could race with a btree_split() freeing the node - * we're trying to lock. - * - * But the deadlock described below doesn't exist in this case, - * so it's safe to not drop the parent lock until here: - */ - if (btree_node_read_locked(iter, level + 1)) - btree_node_unlock(iter, level + 1); + set_btree_node_read_in_flight(b); - bch2_btree_node_read(c, b, sync); + six_unlock_write(&b->c.lock); + seq = six_lock_seq(&b->c.lock); + six_unlock_intent(&b->c.lock); - six_unlock_write(&b->lock); + /* Unlock before doing IO: */ + if (trans && sync) + bch2_trans_unlock_noassert(trans); + + bch2_btree_node_read(c, b, sync); - if (!sync) { - six_unlock_intent(&b->lock); + if (!sync) return NULL; + + if (path) { + int ret = bch2_trans_relock(trans) ?: + bch2_btree_path_relock_intent(trans, path); + if (ret) { + BUG_ON(!trans->restarted); + return ERR_PTR(ret); + } } - if (lock_type == SIX_LOCK_read) - six_lock_downgrade(&b->lock); + if (!six_relock_type(&b->c.lock, lock_type, seq)) { + if (path) + trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path); + return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill)); + } return b; } -/** - * bch_btree_node_get - find a btree node in the cache and lock it, reading it - * in from disk if necessary. - * - * If IO is necessary and running under generic_make_request, returns -EAGAIN. - * - * The btree node will have either a read or a write lock held, depending on - * the @write parameter. 
- */ -struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter, - const struct bkey_i *k, unsigned level, - enum six_lock_type lock_type) +static noinline void btree_bad_header(struct bch_fs *c, struct btree *b) { + struct printbuf buf = PRINTBUF; + + if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) + return; + + prt_printf(&buf, + "btree node header doesn't match ptr\n" + "btree %s level %u\n" + "ptr: ", + bch2_btree_ids[b->c.btree_id], b->c.level); + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key)); + + prt_printf(&buf, "\nheader: btree %s level %llu\n" + "min ", + bch2_btree_ids[BTREE_NODE_ID(b->data)], + BTREE_NODE_LEVEL(b->data)); + bch2_bpos_to_text(&buf, b->data->min_key); + + prt_printf(&buf, "\nmax "); + bch2_bpos_to_text(&buf, b->data->max_key); + + bch2_fs_inconsistent(c, "%s", buf.buf); + printbuf_exit(&buf); +} + +static inline void btree_check_header(struct bch_fs *c, struct btree *b) +{ + if (b->c.btree_id != BTREE_NODE_ID(b->data) || + b->c.level != BTREE_NODE_LEVEL(b->data) || + !bpos_eq(b->data->max_key, b->key.k.p) || + (b->key.k.type == KEY_TYPE_btree_ptr_v2 && + !bpos_eq(b->data->min_key, + bkey_i_to_btree_ptr_v2(&b->key)->v.min_key))) + btree_bad_header(c, b); +} + +static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path, + const struct bkey_i *k, unsigned level, + enum six_lock_type lock_type, + unsigned long trace_ip) +{ + struct bch_fs *c = trans->c; struct btree_cache *bc = &c->btree_cache; struct btree *b; struct bset_tree *t; + bool need_relock = false; + int ret; - /* - * XXX: locking optimization - * - * we can make the locking looser here - caller can drop lock on parent - * node before locking child node (and potentially blocking): we just - * have to have bch2_btree_node_fill() call relock on the parent and - * return -EINTR if that fails - */ - EBUG_ON(!btree_node_locked(iter, level + 1)); EBUG_ON(level >= BTREE_MAX_DEPTH); retry: b = btree_cache_find(bc, k); @@ -684,7 +880,9 @@ retry: * else we could read in a btree node from disk that's been * freed: */ - b = bch2_btree_node_fill(c, iter, k, level, lock_type, true); + b = bch2_btree_node_fill(trans, path, k, path->btree_id, + level, lock_type, true); + need_relock = true; /* We raced and found the btree node in the cache */ if (!b) @@ -693,54 +891,56 @@ retry: if (IS_ERR(b)) return b; } else { - /* - * There's a potential deadlock with splits and insertions into - * interior nodes we have to avoid: - * - * The other thread might be holding an intent lock on the node - * we want, and they want to update its parent node so they're - * going to upgrade their intent lock on the parent node to a - * write lock. - * - * But if we're holding a read lock on the parent, and we're - * trying to get the intent lock they're holding, we deadlock. - * - * So to avoid this we drop the read locks on parent nodes when - * we're starting to take intent locks - and handle the race. - * - * The race is that they might be about to free the node we - * want, and dropping our read lock on the parent node lets them - * update the parent marking the node we want as freed, and then - * free it: - * - * To guard against this, btree nodes are evicted from the cache - * when they're freed - and b->hash_val is zeroed out, which we - * check for after we lock the node. 
- * - * Then, bch2_btree_node_relock() on the parent will fail - because - * the parent was modified, when the pointer to the node we want - * was removed - and we'll bail out: - */ - if (btree_node_read_locked(iter, level + 1)) - btree_node_unlock(iter, level + 1); + if (btree_node_read_locked(path, level + 1)) + btree_node_unlock(trans, path, level + 1); + + ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ERR_PTR(ret); - if (!btree_node_lock(b, k->k.p, level, iter, lock_type)) - return ERR_PTR(-EINTR); + BUG_ON(ret); if (unlikely(b->hash_val != btree_ptr_hash_val(k) || - b->level != level || + b->c.level != level || race_fault())) { - six_unlock_type(&b->lock, lock_type); - if (bch2_btree_node_relock(iter, level + 1)) + six_unlock_type(&b->c.lock, lock_type); + if (bch2_btree_node_relock(trans, path, level + 1)) goto retry; - trace_trans_restart_btree_node_reused(iter->trans->ip); - return ERR_PTR(-EINTR); + trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path); + return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused)); } + + /* avoid atomic set bit if it's not needed: */ + if (!btree_node_accessed(b)) + set_btree_node_accessed(b); } - wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight, - TASK_UNINTERRUPTIBLE); + if (unlikely(btree_node_read_in_flight(b))) { + u32 seq = six_lock_seq(&b->c.lock); + + six_unlock_type(&b->c.lock, lock_type); + bch2_trans_unlock(trans); + need_relock = true; + + bch2_btree_node_wait_on_read(b); + + /* + * should_be_locked is not set on this path yet, so we need to + * relock it specifically: + */ + if (!six_relock_type(&b->c.lock, lock_type, seq)) + goto retry; + } + + if (unlikely(need_relock)) { + int ret = bch2_trans_relock(trans) ?: + bch2_btree_path_relock_intent(trans, path); + if (ret) { + six_unlock_type(&b->c.lock, lock_type); + return ERR_PTR(ret); + } + } prefetch(b->aux_data); @@ -752,141 +952,267 @@ retry: prefetch(p + L1_CACHE_BYTES * 2); } - /* avoid atomic set bit if it's not needed: */ - if (btree_node_accessed(b)) - set_btree_node_accessed(b); - if (unlikely(btree_node_read_error(b))) { - six_unlock_type(&b->lock, lock_type); + six_unlock_type(&b->c.lock, lock_type); return ERR_PTR(-EIO); } - EBUG_ON(b->btree_id != iter->btree_id || - BTREE_NODE_LEVEL(b->data) != level || - bkey_cmp(b->data->max_key, k->k.p)); + EBUG_ON(b->c.btree_id != path->btree_id); + EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); + btree_check_header(c, b); return b; } -struct btree *bch2_btree_node_get_sibling(struct bch_fs *c, - struct btree_iter *iter, - struct btree *b, - enum btree_node_sibling sib) +/** + * bch_btree_node_get - find a btree node in the cache and lock it, reading it + * in from disk if necessary. + * + * The btree node will have either a read or a write lock held, depending on + * the @write parameter. 
+ */ +struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path, + const struct bkey_i *k, unsigned level, + enum six_lock_type lock_type, + unsigned long trace_ip) { - struct btree_trans *trans = iter->trans; - struct btree *parent; - struct btree_node_iter node_iter; - struct bkey_packed *k; - BKEY_PADDED(k) tmp; - struct btree *ret = NULL; - unsigned level = b->level; + struct bch_fs *c = trans->c; + struct btree *b; + struct bset_tree *t; + int ret; - parent = btree_iter_node(iter, level + 1); - if (!parent) - return NULL; + EBUG_ON(level >= BTREE_MAX_DEPTH); - if (!bch2_btree_node_relock(iter, level + 1)) { - ret = ERR_PTR(-EINTR); - goto out; - } + b = btree_node_mem_ptr(k); - node_iter = iter->l[parent->level].iter; + /* + * Check b->hash_val _before_ calling btree_node_lock() - this might not + * be the node we want anymore, and trying to lock the wrong node could + * cause an unneccessary transaction restart: + */ + if (unlikely(!c->opts.btree_node_mem_ptr_optimization || + !b || + b->hash_val != btree_ptr_hash_val(k))) + return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip); - k = bch2_btree_node_iter_peek_all(&node_iter, parent); - BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p)); + if (btree_node_read_locked(path, level + 1)) + btree_node_unlock(trans, path, level + 1); - k = sib == btree_prev_sib - ? bch2_btree_node_iter_prev(&node_iter, parent) - : (bch2_btree_node_iter_advance(&node_iter, parent), - bch2_btree_node_iter_peek(&node_iter, parent)); - if (!k) - goto out; + ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ERR_PTR(ret); - bch2_bkey_unpack(parent, &tmp.k, k); + BUG_ON(ret); - ret = bch2_btree_node_get(c, iter, &tmp.k, level, - SIX_LOCK_intent); + if (unlikely(b->hash_val != btree_ptr_hash_val(k) || + b->c.level != level || + race_fault())) { + six_unlock_type(&b->c.lock, lock_type); + if (bch2_btree_node_relock(trans, path, level + 1)) + return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip); - if (PTR_ERR_OR_ZERO(ret) == -EINTR && !trans->nounlock) { - struct btree_iter *linked; + trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path); + return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused)); + } - if (!bch2_btree_node_relock(iter, level + 1)) - goto out; + if (unlikely(btree_node_read_in_flight(b))) { + u32 seq = six_lock_seq(&b->c.lock); + + six_unlock_type(&b->c.lock, lock_type); + bch2_trans_unlock(trans); + + bch2_btree_node_wait_on_read(b); /* - * We might have got -EINTR because trylock failed, and we're - * holding other locks that would cause us to deadlock: + * should_be_locked is not set on this path yet, so we need to + * relock it specifically: */ - trans_for_each_iter(trans, linked) - if (btree_iter_cmp(iter, linked) < 0) - __bch2_btree_iter_unlock(linked); + if (trans) { + int ret = bch2_trans_relock(trans) ?: + bch2_btree_path_relock_intent(trans, path); + if (ret) { + BUG_ON(!trans->restarted); + return ERR_PTR(ret); + } + } - if (sib == btree_prev_sib) - btree_node_unlock(iter, level); + if (!six_relock_type(&b->c.lock, lock_type, seq)) + return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip); + } - ret = bch2_btree_node_get(c, iter, &tmp.k, level, - SIX_LOCK_intent); + prefetch(b->aux_data); - /* - * before btree_iter_relock() calls btree_iter_verify_locks(): - */ - if (btree_lock_want(iter, level + 1) == 
BTREE_NODE_UNLOCKED) - btree_node_unlock(iter, level + 1); + for_each_bset(b, t) { + void *p = (u64 *) b->aux_data + t->aux_data_offset; - if (!bch2_btree_node_relock(iter, level)) { - btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK); + prefetch(p + L1_CACHE_BYTES * 0); + prefetch(p + L1_CACHE_BYTES * 1); + prefetch(p + L1_CACHE_BYTES * 2); + } - if (!IS_ERR(ret)) { - six_unlock_intent(&ret->lock); - ret = ERR_PTR(-EINTR); - } - } + /* avoid atomic set bit if it's not needed: */ + if (!btree_node_accessed(b)) + set_btree_node_accessed(b); - bch2_trans_relock(trans); + if (unlikely(btree_node_read_error(b))) { + six_unlock_type(&b->c.lock, lock_type); + return ERR_PTR(-EIO); } -out: - if (btree_lock_want(iter, level + 1) == BTREE_NODE_UNLOCKED) - btree_node_unlock(iter, level + 1); - if (PTR_ERR_OR_ZERO(ret) == -EINTR) - bch2_btree_iter_upgrade(iter, level + 2); + EBUG_ON(b->c.btree_id != path->btree_id); + EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); + btree_check_header(c, b); + + return b; +} + +struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans, + const struct bkey_i *k, + enum btree_id btree_id, + unsigned level, + bool nofill) +{ + struct bch_fs *c = trans->c; + struct btree_cache *bc = &c->btree_cache; + struct btree *b; + struct bset_tree *t; + int ret; + + EBUG_ON(level >= BTREE_MAX_DEPTH); - BUG_ON(!IS_ERR(ret) && !btree_node_locked(iter, level)); + if (c->opts.btree_node_mem_ptr_optimization) { + b = btree_node_mem_ptr(k); + if (b) + goto lock_node; + } +retry: + b = btree_cache_find(bc, k); + if (unlikely(!b)) { + if (nofill) + goto out; - if (!IS_ERR_OR_NULL(ret)) { - struct btree *n1 = ret, *n2 = b; + b = bch2_btree_node_fill(trans, NULL, k, btree_id, + level, SIX_LOCK_read, true); - if (sib != btree_prev_sib) - swap(n1, n2); + /* We raced and found the btree node in the cache */ + if (!b) + goto retry; + + if (IS_ERR(b) && + !bch2_btree_cache_cannibalize_lock(c, NULL)) + goto retry; - BUG_ON(bkey_cmp(btree_type_successor(n1->btree_id, - n1->key.k.p), - n2->data->min_key)); + if (IS_ERR(b)) + goto out; + } else { +lock_node: + ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_); + if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) + return ERR_PTR(ret); + + BUG_ON(ret); + + if (unlikely(b->hash_val != btree_ptr_hash_val(k) || + b->c.btree_id != btree_id || + b->c.level != level)) { + six_unlock_read(&b->c.lock); + goto retry; + } } - bch2_btree_trans_verify_locks(trans); + /* XXX: waiting on IO with btree locks held: */ + __bch2_btree_node_wait_on_read(b); - return ret; + prefetch(b->aux_data); + + for_each_bset(b, t) { + void *p = (u64 *) b->aux_data + t->aux_data_offset; + + prefetch(p + L1_CACHE_BYTES * 0); + prefetch(p + L1_CACHE_BYTES * 1); + prefetch(p + L1_CACHE_BYTES * 2); + } + + /* avoid atomic set bit if it's not needed: */ + if (!btree_node_accessed(b)) + set_btree_node_accessed(b); + + if (unlikely(btree_node_read_error(b))) { + six_unlock_read(&b->c.lock); + b = ERR_PTR(-EIO); + goto out; + } + + EBUG_ON(b->c.btree_id != btree_id); + EBUG_ON(BTREE_NODE_LEVEL(b->data) != level); + btree_check_header(c, b); +out: + bch2_btree_cache_cannibalize_unlock(c); + return b; } -void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter, - const struct bkey_i *k, unsigned level) +int bch2_btree_node_prefetch(struct btree_trans *trans, + struct btree_path *path, + const struct bkey_i *k, + enum btree_id btree_id, unsigned level) { + struct bch_fs *c = trans->c; struct btree_cache *bc = &c->btree_cache; struct btree *b; - 
BUG_ON(!btree_node_locked(iter, level + 1)); + BUG_ON(trans && !btree_node_locked(path, level + 1)); BUG_ON(level >= BTREE_MAX_DEPTH); b = btree_cache_find(bc, k); if (b) + return 0; + + b = bch2_btree_node_fill(trans, path, k, btree_id, + level, SIX_LOCK_read, false); + return PTR_ERR_OR_ZERO(b); +} + +void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k) +{ + struct bch_fs *c = trans->c; + struct btree_cache *bc = &c->btree_cache; + struct btree *b; + + b = btree_cache_find(bc, k); + if (!b) return; +wait_on_io: + /* not allowed to wait on io with btree locks held: */ + + /* XXX we're called from btree_gc which will be holding other btree + * nodes locked + */ + __bch2_btree_node_wait_on_read(b); + __bch2_btree_node_wait_on_write(b); - bch2_btree_node_fill(c, iter, k, level, SIX_LOCK_read, false); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent); + btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write); + + if (btree_node_dirty(b)) { + __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim); + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); + goto wait_on_io; + } + + BUG_ON(btree_node_dirty(b)); + + mutex_lock(&bc->lock); + btree_node_data_free(c, b); + bch2_btree_node_hash_remove(bc, b); + mutex_unlock(&bc->lock); + + six_unlock_write(&b->c.lock); + six_unlock_intent(&b->c.lock); } void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, - struct btree *b) + const struct btree *b) { const struct bkey_format *f = &b->format; struct bset_stats stats; @@ -895,20 +1221,19 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, bch2_btree_keys_stats(b, &stats); - pr_buf(out, - "l %u %llu:%llu - %llu:%llu:\n" - " ptrs: ", - b->level, - b->data->min_key.inode, - b->data->min_key.offset, - b->data->max_key.inode, - b->data->max_key.offset); + prt_printf(out, "l %u ", b->c.level); + bch2_bpos_to_text(out, b->data->min_key); + prt_printf(out, " - "); + bch2_bpos_to_text(out, b->data->max_key); + prt_printf(out, ":\n" + " ptrs: "); bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key)); - pr_buf(out, "\n" + + prt_printf(out, "\n" " format: u64s %u fields %u %u %u %u %u\n" " unpack fn len: %u\n" " bytes used %zu/%zu (%zu%% full)\n" - " sib u64s: %u, %u (merge threshold %zu)\n" + " sib u64s: %u, %u (merge threshold %u)\n" " nr packed keys %u\n" " nr unpacked keys %u\n" " floats %zu\n" @@ -925,9 +1250,28 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, b->nr.live_u64s * 100 / btree_max_u64s(c), b->sib_u64s[0], b->sib_u64s[1], - BTREE_FOREGROUND_MERGE_THRESHOLD(c), + c->btree_foreground_merge_threshold, b->nr.packed_keys, b->nr.unpacked_keys, stats.floats, stats.failed); } + +void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc) +{ + prt_printf(out, "nr nodes:\t\t%u\n", bc->used); + prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&bc->dirty)); + prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock); + + prt_printf(out, "freed:\t\t\t\t%u\n", bc->freed); + prt_printf(out, "not freed, dirty:\t\t%u\n", bc->not_freed_dirty); + prt_printf(out, "not freed, write in flight:\t%u\n", bc->not_freed_write_in_flight); + prt_printf(out, "not freed, read in flight:\t%u\n", bc->not_freed_read_in_flight); + prt_printf(out, "not freed, lock intent failed:\t%u\n", bc->not_freed_lock_intent); + prt_printf(out, "not freed, lock write failed:\t%u\n", bc->not_freed_lock_write); + prt_printf(out, "not freed, access bit:\t\t%u\n", bc->not_freed_access_bit); + prt_printf(out, "not freed, no 
evict failed:\t%u\n", bc->not_freed_noevict); + prt_printf(out, "not freed, write blocked:\t%u\n", bc->not_freed_write_blocked); + prt_printf(out, "not freed, will make reachable:\t%u\n", bc->not_freed_will_make_reachable); + +}
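
The patch threads a shrinker_counter flag through __btree_node_reclaim() so that each reason a cached btree node could not be freed is tallied in struct btree_cache and later reported by the new bch2_btree_cache_to_text() / shrinker to_text hook. The standalone sketch below models only that accounting idea under stated assumptions: the names fake_btree_cache, fake_node, reclaim_one and NOT_FREED_INCREMENT are hypothetical and not part of bcachefs; the macro merely mirrors the shape of BTREE_CACHE_NOT_FREED_INCREMENT() from the diff, and the real reclaim path checks many more conditions (noevict, write_blocked, will_make_reachable, trylock failures).

/*
 * Minimal sketch, not part of the patch: per-reason "not freed"
 * accounting, as added by BTREE_CACHE_NOT_FREED_INCREMENT() in
 * __btree_node_reclaim().  All identifiers here are made up for
 * illustration; only the counting pattern comes from the diff above.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_btree_cache {
	unsigned freed;
	unsigned not_freed_dirty;
	unsigned not_freed_read_in_flight;
	unsigned not_freed_write_in_flight;
};

struct fake_node {
	bool dirty;
	bool read_in_flight;
	bool write_in_flight;
};

/* Same shape as BTREE_CACHE_NOT_FREED_INCREMENT(counter) in the patch:
 * only count when called from the shrinker, not from other reclaim paths. */
#define NOT_FREED_INCREMENT(bc, shrinker_counter, counter)	\
do {								\
	if (shrinker_counter)					\
		(bc)->not_freed_##counter++;			\
} while (0)

/* Returns true if the node could be "freed"; otherwise records why not. */
static bool reclaim_one(struct fake_btree_cache *bc, struct fake_node *n,
			bool shrinker_counter)
{
	if (n->dirty) {
		NOT_FREED_INCREMENT(bc, shrinker_counter, dirty);
		return false;
	}
	if (n->read_in_flight) {
		NOT_FREED_INCREMENT(bc, shrinker_counter, read_in_flight);
		return false;
	}
	if (n->write_in_flight) {
		NOT_FREED_INCREMENT(bc, shrinker_counter, write_in_flight);
		return false;
	}
	bc->freed++;
	return true;
}

int main(void)
{
	struct fake_btree_cache bc = {0};
	struct fake_node nodes[] = {
		{ .dirty = true },		/* skipped: dirty */
		{ .read_in_flight = true },	/* skipped: read in flight */
		{ 0 },				/* clean: can be freed */
	};

	for (unsigned i = 0; i < 3; i++)
		reclaim_one(&bc, &nodes[i], true);

	printf("freed %u, not freed: dirty %u, read %u, write %u\n",
	       bc.freed, bc.not_freed_dirty,
	       bc.not_freed_read_in_flight, bc.not_freed_write_in_flight);
	return 0;
}

Keeping one counter per skip reason is what lets the to_text report at the end of the diff explain cache pressure (dirty nodes, in-flight IO, lock contention, access bits) without any extra tracing.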