X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_cache.c;h=799750464969a57b158247cfc0259c0a7f5d84c7;hb=d1900b637ee57c2e523e308cc335deee0164b882;hp=d91408c20a0709c850cf6c79c52276249d90bfec;hpb=1ffc9b4554b29316a51f6bf3374540b3c878b783;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index d91408c..7997504 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -1,19 +1,20 @@
 // SPDX-License-Identifier: GPL-2.0

 #include "bcachefs.h"
+#include "bbpos.h"
 #include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_iter.h"
 #include "btree_locking.h"
 #include "debug.h"
+#include "errcode.h"
 #include "error.h"
+#include "journal.h"
+#include "trace.h"

 #include <linux/prefetch.h>
 #include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
-
-struct lock_class_key bch2_btree_node_lock_key;

 const char * const bch2_btree_node_flags[] = {
 #define x(f)	#f,
@@ -26,13 +27,15 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
 	unsigned i, reserve = 16;

-	if (!c->btree_roots[0].b)
+	if (!c->btree_roots_known[0].b)
 		reserve += 8;

-	for (i = 0; i < BTREE_ID_NR; i++)
-		if (c->btree_roots[i].b)
-			reserve += min_t(unsigned, 1,
-					 c->btree_roots[i].b->c.level) * 8;
+	for (i = 0; i < btree_id_nr_alive(c); i++) {
+		struct btree_root *r = bch2_btree_id_root(c, i);
+
+		if (r->b)
+			reserve += min_t(unsigned, 1, r->b->c.level) * 8;
+	}

 	c->btree_cache.reserve = reserve;
 }
@@ -56,10 +59,12 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)

 	EBUG_ON(btree_node_write_in_flight(b));

-	kvpfree(b->data, btree_bytes(c));
+	clear_btree_node_just_written(b);
+
+	kvfree(b->data);
 	b->data = NULL;
 #ifdef __KERNEL__
-	vfree(b->aux_data);
+	kvfree(b->aux_data);
 #else
 	munmap(b->aux_data, btree_aux_data_bytes(b));
 #endif
@@ -90,11 +95,11 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
 	BUG_ON(b->data || b->aux_data);

-	b->data = kvpmalloc(btree_bytes(c), gfp);
+	b->data = kvmalloc(btree_buf_bytes(b), gfp);
 	if (!b->data)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
-	b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+	b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
 #else
 	b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
 			   PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -103,32 +108,35 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 		b->aux_data = NULL;
 #endif
 	if (!b->aux_data) {
-		kvpfree(b->data, btree_bytes(c));
+		kvfree(b->data);
 		b->data = NULL;
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 	}

 	return 0;
 }

-static struct btree *__btree_node_mem_alloc(struct bch_fs *c)
+static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 {
-	struct btree *b = kzalloc(sizeof(struct btree), GFP_KERNEL);
+	struct btree *b;
+
+	b = kzalloc(sizeof(struct btree), gfp);
 	if (!b)
 		return NULL;

 	bkey_btree_ptr_init(&b->key);
-	__six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
 	INIT_LIST_HEAD(&b->list);
 	INIT_LIST_HEAD(&b->write_blocked);
-	b->byte_order = ilog2(btree_bytes(c));
+	b->byte_order = ilog2(c->opts.btree_node_size);
 	return b;
 }

 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 {
 	struct btree_cache *bc = &c->btree_cache;
-	struct btree *b = __btree_node_mem_alloc(c);
+	struct btree *b;
+
+	b = __btree_node_mem_alloc(c, GFP_KERNEL);
 	if (!b)
 		return NULL;
@@ -137,6 +145,8 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 		return NULL;
 	}

+	bch2_btree_lock_init(&b->c, 0);
+
 	bc->used++;
 	list_add(&b->list, &bc->freeable);
 	return b;
@@ -147,12 +157,11 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 {
 	int ret = rhashtable_remove_fast(&bc->table, &b->hash,
 					 bch_btree_cache_params);
+	BUG_ON(ret);

 	/* Cause future lookups for this node to fail: */
 	b->hash_val = 0;
-
-	six_lock_wakeup_all(&b->c.lock);
 }

 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -175,7 +184,7 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 	mutex_lock(&bc->lock);
 	ret = __bch2_btree_node_hash_insert(bc, b);
 	if (!ret)
-		list_add(&b->list, &bc->live);
+		list_add_tail(&b->list, &bc->live);
 	mutex_unlock(&bc->lock);

 	return ret;
@@ -200,12 +209,24 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 	int ret = 0;

 	lockdep_assert_held(&bc->lock);
+
+	struct bbpos pos = BBPOS(b->c.btree_id, b->key.k.p);
+
+	u64 mask = b->c.level
+		? bc->pinned_nodes_interior_mask
+		: bc->pinned_nodes_leaf_mask;
+
+	if ((mask & BIT_ULL(b->c.btree_id)) &&
+	    bbpos_cmp(bc->pinned_nodes_start, pos) < 0 &&
+	    bbpos_cmp(bc->pinned_nodes_end, pos) >= 0)
+		return -BCH_ERR_ENOMEM_btree_node_reclaim;
+
 wait_on_io:
 	if (b->flags & ((1U << BTREE_NODE_dirty)|
 			(1U << BTREE_NODE_read_in_flight)|
 			(1U << BTREE_NODE_write_in_flight))) {
 		if (!flush)
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_btree_node_reclaim;

 		/* XXX: waiting on IO with btree cache lock held */
 		bch2_btree_node_wait_on_read(b);
@@ -213,7 +234,7 @@ wait_on_io:
 	}

 	if (!six_trylock_intent(&b->c.lock))
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_reclaim;

 	if (!six_trylock_write(&b->c.lock))
 		goto out_unlock_intent;
@@ -243,9 +264,11 @@ wait_on_io:
 		 * the post write cleanup:
 		 */
 		if (bch2_verify_btree_ondisk)
-			bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
+			bch2_btree_node_write(c, b, SIX_LOCK_intent,
+					      BTREE_WRITE_cache_reclaim);
 		else
-			__bch2_btree_node_write(c, b, 0);
+			__bch2_btree_node_write(c, b,
+						BTREE_WRITE_cache_reclaim);

 		six_unlock_write(&b->c.lock);
 		six_unlock_intent(&b->c.lock);
@@ -253,13 +276,13 @@ wait_on_io:
 	}
 out:
 	if (b->hash_val && !ret)
-		trace_btree_node_reap(c, b);
+		trace_and_count(c, btree_cache_reap, c, b);
 	return ret;
 out_unlock:
 	six_unlock_write(&b->c.lock);
 out_unlock_intent:
 	six_unlock_intent(&b->c.lock);
-	ret = -ENOMEM;
+	ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
 	goto out;
 }
@@ -276,26 +299,22 @@ static int btree_node_write_and_reclaim(struct bch_fs *c, struct btree *b)
 static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 					   struct shrink_control *sc)
 {
-	struct bch_fs *c = container_of(shrink, struct bch_fs,
-					btree_cache.shrink);
+	struct bch_fs *c = shrink->private_data;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b, *t;
 	unsigned long nr = sc->nr_to_scan;
-	unsigned long can_free;
-	unsigned long touched = 0;
+	unsigned long can_free = 0;
 	unsigned long freed = 0;
+	unsigned long touched = 0;
 	unsigned i, flags;
 	unsigned long ret = SHRINK_STOP;
+	bool trigger_writes = atomic_read(&bc->dirty) + nr >=
+		bc->used * 3 / 4;

 	if (bch2_btree_shrinker_disabled)
 		return SHRINK_STOP;

-	/* Return -1 if we can't do anything right now */
-	if (sc->gfp_mask & __GFP_FS)
-		mutex_lock(&bc->lock);
-	else if (!mutex_trylock(&bc->lock))
-		goto out_norestore;
-
+	mutex_lock(&bc->lock);
 	flags = memalloc_nofs_save();

 	/*
@@ -305,7 +324,6 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	 * succeed, so that inserting keys into the btree can always succeed and
 	 * IO can always make forward progress:
 	 */
-	nr /= btree_pages(c);
 	can_free = btree_cache_can_free(bc);
 	nr = min_t(unsigned long, nr, can_free);
@@ -321,7 +339,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 		touched++;

 		if (touched >= nr)
-			break;
+			goto out;

 		if (!btree_node_reclaim(c, b)) {
 			btree_node_data_free(c, b);
@@ -332,70 +350,60 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	}
 restart:
 	list_for_each_entry_safe(b, t, &bc->live, list) {
-		/* tweak this */
+		touched++;
+
 		if (btree_node_accessed(b)) {
 			clear_btree_node_accessed(b);
-			goto touched;
-		}
-
-		if (!btree_node_reclaim(c, b)) {
-			/* can't call bch2_btree_node_hash_remove under lock */
+		} else if (!btree_node_reclaim(c, b)) {
 			freed++;
-			if (&t->list != &bc->live)
-				list_move_tail(&bc->live, &t->list);
-
 			btree_node_data_free(c, b);
-			mutex_unlock(&bc->lock);
+
 			bch2_btree_node_hash_remove(bc, b);
 			six_unlock_write(&b->c.lock);
 			six_unlock_intent(&b->c.lock);

-			if (freed >= nr)
-				goto out;
-
-			if (sc->gfp_mask & __GFP_FS)
-				mutex_lock(&bc->lock);
-			else if (!mutex_trylock(&bc->lock))
-				goto out;
+			if (freed == nr)
+				goto out_rotate;
+		} else if (trigger_writes &&
+			   btree_node_dirty(b) &&
+			   !btree_node_will_make_reachable(b) &&
+			   !btree_node_write_blocked(b) &&
+			   six_trylock_read(&b->c.lock)) {
+			list_move(&bc->live, &b->list);
+			mutex_unlock(&bc->lock);
+			__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
+			six_unlock_read(&b->c.lock);
+			if (touched >= nr)
+				goto out_nounlock;
+			mutex_lock(&bc->lock);
 			goto restart;
-		} else {
-			continue;
 		}
-touched:
-		touched++;
-
-		if (touched >= nr) {
-			/* Save position */
-			if (&t->list != &bc->live)
-				list_move_tail(&bc->live, &t->list);
+
+		if (touched >= nr)
 			break;
-		}
 	}
-
-	mutex_unlock(&bc->lock);
+out_rotate:
+	if (&t->list != &bc->live)
+		list_move_tail(&bc->live, &t->list);
 out:
-	ret = (unsigned long) freed * btree_pages(c);
+	mutex_unlock(&bc->lock);
+out_nounlock:
+	ret = freed;
 	memalloc_nofs_restore(flags);
-out_norestore:
-	trace_btree_cache_scan(sc->nr_to_scan,
-			       sc->nr_to_scan / btree_pages(c),
-			       btree_cache_can_free(bc),
-			       ret);
+	trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);

 	return ret;
 }

 static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
 					    struct shrink_control *sc)
 {
-	struct bch_fs *c = container_of(shrink, struct bch_fs,
-					btree_cache.shrink);
+	struct bch_fs *c = shrink->private_data;
 	struct btree_cache *bc = &c->btree_cache;

 	if (bch2_btree_shrinker_disabled)
 		return 0;

-	return btree_cache_can_free(bc) * btree_pages(c);
+	return btree_cache_can_free(bc);
 }

 void bch2_fs_btree_cache_exit(struct bch_fs *c)
@@ -404,8 +412,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	struct btree *b;
 	unsigned i, flags;

-	if (bc->shrink.list.next)
-		unregister_shrinker(&bc->shrink);
+	shrinker_free(bc->shrink);

 	/* vfree() can allocate memory: */
 	flags = memalloc_nofs_save();
@@ -414,11 +421,14 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	if (c->verify_data)
 		list_move(&c->verify_data->list, &bc->live);

-	kvpfree(c->verify_ondisk, btree_bytes(c));
+	kvfree(c->verify_ondisk);
+
+	for (i = 0; i < btree_id_nr_alive(c); i++) {
+		struct btree_root *r = bch2_btree_id_root(c, i);

-	for (i = 0; i < BTREE_ID_NR; i++)
-		if (c->btree_roots[i].b)
-			list_add(&c->btree_roots[i].b->list, &bc->live);
+		if (r->b)
+			list_add(&r->b->list, &bc->live);
+	}

 	list_splice(&bc->freeable, &bc->live);
@@ -428,21 +438,18 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 		BUG_ON(btree_node_read_in_flight(b) ||
 		       btree_node_write_in_flight(b));

-		if (btree_node_dirty(b))
-			bch2_btree_complete_write(c, b, btree_current_write(b));
-		clear_btree_node_dirty_acct(c, b);
-
 		btree_node_data_free(c, b);
 	}

-	BUG_ON(atomic_read(&c->btree_cache.dirty));
+	BUG_ON(!bch2_journal_error(&c->journal) &&
+	       atomic_read(&c->btree_cache.dirty));

 	list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);

 	while (!list_empty(&bc->freed_nonpcpu)) {
 		b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
 		list_del(&b->list);
-		six_lock_pcpu_free(&b->c.lock);
+		six_lock_exit(&b->c.lock);
 		kfree(b);
 	}
@@ -456,37 +463,39 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 int bch2_fs_btree_cache_init(struct bch_fs *c)
 {
 	struct btree_cache *bc = &c->btree_cache;
+	struct shrinker *shrink;
 	unsigned i;
 	int ret = 0;

-	pr_verbose_init(c->opts, "");
-
 	ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
 	if (ret)
-		goto out;
+		goto err;

 	bc->table_init_done = true;

 	bch2_recalc_btree_reserve(c);

 	for (i = 0; i < bc->reserve; i++)
-		if (!__bch2_btree_node_mem_alloc(c)) {
-			ret = -ENOMEM;
-			goto out;
-		}
+		if (!__bch2_btree_node_mem_alloc(c))
+			goto err;

 	list_splice_init(&bc->live, &bc->freeable);

 	mutex_init(&c->verify_lock);

-	bc->shrink.count_objects = bch2_btree_cache_count;
-	bc->shrink.scan_objects = bch2_btree_cache_scan;
-	bc->shrink.seeks = 4;
-	bc->shrink.batch = btree_pages(c) * 2;
-	ret = register_shrinker(&bc->shrink);
-out:
-	pr_verbose_init(c->opts, "ret %i", ret);
-	return ret;
+	shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
+	if (!shrink)
+		goto err;
+	bc->shrink = shrink;
+	shrink->count_objects = bch2_btree_cache_count;
+	shrink->scan_objects = bch2_btree_cache_scan;
+	shrink->seeks = 4;
+	shrink->private_data = c;
+	shrinker_register(shrink);
+
+	return 0;
+err:
+	return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 }

 void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
@@ -504,19 +513,21 @@ void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
  * cannibalize_bucket() will take. This means every time we unlock the root of
  * the btree, we need to release this lock if we have it held.
  */
-void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
+void bch2_btree_cache_cannibalize_unlock(struct btree_trans *trans)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;

 	if (bc->alloc_lock == current) {
-		trace_btree_node_cannibalize_unlock(c);
+		trace_and_count(c, btree_cache_cannibalize_unlock, trans);
 		bc->alloc_lock = NULL;
 		closure_wake_up(&bc->alloc_wait);
 	}
 }

-int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
+int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure *cl)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct task_struct *old;
@@ -525,8 +536,8 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 		goto success;

 	if (!cl) {
-		trace_btree_node_cannibalize_lock_fail(c);
-		return -ENOMEM;
+		trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
+		return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
 	}

 	closure_wait(&bc->alloc_wait, cl);
@@ -539,11 +550,11 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 		goto success;
 	}

-	trace_btree_node_cannibalize_lock_fail(c);
-	return -EAGAIN;
+	trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
+	return -BCH_ERR_btree_cache_cannibalize_lock_blocked;

 success:
-	trace_btree_node_cannibalize_lock(c);
+	trace_and_count(c, btree_cache_cannibalize_lock, trans);
 	return 0;
 }
@@ -570,8 +581,9 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
 	}
 }

-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct list_head *freed = pcpu_read_locks
 		? &bc->freed_pcpu
 		: &bc->freed_nonpcpu;
@@ -593,12 +605,17 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
 		goto got_node;
 	}

-	b = __btree_node_mem_alloc(c);
-	if (!b)
-		goto err_locked;
+	b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
+	if (!b) {
+		mutex_unlock(&bc->lock);
+		bch2_trans_unlock(trans);
+		b = __btree_node_mem_alloc(c, GFP_KERNEL);
+		if (!b)
+			goto err;
+		mutex_lock(&bc->lock);
+	}

-	if (pcpu_read_locks)
-		six_lock_pcpu_alloc(&b->c.lock);
+	bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);

 	BUG_ON(!six_trylock_intent(&b->c.lock));
 	BUG_ON(!six_trylock_write(&b->c.lock));
@@ -620,8 +637,11 @@ got_node:

 	mutex_unlock(&bc->lock);

-	if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
-		goto err;
+	if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+		bch2_trans_unlock(trans);
+		if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+			goto err;
+	}

 	mutex_lock(&bc->lock);
 	bc->used++;
@@ -641,17 +661,20 @@ out:
 	bch2_btree_keys_init(b);
 	set_btree_node_accessed(b);

-	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
+	time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
 			       start_time);

 	memalloc_nofs_restore(flags);
 	return b;
err:
 	mutex_lock(&bc->lock);
-err_locked:
+
 	/* Try to cannibalize another cached btree node: */
 	if (bc->alloc_lock == current) {
 		b2 = btree_node_cannibalize(c);
+		clear_btree_node_just_written(b2);
+		bch2_btree_node_hash_remove(bc, b2);
+
 		if (b) {
 			swap(b->data, b2->data);
 			swap(b->aux_data, b2->aux_data);
@@ -665,20 +688,17 @@ err_locked:

 		mutex_unlock(&bc->lock);

-		bch2_btree_node_hash_remove(bc, b);
-
-		trace_btree_node_cannibalize(c);
+		trace_and_count(c, btree_cache_cannibalize, trans);
 		goto out;
 	}

 	mutex_unlock(&bc->lock);
 	memalloc_nofs_restore(flags);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
 }

 /* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
-						   struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 						   struct btree_path *path,
 						   const struct bkey_i *k,
 						   enum btree_id btree_id,
@@ -686,6 +706,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 						   enum six_lock_type lock_type,
 						   bool sync)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	u32 seq;
@@ -695,21 +716,20 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	 * Parent node must be locked, else we could read in a btree node that's
 	 * been freed:
 	 */
-	if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
-		trace_trans_restart_relock_parent_for_fill(trans->fn,
-					_THIS_IP_, btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+	if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
+		trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 	}

-	b = bch2_btree_node_mem_alloc(c, level != 0);
+	b = bch2_btree_node_mem_alloc(trans, level != 0);
+
+	if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
+		if (!path)
+			return b;

-	if (trans && b == ERR_PTR(-ENOMEM)) {
 		trans->memory_allocation_failure = true;
-		trace_trans_restart_memory_allocation_failure(trans->fn,
-					_THIS_IP_, btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+		trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
 	}

 	if (IS_ERR(b))
@@ -734,115 +754,88 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	set_btree_node_read_in_flight(b);

 	six_unlock_write(&b->c.lock);
-	seq = b->c.lock.state.seq;
+	seq = six_lock_seq(&b->c.lock);
 	six_unlock_intent(&b->c.lock);

 	/* Unlock before doing IO: */
-	if (trans && sync)
-		bch2_trans_unlock(trans);
+	if (path && sync)
+		bch2_trans_unlock_noassert(trans);

-	bch2_btree_node_read(c, b, sync);
+	bch2_btree_node_read(trans, b, sync);

 	if (!sync)
 		return NULL;

-	if (trans &&
-	    (!bch2_trans_relock(trans) ||
-	     !bch2_btree_path_relock_intent(trans, path))) {
-		BUG_ON(!trans->restarted);
-		return ERR_PTR(-EINTR);
+	if (path) {
+		int ret = bch2_trans_relock(trans) ?:
+			bch2_btree_path_relock_intent(trans, path);
+		if (ret) {
+			BUG_ON(!trans->restarted);
+			return ERR_PTR(ret);
+		}
 	}

 	if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-		trace_trans_restart_relock_after_fill(trans->fn, _THIS_IP_,
-						      btree_id, &path->pos);
-		btree_trans_restart(trans);
-		return ERR_PTR(-EINTR);
+		BUG_ON(!path);
+
+		trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 	}

 	return b;
 }

-static int lock_node_check_fn(struct six_lock *lock, void *p)
-{
-	struct btree *b = container_of(lock, struct btree, c.lock);
-	const struct bkey_i *k = p;
-
-	return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
-}
-
 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 {
-	struct printbuf buf1 = PRINTBUF;
-	struct printbuf buf2 = PRINTBUF;
-	struct printbuf buf3 = PRINTBUF;
+	struct printbuf buf = PRINTBUF;

-	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+	if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
 		return;

-	bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&b->key));
-	bch2_bpos_to_text(&buf2, b->data->min_key);
-	bch2_bpos_to_text(&buf3, b->data->max_key);
-
-	bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n"
-			     "btree %s level %u\n"
-			     "ptr: %s\n"
-			     "header: btree %s level %llu\n"
-			     "min %s max %s\n",
-			     bch2_btree_ids[b->c.btree_id], b->c.level,
-			     buf1.buf,
-			     bch2_btree_ids[BTREE_NODE_ID(b->data)],
-			     BTREE_NODE_LEVEL(b->data),
-			     buf2.buf, buf3.buf);
-
-	printbuf_exit(&buf3);
-	printbuf_exit(&buf2);
-	printbuf_exit(&buf1);
+	prt_printf(&buf,
+		   "btree node header doesn't match ptr\n"
+		   "btree %s level %u\n"
+		   "ptr: ",
		   bch2_btree_id_str(b->c.btree_id), b->c.level);
+	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+
+	prt_printf(&buf, "\nheader: btree %s level %llu\n"
		   "min ",
		   bch2_btree_id_str(BTREE_NODE_ID(b->data)),
		   BTREE_NODE_LEVEL(b->data));
+	bch2_bpos_to_text(&buf, b->data->min_key);
+
+	prt_printf(&buf, "\nmax ");
+	bch2_bpos_to_text(&buf, b->data->max_key);
+
+	bch2_fs_inconsistent(c, "%s", buf.buf);
+	printbuf_exit(&buf);
 }

 static inline void btree_check_header(struct bch_fs *c, struct btree *b)
 {
 	if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
 	    b->c.level != BTREE_NODE_LEVEL(b->data) ||
-	    bpos_cmp(b->data->max_key, b->key.k.p) ||
+	    !bpos_eq(b->data->max_key, b->key.k.p) ||
 	    (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-	     bpos_cmp(b->data->min_key,
+	     !bpos_eq(b->data->min_key,
 		      bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
 		btree_bad_header(c, b);
 }

-/**
- * bch_btree_node_get - find a btree node in the cache and lock it, reading it
- * in from disk if necessary.
- *
- * If IO is necessary and running under generic_make_request, returns -EAGAIN.
- *
- * The btree node will have either a read or a write lock held, depending on
- * the @write parameter.
- */
-struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
-				  const struct bkey_i *k, unsigned level,
-				  enum six_lock_type lock_type,
-				  unsigned long trace_ip)
+static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+					   const struct bkey_i *k, unsigned level,
+					   enum six_lock_type lock_type,
+					   unsigned long trace_ip)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	struct bset_tree *t;
+	bool need_relock = false;
+	int ret;

 	EBUG_ON(level >= BTREE_MAX_DEPTH);
-
-	b = btree_node_mem_ptr(k);
-
-	/*
-	 * Check b->hash_val _before_ calling btree_node_lock() - this might not
-	 * be the node we want anymore, and trying to lock the wrong node could
-	 * cause an unneccessary transaction restart:
-	 */
-	if (likely(c->opts.btree_node_mem_ptr_optimization &&
-		   b &&
-		   b->hash_val == btree_ptr_hash_val(k)))
-		goto lock_node;
retry:
 	b = btree_cache_find(bc, k);
 	if (unlikely(!b)) {
@@ -851,8 +844,9 @@ retry:
 		 * else we could read in a btree node from disk that's been
 		 * freed:
 		 */
-		b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+		b = bch2_btree_node_fill(trans, path, k, path->btree_id,
 					 level, lock_type, true);
+		need_relock = true;

 		/* We raced and found the btree node in the cache */
 		if (!b)
@@ -861,44 +855,14 @@ retry:
 		if (IS_ERR(b))
 			return b;
 	} else {
-lock_node:
-		/*
-		 * There's a potential deadlock with splits and insertions into
-		 * interior nodes we have to avoid:
-		 *
-		 * The other thread might be holding an intent lock on the node
-		 * we want, and they want to update its parent node so they're
-		 * going to upgrade their intent lock on the parent node to a
-		 * write lock.
-		 *
-		 * But if we're holding a read lock on the parent, and we're
-		 * trying to get the intent lock they're holding, we deadlock.
-		 *
-		 * So to avoid this we drop the read locks on parent nodes when
-		 * we're starting to take intent locks - and handle the race.
-		 *
-		 * The race is that they might be about to free the node we
-		 * want, and dropping our read lock on the parent node lets them
-		 * update the parent marking the node we want as freed, and then
-		 * free it:
-		 *
-		 * To guard against this, btree nodes are evicted from the cache
-		 * when they're freed - and b->hash_val is zeroed out, which we
-		 * check for after we lock the node.
-		 *
-		 * Then, bch2_btree_node_relock() on the parent will fail - because
-		 * the parent was modified, when the pointer to the node we want
-		 * was removed - and we'll bail out:
-		 */
 		if (btree_node_read_locked(path, level + 1))
-			btree_node_unlock(path, level + 1);
+			btree_node_unlock(trans, path, level + 1);

-		if (!btree_node_lock(trans, path, b, k->k.p, level, lock_type,
-				     lock_node_check_fn, (void *) k, trace_ip)) {
-			if (!trans->restarted)
-				goto retry;
-			return ERR_PTR(-EINTR);
-		}
+		ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			return ERR_PTR(ret);
+
+		BUG_ON(ret);

 		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
 			     b->c.level != level ||
 			     race_fault())) {
@@ -907,20 +871,21 @@ lock_node:
 			if (bch2_btree_node_relock(trans, path, level + 1))
 				goto retry;

-			trace_trans_restart_btree_node_reused(trans->fn,
-							      trace_ip,
-							      path->btree_id,
-							      &path->pos);
-			btree_trans_restart(trans);
-			return ERR_PTR(-EINTR);
+			trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+			return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 		}
+
+		/* avoid atomic set bit if it's not needed: */
+		if (!btree_node_accessed(b))
+			set_btree_node_accessed(b);
 	}

 	if (unlikely(btree_node_read_in_flight(b))) {
-		u32 seq = b->c.lock.state.seq;
+		u32 seq = six_lock_seq(&b->c.lock);

 		six_unlock_type(&b->c.lock, lock_type);
 		bch2_trans_unlock(trans);
+		need_relock = true;

 		bch2_btree_node_wait_on_read(b);
@@ -928,17 +893,106 @@ lock_node:
 		 * should_be_locked is not set on this path yet, so we need to
 		 * relock it specifically:
 		 */
-		if (trans &&
-		    (!bch2_trans_relock(trans) ||
-		     !bch2_btree_path_relock_intent(trans, path))) {
-			BUG_ON(!trans->restarted);
-			return ERR_PTR(-EINTR);
-		}
-
 		if (!six_relock_type(&b->c.lock, lock_type, seq))
 			goto retry;
 	}

+	if (unlikely(need_relock)) {
+		ret = bch2_trans_relock(trans) ?:
+			bch2_btree_path_relock_intent(trans, path);
+		if (ret) {
+			six_unlock_type(&b->c.lock, lock_type);
+			return ERR_PTR(ret);
+		}
+	}
+
+	prefetch(b->aux_data);
+
+	for_each_bset(b, t) {
+		void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+		prefetch(p + L1_CACHE_BYTES * 0);
+		prefetch(p + L1_CACHE_BYTES * 1);
+		prefetch(p + L1_CACHE_BYTES * 2);
+	}
+
+	if (unlikely(btree_node_read_error(b))) {
+		six_unlock_type(&b->c.lock, lock_type);
+		return ERR_PTR(-BCH_ERR_btree_node_read_error);
+	}
+
+	EBUG_ON(b->c.btree_id != path->btree_id);
+	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+	btree_check_header(c, b);
+
+	return b;
+}
+
+/**
+ * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * @trans: btree transaction object
+ * @path: btree_path being traversed
+ * @k: pointer to btree node (generally KEY_TYPE_btree_ptr_v2)
+ * @level: level of btree node being looked up (0 == leaf node)
+ * @lock_type: SIX_LOCK_read or SIX_LOCK_intent
+ * @trace_ip: ip of caller of btree iterator code (i.e. caller of bch2_btree_iter_peek())
+ *
+ * The btree node will have either a read or a write lock held, depending on
+ * the @write parameter.
+ *
+ * Returns: btree node or ERR_PTR()
+ */
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+				  const struct bkey_i *k, unsigned level,
+				  enum six_lock_type lock_type,
+				  unsigned long trace_ip)
+{
+	struct bch_fs *c = trans->c;
+	struct btree *b;
+	struct bset_tree *t;
+	int ret;
+
+	EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+	b = btree_node_mem_ptr(k);
+
+	/*
+	 * Check b->hash_val _before_ calling btree_node_lock() - this might not
+	 * be the node we want anymore, and trying to lock the wrong node could
+	 * cause an unneccessary transaction restart:
+	 */
+	if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
+		     !b ||
+		     b->hash_val != btree_ptr_hash_val(k)))
+		return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+	if (btree_node_read_locked(path, level + 1))
+		btree_node_unlock(trans, path, level + 1);
+
+	ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		return ERR_PTR(ret);
+
+	BUG_ON(ret);
+
+	if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+		     b->c.level != level ||
+		     race_fault())) {
+		six_unlock_type(&b->c.lock, lock_type);
+		if (bch2_btree_node_relock(trans, path, level + 1))
+			return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+		trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
+	}
+
+	if (unlikely(btree_node_read_in_flight(b))) {
+		six_unlock_type(&b->c.lock, lock_type);
+		return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+	}
+
 	prefetch(b->aux_data);

 	for_each_bset(b, t) {
@@ -955,7 +1009,7 @@ lock_node:

 	if (unlikely(btree_node_read_error(b))) {
 		six_unlock_type(&b->c.lock, lock_type);
-		return ERR_PTR(-EIO);
+		return ERR_PTR(-BCH_ERR_btree_node_read_error);
 	}

 	EBUG_ON(b->c.btree_id != path->btree_id);
@@ -965,12 +1019,13 @@ lock_node:
 	return b;
 }

-struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
+struct btree *bch2_btree_node_get_noiter(struct btree_trans *trans,
 					 const struct bkey_i *k,
 					 enum btree_id btree_id,
 					 unsigned level,
 					 bool nofill)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 	struct bset_tree *t;
@@ -989,7 +1044,7 @@ retry:
 		if (nofill)
 			goto out;

-		b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+		b = bch2_btree_node_fill(trans, NULL, k, btree_id,
 					 level, SIX_LOCK_read, true);

 		/* We raced and found the btree node in the cache */
@@ -997,16 +1052,18 @@ retry:
 			goto retry;

 		if (IS_ERR(b) &&
-		    !bch2_btree_cache_cannibalize_lock(c, NULL))
+		    !bch2_btree_cache_cannibalize_lock(trans, NULL))
 			goto retry;

 		if (IS_ERR(b))
 			goto out;
 	} else {
lock_node:
-		ret = six_lock_read(&b->c.lock, lock_node_check_fn, (void *) k);
-		if (ret)
-			goto retry;
+		ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			return ERR_PTR(ret);
+
+		BUG_ON(ret);

 		if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
 			     b->c.btree_id != btree_id ||
@@ -1035,7 +1092,7 @@ lock_node:

 	if (unlikely(btree_node_read_error(b))) {
 		six_unlock_read(&b->c.lock);
-		b = ERR_PTR(-EIO);
+		b = ERR_PTR(-BCH_ERR_btree_node_read_error);
 		goto out;
 	}
@@ -1043,33 +1100,34 @@ lock_node:
 	EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
 	btree_check_header(c, b);
out:
-	bch2_btree_cache_cannibalize_unlock(c);
+	bch2_btree_cache_cannibalize_unlock(trans);
 	return b;
 }

-int bch2_btree_node_prefetch(struct bch_fs *c,
-			     struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
 			     struct btree_path *path,
 			     const struct bkey_i *k,
 			     enum btree_id btree_id, unsigned level)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;

-	BUG_ON(trans && !btree_node_locked(path, level + 1));
+	BUG_ON(path && !btree_node_locked(path, level + 1));
 	BUG_ON(level >= BTREE_MAX_DEPTH);

 	b = btree_cache_find(bc, k);
 	if (b)
 		return 0;

-	b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+	b = bch2_btree_node_fill(trans, path, k, btree_id,
 				 level, SIX_LOCK_read, false);
 	return PTR_ERR_OR_ZERO(b);
 }

-void bch2_btree_node_evict(struct bch_fs *c, const struct bkey_i *k)
+void bch2_btree_node_evict(struct btree_trans *trans, const struct bkey_i *k)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
@@ -1081,15 +1139,15 @@ wait_on_io:

 	/* XXX we're called from btree_gc which will be holding other btree
 	 * nodes locked
-	 * */
+	 */
 	__bch2_btree_node_wait_on_read(b);
 	__bch2_btree_node_wait_on_write(b);

-	six_lock_intent(&b->c.lock, NULL, NULL);
-	six_lock_write(&b->c.lock, NULL, NULL);
+	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
+	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);

 	if (btree_node_dirty(b)) {
-		__bch2_btree_node_write(c, b, 0);
+		__bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
 		six_unlock_write(&b->c.lock);
 		six_unlock_intent(&b->c.lock);
 		goto wait_on_io;
 	}
@@ -1106,26 +1164,42 @@ wait_on_io:
 	six_unlock_intent(&b->c.lock);
 }

-void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
-			     struct btree *b)
+const char *bch2_btree_id_str(enum btree_id btree)
+{
+	return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)";
+}
+
+void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
+{
+	prt_printf(out, "%s level %u/%u\n ",
		   bch2_btree_id_str(b->c.btree_id),
		   b->c.level,
		   bch2_btree_id_root(c, b->c.btree_id)->level);
+	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+}
+
+void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
 {
-	const struct bkey_format *f = &b->format;
 	struct bset_stats stats;

 	memset(&stats, 0, sizeof(stats));

 	bch2_btree_keys_stats(b, &stats);

-	pr_buf(out, "l %u ", b->c.level);
+	prt_printf(out, "l %u ", b->c.level);
 	bch2_bpos_to_text(out, b->data->min_key);
-	pr_buf(out, " - ");
+	prt_printf(out, " - ");
 	bch2_bpos_to_text(out, b->data->max_key);
-	pr_buf(out, ":\n"
+	prt_printf(out, ":\n"
 	       " ptrs: ");
 	bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+	prt_newline(out);
+
+	prt_printf(out,
+	       " format: ");
+	bch2_bkey_format_to_text(out, &b->format);

-	pr_buf(out, "\n"
-	       " format: u64s %u fields %u %u %u %u %u\n"
+	prt_printf(out,
 	       " unpack fn len: %u\n"
 	       " bytes used %zu/%zu (%zu%% full)\n"
 	       " sib u64s: %u, %u (merge threshold %u)\n"
@@ -1133,15 +1207,9 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
 	       " nr unpacked keys %u\n"
 	       " floats %zu\n"
 	       " failed unpacked %zu\n",
-	       f->key_u64s,
-	       f->bits_per_field[0],
-	       f->bits_per_field[1],
-	       f->bits_per_field[2],
-	       f->bits_per_field[3],
-	       f->bits_per_field[4],
 	       b->unpack_fn_len,
 	       b->nr.live_u64s * sizeof(u64),
-	       btree_bytes(c) - sizeof(struct btree_node),
+	       btree_buf_bytes(b) - sizeof(struct btree_node),
 	       b->nr.live_u64s * 100 / btree_max_u64s(c),
 	       b->sib_u64s[0],
 	       b->sib_u64s[1],
@@ -1152,9 +1220,9 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
 	       stats.failed);
 }

-void bch2_btree_cache_to_text(struct printbuf *out, struct bch_fs *c)
+void bch2_btree_cache_to_text(struct printbuf *out, const struct bch_fs *c)
 {
-	pr_buf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
-	pr_buf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
-	pr_buf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
+	prt_printf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
+	prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
+	prt_printf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
 }