Update bcachefs sources to fb39031ade bcachefs: bch2_sb_maybe_downgrade(), bch2_sb_up...

diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index 8dd2db4121a6b57db2a2c20fa4c9f3e0ad781428..13c88d9533e5cdf5fa6bf58a397b5d71284494a3 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -9,10 +9,11 @@
 #include "debug.h"
 #include "errcode.h"
 #include "error.h"
+#include "trace.h"
 
 #include <linux/prefetch.h>
 #include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
+#include <linux/seq_buf.h>
 
 #define BTREE_CACHE_NOT_FREED_INCREMENT(counter) \
 do {                                            \
@@ -31,13 +32,15 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
        unsigned i, reserve = 16;
 
-       if (!c->btree_roots[0].b)
+       if (!c->btree_roots_known[0].b)
                reserve += 8;
 
-       for (i = 0; i < BTREE_ID_NR; i++)
-               if (c->btree_roots[i].b)
-                       reserve += min_t(unsigned, 1,
-                                        c->btree_roots[i].b->c.level) * 8;
+       for (i = 0; i < btree_id_nr_alive(c); i++) {
+               struct btree_root *r = bch2_btree_id_root(c, i);
+
+               if (r->b)
+                       reserve += min_t(unsigned, 1, r->b->c.level) * 8;
+       }
 
        c->btree_cache.reserve = reserve;
 }
@@ -61,10 +64,12 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 
        EBUG_ON(btree_node_write_in_flight(b));
 
+       clear_btree_node_just_written(b);
+
        kvpfree(b->data, btree_bytes(c));
        b->data = NULL;
 #ifdef __KERNEL__
-       vfree(b->aux_data);
+       kvfree(b->aux_data);
 #else
        munmap(b->aux_data, btree_aux_data_bytes(b));
 #endif
@@ -97,9 +102,9 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
        b->data = kvpmalloc(btree_bytes(c), gfp);
        if (!b->data)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
-       b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+       b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
 #else
        b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
                           PROT_READ|PROT_WRITE|PROT_EXEC,
@@ -110,7 +115,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
        if (!b->aux_data) {
                kvpfree(b->data, btree_bytes(c));
                b->data = NULL;
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
        }
 
        return 0;
@@ -118,15 +123,13 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 
 static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 {
-       struct btree *b = kzalloc(sizeof(struct btree), gfp);
+       struct btree *b;
+
+       b = kzalloc(sizeof(struct btree), gfp);
        if (!b)
                return NULL;
 
        bkey_btree_ptr_init(&b->key);
-       __six_lock_init(&b->c.lock, "b->c.lock", &bch2_btree_node_lock_key);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       lockdep_set_no_check_recursion(&b->c.lock.dep_map);
-#endif
        INIT_LIST_HEAD(&b->list);
        INIT_LIST_HEAD(&b->write_blocked);
        b->byte_order = ilog2(btree_bytes(c));
@@ -136,7 +139,9 @@ static struct btree *__btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
 struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 {
        struct btree_cache *bc = &c->btree_cache;
-       struct btree *b = __btree_node_mem_alloc(c, GFP_KERNEL);
+       struct btree *b;
+
+       b = __btree_node_mem_alloc(c, GFP_KERNEL);
        if (!b)
                return NULL;
 
@@ -145,6 +150,8 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
                return NULL;
        }
 
+       bch2_btree_lock_init(&b->c, 0);
+
        bc->used++;
        list_add(&b->list, &bc->freeable);
        return b;
@@ -155,6 +162,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 {
        int ret = rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
+
        BUG_ON(ret);
 
        /* Cause future lookups for this node to fail: */
@@ -217,7 +225,7 @@ wait_on_io:
                                BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
                        else if (btree_node_write_in_flight(b))
                                BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_btree_node_reclaim;
                }
 
                /* XXX: waiting on IO with btree cache lock held */
@@ -227,7 +235,7 @@ wait_on_io:
 
        if (!six_trylock_intent(&b->c.lock)) {
                BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_node_reclaim;
        }
 
        if (!six_trylock_write(&b->c.lock)) {
@@ -275,9 +283,11 @@ wait_on_io:
                 * the post write cleanup:
                 */
                if (bch2_verify_btree_ondisk)
-                       bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
+                       bch2_btree_node_write(c, b, SIX_LOCK_intent,
+                                             BTREE_WRITE_cache_reclaim);
                else
-                       __bch2_btree_node_write(c, b, 0);
+                       __bch2_btree_node_write(c, b,
+                                               BTREE_WRITE_cache_reclaim);
 
                six_unlock_write(&b->c.lock);
                six_unlock_intent(&b->c.lock);
@@ -291,7 +301,7 @@ out_unlock:
        six_unlock_write(&b->c.lock);
 out_unlock_intent:
        six_unlock_intent(&b->c.lock);
-       ret = -ENOMEM;
+       ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
        goto out;
 }
 
@@ -384,7 +394,7 @@ restart:
                           six_trylock_read(&b->c.lock)) {
                        list_move(&bc->live, &b->list);
                        mutex_unlock(&bc->lock);
-                       __bch2_btree_node_write(c, b, 0);
+                       __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
                        six_unlock_read(&b->c.lock);
                        if (touched >= nr)
                                goto out_nounlock;
@@ -420,12 +430,16 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
        return btree_cache_can_free(bc);
 }
 
-static void bch2_btree_cache_shrinker_to_text(struct printbuf *out, struct shrinker *shrink)
+static void bch2_btree_cache_shrinker_to_text(struct seq_buf *s, struct shrinker *shrink)
 {
        struct bch_fs *c = container_of(shrink, struct bch_fs,
                                        btree_cache.shrink);
+       char *cbuf;
+       size_t buflen = seq_buf_get_buf(s, &cbuf);
+       struct printbuf out = PRINTBUF_EXTERN(cbuf, buflen);
 
-       bch2_btree_cache_to_text(out, &c->btree_cache);
+       bch2_btree_cache_to_text(&out, &c->btree_cache);
+       seq_buf_commit(s, out.pos);
 }
 
 void bch2_fs_btree_cache_exit(struct bch_fs *c)
@@ -434,8 +448,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        struct btree *b;
        unsigned i, flags;
 
-       if (bc->shrink.list.next)
-               unregister_shrinker(&bc->shrink);
+       unregister_shrinker(&bc->shrink);
 
        /* vfree() can allocate memory: */
        flags = memalloc_nofs_save();
@@ -446,9 +459,12 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 
        kvpfree(c->verify_ondisk, btree_bytes(c));
 
-       for (i = 0; i < BTREE_ID_NR; i++)
-               if (c->btree_roots[i].b)
-                       list_add(&c->btree_roots[i].b->list, &bc->live);
+       for (i = 0; i < btree_id_nr_alive(c); i++) {
+               struct btree_root *r = bch2_btree_id_root(c, i);
+
+               if (r->b)
+                       list_add(&r->b->list, &bc->live);
+       }
 
        list_splice(&bc->freeable, &bc->live);
 
@@ -472,7 +488,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
        while (!list_empty(&bc->freed_nonpcpu)) {
                b = list_first_entry(&bc->freed_nonpcpu, struct btree, list);
                list_del(&b->list);
-               six_lock_pcpu_free(&b->c.lock);
+               six_lock_exit(&b->c.lock);
                kfree(b);
        }
 
@@ -489,21 +505,17 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
        unsigned i;
        int ret = 0;
 
-       pr_verbose_init(c->opts, "");
-
        ret = rhashtable_init(&bc->table, &bch_btree_cache_params);
        if (ret)
-               goto out;
+               goto err;
 
        bc->table_init_done = true;
 
        bch2_recalc_btree_reserve(c);
 
        for (i = 0; i < bc->reserve; i++)
-               if (!__bch2_btree_node_mem_alloc(c)) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               if (!__bch2_btree_node_mem_alloc(c))
+                       goto err;
 
        list_splice_init(&bc->live, &bc->freeable);
 
@@ -514,9 +526,12 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
        bc->shrink.to_text              = bch2_btree_cache_shrinker_to_text;
        bc->shrink.seeks                = 4;
        ret = register_shrinker(&bc->shrink, "%s/btree_cache", c->name);
-out:
-       pr_verbose_init(c->opts, "ret %i", ret);
-       return ret;
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 }
 
 void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
@@ -556,7 +571,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 
        if (!cl) {
                trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
        }
 
        closure_wait(&bc->alloc_wait, cl);
@@ -570,7 +585,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
        }
 
        trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
-       return -EAGAIN;
+       return -BCH_ERR_btree_cache_cannibalize_lock_blocked;
 
 success:
        trace_and_count(c, btree_cache_cannibalize_lock, c);
@@ -600,8 +615,9 @@ static struct btree *btree_node_cannibalize(struct bch_fs *c)
        }
 }
 
-struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
+struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_read_locks)
 {
+       struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct list_head *freed = pcpu_read_locks
                ? &bc->freed_pcpu
@@ -623,17 +639,17 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
                        goto got_node;
                }
 
-       b = __btree_node_mem_alloc(c, __GFP_NOWARN);
+       b = __btree_node_mem_alloc(c, GFP_NOWAIT|__GFP_NOWARN);
        if (!b) {
                mutex_unlock(&bc->lock);
+               bch2_trans_unlock(trans);
                b = __btree_node_mem_alloc(c, GFP_KERNEL);
                if (!b)
                        goto err;
                mutex_lock(&bc->lock);
        }
 
-       if (pcpu_read_locks)
-               six_lock_pcpu_alloc(&b->c.lock);
+       bch2_btree_lock_init(&b->c, pcpu_read_locks ? SIX_LOCK_INIT_PCPU : 0);
 
        BUG_ON(!six_trylock_intent(&b->c.lock));
        BUG_ON(!six_trylock_write(&b->c.lock));
@@ -655,8 +671,11 @@ got_node:
 
        mutex_unlock(&bc->lock);
 
-       if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
-               goto err;
+       if (btree_node_data_alloc(c, b, GFP_NOWAIT|__GFP_NOWARN)) {
+               bch2_trans_unlock(trans);
+               if (btree_node_data_alloc(c, b, GFP_KERNEL|__GFP_NOWARN))
+                       goto err;
+       }
 
        mutex_lock(&bc->lock);
        bc->used++;
@@ -687,6 +706,7 @@ err:
        /* Try to cannibalize another cached btree node: */
        if (bc->alloc_lock == current) {
                b2 = btree_node_cannibalize(c);
+               clear_btree_node_just_written(b2);
                bch2_btree_node_hash_remove(bc, b2);
 
                if (b) {
@@ -708,12 +728,11 @@ err:
 
        mutex_unlock(&bc->lock);
        memalloc_nofs_restore(flags);
-       return ERR_PTR(-ENOMEM);
+       return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
-static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
-                               struct btree_trans *trans,
+static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
                                struct btree_path *path,
                                const struct bkey_i *k,
                                enum btree_id btree_id,
@@ -721,6 +740,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
                                enum six_lock_type lock_type,
                                bool sync)
 {
+       struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
        u32 seq;
@@ -730,14 +750,14 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
         * Parent node must be locked, else we could read in a btree node that's
         * been freed:
         */
-       if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
+       if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
                trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
        }
 
-       b = bch2_btree_node_mem_alloc(c, level != 0);
+       b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-       if (trans && b == ERR_PTR(-ENOMEM)) {
+       if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
                trans->memory_allocation_failure = true;
                trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
@@ -746,6 +766,12 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
        if (IS_ERR(b))
                return b;
 
+       /*
+        * Btree nodes read in from disk should not have the accessed bit set
+        * initially, so that linear scans don't thrash the cache:
+        */
+       clear_btree_node_accessed(b);
+
        bkey_copy(&b->key, k);
        if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
                /* raced with another fill: */
@@ -765,19 +791,19 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
        set_btree_node_read_in_flight(b);
 
        six_unlock_write(&b->c.lock);
-       seq = b->c.lock.state.seq;
+       seq = six_lock_seq(&b->c.lock);
        six_unlock_intent(&b->c.lock);
 
        /* Unlock before doing IO: */
        if (trans && sync)
-               bch2_trans_unlock(trans);
+               bch2_trans_unlock_noassert(trans);
 
        bch2_btree_node_read(c, b, sync);
 
        if (!sync)
                return NULL;
 
-       if (trans) {
+       if (path) {
                int ret = bch2_trans_relock(trans) ?:
                        bch2_btree_path_relock_intent(trans, path);
                if (ret) {
@@ -787,7 +813,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
        }
 
        if (!six_relock_type(&b->c.lock, lock_type, seq)) {
-               if (trans)
+               if (path)
                        trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
                return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
        }
@@ -799,7 +825,7 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 {
        struct printbuf buf = PRINTBUF;
 
-       if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
+       if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations)
                return;
 
        prt_printf(&buf,
@@ -826,46 +852,26 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
 {
        if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
            b->c.level != BTREE_NODE_LEVEL(b->data) ||
-           bpos_cmp(b->data->max_key, b->key.k.p) ||
+           !bpos_eq(b->data->max_key, b->key.k.p) ||
            (b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
-            bpos_cmp(b->data->min_key,
+            !bpos_eq(b->data->min_key,
                      bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
                btree_bad_header(c, b);
 }
 
-/**
- * bch_btree_node_get - find a btree node in the cache and lock it, reading it
- * in from disk if necessary.
- *
- * If IO is necessary and running under generic_make_request, returns -EAGAIN.
- *
- * The btree node will have either a read or a write lock held, depending on
- * the @write parameter.
- */
-struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
-                                 const struct bkey_i *k, unsigned level,
-                                 enum six_lock_type lock_type,
-                                 unsigned long trace_ip)
+static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+                                          const struct bkey_i *k, unsigned level,
+                                          enum six_lock_type lock_type,
+                                          unsigned long trace_ip)
 {
        struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
        struct bset_tree *t;
+       bool need_relock = false;
        int ret;
 
        EBUG_ON(level >= BTREE_MAX_DEPTH);
-
-       b = btree_node_mem_ptr(k);
-
-       /*
-        * Check b->hash_val _before_ calling btree_node_lock() - this might not
-        * be the node we want anymore, and trying to lock the wrong node could
-        * cause an unneccessary transaction restart:
-        */
-       if (likely(c->opts.btree_node_mem_ptr_optimization &&
-                  b &&
-                  b->hash_val == btree_ptr_hash_val(k)))
-               goto lock_node;
 retry:
        b = btree_cache_find(bc, k);
        if (unlikely(!b)) {
@@ -874,8 +880,9 @@ retry:
                 * else we could read in a btree node from disk that's been
                 * freed:
                 */
-               b = bch2_btree_node_fill(c, trans, path, k, path->btree_id,
+               b = bch2_btree_node_fill(trans, path, k, path->btree_id,
                                         level, lock_type, true);
+               need_relock = true;
 
                /* We raced and found the btree node in the cache */
                if (!b)
@@ -884,35 +891,6 @@ retry:
                if (IS_ERR(b))
                        return b;
        } else {
-lock_node:
-               /*
-                * There's a potential deadlock with splits and insertions into
-                * interior nodes we have to avoid:
-                *
-                * The other thread might be holding an intent lock on the node
-                * we want, and they want to update its parent node so they're
-                * going to upgrade their intent lock on the parent node to a
-                * write lock.
-                *
-                * But if we're holding a read lock on the parent, and we're
-                * trying to get the intent lock they're holding, we deadlock.
-                *
-                * So to avoid this we drop the read locks on parent nodes when
-                * we're starting to take intent locks - and handle the race.
-                *
-                * The race is that they might be about to free the node we
-                * want, and dropping our read lock on the parent node lets them
-                * update the parent marking the node we want as freed, and then
-                * free it:
-                *
-                * To guard against this, btree nodes are evicted from the cache
-                * when they're freed - and b->hash_val is zeroed out, which we
-                * check for after we lock the node.
-                *
-                * Then, bch2_btree_node_relock() on the parent will fail - because
-                * the parent was modified, when the pointer to the node we want
-                * was removed - and we'll bail out:
-                */
                if (btree_node_read_locked(path, level + 1))
                        btree_node_unlock(trans, path, level + 1);
 
@@ -932,10 +910,113 @@ lock_node:
                        trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
                        return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
                }
+
+               /* avoid atomic set bit if it's not needed: */
+               if (!btree_node_accessed(b))
+                       set_btree_node_accessed(b);
+       }
+
+       if (unlikely(btree_node_read_in_flight(b))) {
+               u32 seq = six_lock_seq(&b->c.lock);
+
+               six_unlock_type(&b->c.lock, lock_type);
+               bch2_trans_unlock(trans);
+               need_relock = true;
+
+               bch2_btree_node_wait_on_read(b);
+
+               /*
+                * should_be_locked is not set on this path yet, so we need to
+                * relock it specifically:
+                */
+               if (!six_relock_type(&b->c.lock, lock_type, seq))
+                       goto retry;
+       }
+
+       if (unlikely(need_relock)) {
+               int ret = bch2_trans_relock(trans) ?:
+                       bch2_btree_path_relock_intent(trans, path);
+               if (ret) {
+                       six_unlock_type(&b->c.lock, lock_type);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       prefetch(b->aux_data);
+
+       for_each_bset(b, t) {
+               void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+               prefetch(p + L1_CACHE_BYTES * 0);
+               prefetch(p + L1_CACHE_BYTES * 1);
+               prefetch(p + L1_CACHE_BYTES * 2);
+       }
+
+       if (unlikely(btree_node_read_error(b))) {
+               six_unlock_type(&b->c.lock, lock_type);
+               return ERR_PTR(-EIO);
+       }
+
+       EBUG_ON(b->c.btree_id != path->btree_id);
+       EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+       btree_check_header(c, b);
+
+       return b;
+}
+
+/**
+ * bch_btree_node_get - find a btree node in the cache and lock it, reading it
+ * in from disk if necessary.
+ *
+ * The btree node will have either a read or a write lock held, depending on
+ * the @write parameter.
+ */
+struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
+                                 const struct bkey_i *k, unsigned level,
+                                 enum six_lock_type lock_type,
+                                 unsigned long trace_ip)
+{
+       struct bch_fs *c = trans->c;
+       struct btree *b;
+       struct bset_tree *t;
+       int ret;
+
+       EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+       b = btree_node_mem_ptr(k);
+
+       /*
+        * Check b->hash_val _before_ calling btree_node_lock() - this might not
+        * be the node we want anymore, and trying to lock the wrong node could
+        * cause an unneccessary transaction restart:
+        */
+       if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
+                    !b ||
+                    b->hash_val != btree_ptr_hash_val(k)))
+               return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+       if (btree_node_read_locked(path, level + 1))
+               btree_node_unlock(trans, path, level + 1);
+
+       ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               return ERR_PTR(ret);
+
+       BUG_ON(ret);
+
+       if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+                    b->c.level != level ||
+                    race_fault())) {
+               six_unlock_type(&b->c.lock, lock_type);
+               if (bch2_btree_node_relock(trans, path, level + 1))
+                       return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
+
+               trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
+               return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
        }
 
        if (unlikely(btree_node_read_in_flight(b))) {
-               u32 seq = b->c.lock.state.seq;
+               u32 seq = six_lock_seq(&b->c.lock);
 
                six_unlock_type(&b->c.lock, lock_type);
                bch2_trans_unlock(trans);
@@ -956,7 +1037,7 @@ lock_node:
                }
 
                if (!six_relock_type(&b->c.lock, lock_type, seq))
-                       goto retry;
+                       return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
        }
 
        prefetch(b->aux_data);
@@ -1010,7 +1091,7 @@ retry:
                if (nofill)
                        goto out;
 
-               b = bch2_btree_node_fill(c, NULL, NULL, k, btree_id,
+               b = bch2_btree_node_fill(trans, NULL, k, btree_id,
                                         level, SIX_LOCK_read, true);
 
                /* We raced and found the btree node in the cache */
@@ -1025,7 +1106,7 @@ retry:
                        goto out;
        } else {
 lock_node:
-               ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read);
+               ret = btree_node_lock_nopath(trans, &b->c, SIX_LOCK_read, _THIS_IP_);
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        return ERR_PTR(ret);
 
@@ -1070,12 +1151,12 @@ out:
        return b;
 }
 
-int bch2_btree_node_prefetch(struct bch_fs *c,
-                            struct btree_trans *trans,
+int bch2_btree_node_prefetch(struct btree_trans *trans,
                             struct btree_path *path,
                             const struct bkey_i *k,
                             enum btree_id btree_id, unsigned level)
 {
+       struct bch_fs *c = trans->c;
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
 
@@ -1086,7 +1167,7 @@ int bch2_btree_node_prefetch(struct bch_fs *c,
        if (b)
                return 0;
 
-       b = bch2_btree_node_fill(c, trans, path, k, btree_id,
+       b = bch2_btree_node_fill(trans, path, k, btree_id,
                                 level, SIX_LOCK_read, false);
        return PTR_ERR_OR_ZERO(b);
 }
@@ -1113,7 +1194,7 @@ wait_on_io:
        btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
 
        if (btree_node_dirty(b)) {
-               __bch2_btree_node_write(c, b, 0);
+               __bch2_btree_node_write(c, b, BTREE_WRITE_cache_reclaim);
                six_unlock_write(&b->c.lock);
                six_unlock_intent(&b->c.lock);
                goto wait_on_io;
@@ -1131,7 +1212,7 @@ wait_on_io:
 }
 
 void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
-                            struct btree *b)
+                            const struct btree *b)
 {
        const struct bkey_format *f = &b->format;
        struct bset_stats stats;
@@ -1176,7 +1257,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
               stats.failed);
 }
 
-void bch2_btree_cache_to_text(struct printbuf *out, struct btree_cache *bc)
+void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc)
 {
        prt_printf(out, "nr nodes:\t\t%u\n", bc->used);
        prt_printf(out, "nr dirty:\t\t%u\n", atomic_read(&bc->dirty));