Update bcachefs sources to 18686af684 bcachefs: Inode backpointers
diff --git a/libbcachefs/btree_cache.c b/libbcachefs/btree_cache.c
index d99441a1edf4f1e283280e455638991b17948523..8a4667ba6b189e41d39aea23edec58ebf3ad33a1 100644
--- a/libbcachefs/btree_cache.c
+++ b/libbcachefs/btree_cache.c
@@ -1,21 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_iter.h"
 #include "btree_locking.h"
 #include "debug.h"
+#include "error.h"
 
 #include <linux/prefetch.h>
+#include <linux/sched/mm.h>
 #include <trace/events/bcachefs.h>
 
-const char * const bch2_btree_ids[] = {
-#define x(kwd, val, name) name,
-       BCH_BTREE_IDS()
-#undef x
-       NULL
-};
-
 void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
        unsigned i, reserve = 16;
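
The bch2_btree_ids[] name table removed here is still referenced further down (the topology-error message in this same diff prints bch2_btree_ids[iter->btree_id]), so the definition has presumably just moved to a common string-table file; the x-macro expansion itself is unchanged. For reference, the idiom in a minimal, self-contained form (made-up IDs, not the real BCH_BTREE_IDS() list):

    /* one list, expanded twice with different definitions of x() */
    #define EXAMPLE_IDS()                   \
            x(extents, 0, "extents")        \
            x(inodes,  1, "inodes")

    enum example_id {
    #define x(kwd, val, name) EXAMPLE_ID_##kwd = val,
            EXAMPLE_IDS()
    #undef x
            EXAMPLE_ID_NR
    };

    const char * const example_ids[] = {
    #define x(kwd, val, name) name,
            EXAMPLE_IDS()
    #undef x
            NULL
    };
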
@@ -26,7 +23,7 @@ void bch2_recalc_btree_reserve(struct bch_fs *c)
        for (i = 0; i < BTREE_ID_NR; i++)
                if (c->btree_roots[i].b)
                        reserve += min_t(unsigned, 1,
-                                        c->btree_roots[i].b->level) * 8;
+                                        c->btree_roots[i].b->c.level) * 8;
 
        c->btree_cache.reserve = reserve;
 }
@@ -42,7 +39,8 @@ static void __btree_node_data_free(struct bch_fs *c, struct btree *b)
 
        kvpfree(b->data, btree_bytes(c));
        b->data = NULL;
-       bch2_btree_keys_free(b);
+       vfree(b->aux_data);
+       b->aux_data = NULL;
 }
 
 static void btree_node_data_free(struct bch_fs *c, struct btree *b)
@@ -60,49 +58,63 @@ static int bch2_btree_cache_cmp_fn(struct rhashtable_compare_arg *arg,
        const struct btree *b = obj;
        const u64 *v = arg->key;
 
-       return PTR_HASH(&b->key) == *v ? 0 : 1;
+       return b->hash_val == *v ? 0 : 1;
 }
 
 static const struct rhashtable_params bch_btree_cache_params = {
        .head_offset    = offsetof(struct btree, hash),
-       .key_offset     = offsetof(struct btree, key.v),
-       .key_len        = sizeof(struct bch_extent_ptr),
+       .key_offset     = offsetof(struct btree, hash_val),
+       .key_len        = sizeof(u64),
        .obj_cmpfn      = bch2_btree_cache_cmp_fn,
 };
 
-static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
+static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
-       struct btree_cache *bc = &c->btree_cache;
+       BUG_ON(b->data || b->aux_data);
 
        b->data = kvpmalloc(btree_bytes(c), gfp);
        if (!b->data)
-               goto err;
+               return -ENOMEM;
 
-       if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp))
-               goto err;
+       b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
+       if (!b->aux_data) {
+               kvpfree(b->data, btree_bytes(c));
+               b->data = NULL;
+               return -ENOMEM;
+       }
 
-       bc->used++;
-       list_move(&b->list, &bc->freeable);
-       return;
-err:
-       kvpfree(b->data, btree_bytes(c));
-       b->data = NULL;
-       list_move(&b->list, &bc->freed);
+       return 0;
 }
 
-static struct btree *btree_node_mem_alloc(struct bch_fs *c, gfp_t gfp)
+static struct btree *__btree_node_mem_alloc(struct bch_fs *c)
 {
-       struct btree *b = kzalloc(sizeof(struct btree), gfp);
+       struct btree *b = kzalloc(sizeof(struct btree), GFP_KERNEL);
        if (!b)
                return NULL;
 
        bkey_btree_ptr_init(&b->key);
-       six_lock_init(&b->lock);
+       six_lock_init(&b->c.lock);
        INIT_LIST_HEAD(&b->list);
        INIT_LIST_HEAD(&b->write_blocked);
+       b->byte_order = ilog2(btree_bytes(c));
+       return b;
+}
+
+static struct btree *btree_node_mem_alloc(struct bch_fs *c)
+{
+       struct btree_cache *bc = &c->btree_cache;
+       struct btree *b = __btree_node_mem_alloc(c);
+       if (!b)
+               return NULL;
 
-       btree_node_data_alloc(c, b, gfp);
-       return b->data ? b : NULL;
+       if (btree_node_data_alloc(c, b, GFP_KERNEL)) {
+               kfree(b);
+               return NULL;
+       }
+
+       bc->used++;
+       list_add(&b->list, &bc->freeable);
+       return b;
 }
 
 /* Btree in memory cache - hash table */
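
The hunks above re-key the btree node hash table: instead of hashing the raw bch_extent_ptr bytes embedded in b->key on every operation, a u64 digest (b->hash_val) is computed once at insert time by btree_ptr_hash_val(), lookups compare a single u64, and eviction can invalidate a node just by zeroing the field (see bch2_btree_node_hash_remove() below). The general rhashtable pattern, with illustrative names rather than the bcachefs structs:

    #include <linux/rhashtable.h>

    struct cached_node {
            struct rhash_head       hash;           /* table linkage */
            u64                     hash_val;       /* precomputed key */
    };

    static int cached_node_cmp(struct rhashtable_compare_arg *arg,
                               const void *obj)
    {
            const struct cached_node *n = obj;
            const u64 *v = arg->key;

            return n->hash_val == *v ? 0 : 1;       /* 0 means "matches" */
    }

    static const struct rhashtable_params cached_node_params = {
            .head_offset    = offsetof(struct cached_node, hash),
            .key_offset     = offsetof(struct cached_node, hash_val),
            .key_len        = sizeof(u64),
            .obj_cmpfn      = cached_node_cmp,
    };

    static struct cached_node *cached_node_find(struct rhashtable *table,
                                                u64 hash_val)
    {
            return rhashtable_lookup_fast(table, &hash_val,
                                          cached_node_params);
    }
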
@@ -112,11 +124,16 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
        rhashtable_remove_fast(&bc->table, &b->hash, bch_btree_cache_params);
 
        /* Cause future lookups for this node to fail: */
-       PTR_HASH(&b->key) = 0;
+       b->hash_val = 0;
+
+       six_lock_wakeup_all(&b->c.lock);
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 {
+       BUG_ON(b->hash_val);
+       b->hash_val = btree_ptr_hash_val(&b->key);
+
        return rhashtable_lookup_insert_fast(&bc->table, &b->hash,
                                             bch_btree_cache_params);
 }
@@ -126,8 +143,13 @@ int bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b,
 {
        int ret;
 
-       b->level        = level;
-       b->btree_id     = id;
+       b->c.level      = level;
+       b->c.btree_id   = id;
+
+       if (level)
+               six_lock_pcpu_alloc(&b->c.lock);
+       else
+               six_lock_pcpu_free_rcu(&b->c.lock);
 
        mutex_lock(&bc->lock);
        ret = __bch2_btree_node_hash_insert(bc, b);
@@ -142,8 +164,9 @@ __flatten
 static inline struct btree *btree_cache_find(struct btree_cache *bc,
                                     const struct bkey_i *k)
 {
-       return rhashtable_lookup_fast(&bc->table, &PTR_HASH(k),
-                                     bch_btree_cache_params);
+       u64 v = btree_ptr_hash_val(k);
+
+       return rhashtable_lookup_fast(&bc->table, &v, bch_btree_cache_params);
 }
 
 /*
@@ -157,10 +180,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 
        lockdep_assert_held(&bc->lock);
 
-       if (!six_trylock_intent(&b->lock))
+       if (!six_trylock_intent(&b->c.lock))
                return -ENOMEM;
 
-       if (!six_trylock_write(&b->lock))
+       if (!six_trylock_write(&b->c.lock))
                goto out_unlock_intent;
 
        if (btree_node_noevict(b))
@@ -169,6 +192,10 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
        if (!btree_node_may_write(b))
                goto out_unlock;
 
+       if (btree_node_dirty(b) &&
+           test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
+               goto out_unlock;
+
        if (btree_node_dirty(b) ||
            btree_node_write_in_flight(b) ||
            btree_node_read_in_flight(b)) {
@@ -184,7 +211,7 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
                 * - unless btree verify mode is enabled, since it runs out of
                 * the post write cleanup:
                 */
-               if (verify_btree_ondisk(c))
+               if (bch2_verify_btree_ondisk)
                        bch2_btree_node_write(c, b, SIX_LOCK_intent);
                else
                        __bch2_btree_node_write(c, b, SIX_LOCK_read);
@@ -193,13 +220,13 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
                btree_node_wait_on_io(b);
        }
 out:
-       if (PTR_HASH(&b->key) && !ret)
+       if (b->hash_val && !ret)
                trace_btree_node_reap(c, b);
        return ret;
 out_unlock:
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 out_unlock_intent:
-       six_unlock_intent(&b->lock);
+       six_unlock_intent(&b->c.lock);
        ret = -ENOMEM;
        goto out;
 }
@@ -225,17 +252,19 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
        unsigned long can_free;
        unsigned long touched = 0;
        unsigned long freed = 0;
-       unsigned i;
+       unsigned i, flags;
 
-       if (btree_shrinker_disabled(c))
+       if (bch2_btree_shrinker_disabled)
                return SHRINK_STOP;
 
        /* Return -1 if we can't do anything right now */
-       if (sc->gfp_mask & __GFP_IO)
+       if (sc->gfp_mask & __GFP_FS)
                mutex_lock(&bc->lock);
        else if (!mutex_trylock(&bc->lock))
                return -1;
 
+       flags = memalloc_nofs_save();
+
        /*
         * It's _really_ critical that we don't free too many btree nodes - we
         * have to always leave ourselves a reserve. The reserve is how we
@@ -257,8 +286,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
                if (++i > 3 &&
                    !btree_node_reclaim(c, b)) {
                        btree_node_data_free(c, b);
-                       six_unlock_write(&b->lock);
-                       six_unlock_intent(&b->lock);
+                       six_unlock_write(&b->c.lock);
+                       six_unlock_intent(&b->c.lock);
                        freed++;
                }
        }
@@ -284,13 +313,13 @@ restart:
                        mutex_unlock(&bc->lock);
 
                        bch2_btree_node_hash_remove(bc, b);
-                       six_unlock_write(&b->lock);
-                       six_unlock_intent(&b->lock);
+                       six_unlock_write(&b->c.lock);
+                       six_unlock_intent(&b->c.lock);
 
                        if (freed >= nr)
                                goto out;
 
-                       if (sc->gfp_mask & __GFP_IO)
+                       if (sc->gfp_mask & __GFP_FS)
                                mutex_lock(&bc->lock);
                        else if (!mutex_trylock(&bc->lock))
                                goto out;
@@ -301,6 +330,7 @@ restart:
 
        mutex_unlock(&bc->lock);
 out:
+       memalloc_nofs_restore(flags);
        return (unsigned long) freed * btree_pages(c);
 }
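
Two reclaim-safety fixes land in this function: the "may we block on the mutex?" test now requires __GFP_FS rather than just __GFP_IO, and the whole scan runs inside a memalloc_nofs_save()/restore() bracket, since freeing node memory (vfree() can itself allocate) must not recurse into filesystem reclaim while bc->lock is held. The bracket on its own:

    #include <linux/sched/mm.h>

    static void scan_under_nofs(void)
    {
            unsigned flags = memalloc_nofs_save();

            /*
             * allocations in this scope implicitly lose __GFP_FS, so
             * anything we allocate (or free) here cannot re-enter fs
             * reclaim and deadlock on locks this thread already holds
             */

            memalloc_nofs_restore(flags);
    }
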
 
@@ -311,7 +341,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
                                        btree_cache.shrink);
        struct btree_cache *bc = &c->btree_cache;
 
-       if (btree_shrinker_disabled(c))
+       if (bch2_btree_shrinker_disabled)
                return 0;
 
        return btree_cache_can_free(bc) * btree_pages(c);
@@ -321,11 +351,13 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 {
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
-       unsigned i;
+       unsigned i, flags;
 
        if (bc->shrink.list.next)
                unregister_shrinker(&bc->shrink);
 
+       /* vfree() can allocate memory: */
+       flags = memalloc_nofs_save();
        mutex_lock(&bc->lock);
 
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -349,18 +381,22 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 
                if (btree_node_dirty(b))
                        bch2_btree_complete_write(c, b, btree_current_write(b));
-               clear_btree_node_dirty(b);
+               clear_btree_node_dirty(c, b);
 
                btree_node_data_free(c, b);
        }
 
+       BUG_ON(atomic_read(&c->btree_cache.dirty));
+
        while (!list_empty(&bc->freed)) {
                b = list_first_entry(&bc->freed, struct btree, list);
                list_del(&b->list);
+               six_lock_pcpu_free(&b->c.lock);
                kfree(b);
        }
 
        mutex_unlock(&bc->lock);
+       memalloc_nofs_restore(flags);
 
        if (bc->table_init_done)
                rhashtable_destroy(&bc->table);
@@ -383,7 +419,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
        bch2_recalc_btree_reserve(c);
 
        for (i = 0; i < bc->reserve; i++)
-               if (!btree_node_mem_alloc(c, GFP_KERNEL)) {
+               if (!btree_node_mem_alloc(c)) {
                        ret = -ENOMEM;
                        goto out;
                }
@@ -399,7 +435,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
                goto out;
        }
 
-       c->verify_data = btree_node_mem_alloc(c, GFP_KERNEL);
+       c->verify_data = btree_node_mem_alloc(c);
        if (!c->verify_data) {
                ret = -ENOMEM;
                goto out;
@@ -412,7 +448,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
        bc->shrink.scan_objects         = bch2_btree_cache_scan;
        bc->shrink.seeks                = 4;
        bc->shrink.batch                = btree_pages(c) * 2;
-       register_shrinker(&bc->shrink);
+       ret = register_shrinker(&bc->shrink);
 out:
        pr_verbose_init(c->opts, "ret %i", ret);
        return ret;
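
register_shrinker() can fail (it allocates the per-node deferred counters), so its return value is now propagated instead of ignored; bch2_fs_btree_cache_init() already returns ret, so no other change is needed. On kernels of this era the registration shape is:

    static int cache_shrinker_init(struct shrinker *shrink)
    {
            shrink->count_objects   = bch2_btree_cache_count;
            shrink->scan_objects    = bch2_btree_cache_scan;
            shrink->seeks           = 4;            /* relative cost of recreating a node */

            return register_shrinker(shrink);       /* 0 on success, else -ENOMEM */
    }
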
@@ -503,7 +539,9 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
        u64 start_time = local_clock();
+       unsigned flags;
 
+       flags = memalloc_nofs_save();
        mutex_lock(&bc->lock);
 
        /*
@@ -512,35 +550,42 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c)
         */
        list_for_each_entry(b, &bc->freeable, list)
                if (!btree_node_reclaim(c, b))
-                       goto out_unlock;
+                       goto got_node;
 
        /*
         * We never free struct btree itself, just the memory that holds the on
         * disk node. Check the freed list before allocating a new one:
         */
        list_for_each_entry(b, &bc->freed, list)
-               if (!btree_node_reclaim(c, b)) {
-                       btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_NOIO);
-                       if (b->data)
-                               goto out_unlock;
+               if (!btree_node_reclaim(c, b))
+                       goto got_node;
 
-                       six_unlock_write(&b->lock);
-                       six_unlock_intent(&b->lock);
+       b = NULL;
+got_node:
+       if (b)
+               list_del_init(&b->list);
+       mutex_unlock(&bc->lock);
+
+       if (!b) {
+               b = __btree_node_mem_alloc(c);
+               if (!b)
                        goto err;
-               }
 
-       b = btree_node_mem_alloc(c, __GFP_NOWARN|GFP_NOIO);
-       if (!b)
-               goto err;
+               BUG_ON(!six_trylock_intent(&b->c.lock));
+               BUG_ON(!six_trylock_write(&b->c.lock));
+       }
+
+       if (!b->data) {
+               if (btree_node_data_alloc(c, b, __GFP_NOWARN|GFP_KERNEL))
+                       goto err;
+
+               mutex_lock(&bc->lock);
+               bc->used++;
+               mutex_unlock(&bc->lock);
+       }
 
-       BUG_ON(!six_trylock_intent(&b->lock));
-       BUG_ON(!six_trylock_write(&b->lock));
-out_unlock:
        BUG_ON(btree_node_hashed(b));
        BUG_ON(btree_node_write_in_flight(b));
-
-       list_del_init(&b->list);
-       mutex_unlock(&bc->lock);
 out:
        b->flags                = 0;
        b->written              = 0;
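
The rework above pulls the actual memory allocation out from under bc->lock: a reusable struct btree is unhooked from the freeable/freed list while the lock is held, the lock is dropped for the potentially sleeping data allocation, and the lock is retaken only to bump the accounting. A sketch of that shape with hypothetical types (grab_and_alloc(), struct item, and alloc_data() are illustrative, not bcachefs API):

    static struct item *grab_and_alloc(struct cache *bc)
    {
            struct item *b = NULL;

            mutex_lock(&bc->lock);
            if (!list_empty(&bc->freed)) {
                    b = list_first_entry(&bc->freed, struct item, list);
                    list_del_init(&b->list);        /* unhook under the lock */
            }
            mutex_unlock(&bc->lock);

            if (b && alloc_data(b))                 /* may sleep: no lock held */
                    return NULL;

            mutex_lock(&bc->lock);
            bc->used++;                             /* accounting back under the lock */
            mutex_unlock(&bc->lock);
            return b;
    }
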
@@ -548,14 +593,22 @@ out:
        b->sib_u64s[0]          = 0;
        b->sib_u64s[1]          = 0;
        b->whiteout_u64s        = 0;
-       b->uncompacted_whiteout_u64s = 0;
-       bch2_btree_keys_init(b, &c->expensive_debug_checks);
+       bch2_btree_keys_init(b);
 
        bch2_time_stats_update(&c->times[BCH_TIME_btree_node_mem_alloc],
                               start_time);
 
+       memalloc_nofs_restore(flags);
        return b;
 err:
+       mutex_lock(&bc->lock);
+
+       if (b) {
+               list_add(&b->list, &bc->freed);
+               six_unlock_write(&b->c.lock);
+               six_unlock_intent(&b->c.lock);
+       }
+
        /* Try to cannibalize another cached btree node: */
        if (bc->alloc_lock == current) {
                b = btree_node_cannibalize(c);
@@ -569,6 +622,7 @@ err:
        }
 
        mutex_unlock(&bc->lock);
+       memalloc_nofs_restore(flags);
        return ERR_PTR(-ENOMEM);
 }
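
The rewritten error path parks a partially set-up node back on bc->freed before falling back to cannibalizing. The cannibalize protocol: one per-filesystem lock (bc->alloc_lock) admits a single thread, and that thread may then evict a reclaimable cached node, guaranteeing forward progress under memory pressure without two allocators evicting each other's nodes. Callers use it roughly like this (a sketch; real callers retry more carefully):

    static struct btree *alloc_or_cannibalize(struct bch_fs *c)
    {
            struct btree *b = bch2_btree_node_mem_alloc(c);

            if (IS_ERR(b) && !bch2_btree_cache_cannibalize_lock(c, NULL)) {
                    /* with alloc_lock == current, the err path above
                     * steals an existing cached node for us */
                    b = bch2_btree_node_mem_alloc(c);
                    bch2_btree_cache_cannibalize_unlock(c);
            }

            return b;
    }
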
 
@@ -576,6 +630,7 @@ err:
 static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
                                struct btree_iter *iter,
                                const struct bkey_i *k,
+                               enum btree_id btree_id,
                                unsigned level,
                                enum six_lock_type lock_type,
                                bool sync)
@@ -583,60 +638,65 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
 
+       BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
        /*
         * Parent node must be locked, else we could read in a btree node that's
         * been freed:
         */
-       BUG_ON(!btree_node_locked(iter, level + 1));
-       BUG_ON(level >= BTREE_MAX_DEPTH);
+       if (iter && !bch2_btree_node_relock(iter, level + 1))
+               return ERR_PTR(-EINTR);
 
        b = bch2_btree_node_mem_alloc(c);
        if (IS_ERR(b))
                return b;
 
        bkey_copy(&b->key, k);
-       if (bch2_btree_node_hash_insert(bc, b, level, iter->btree_id)) {
+       if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
                /* raced with another fill: */
 
                /* mark as unhashed... */
-               PTR_HASH(&b->key) = 0;
+               b->hash_val = 0;
 
                mutex_lock(&bc->lock);
                list_add(&b->list, &bc->freeable);
                mutex_unlock(&bc->lock);
 
-               six_unlock_write(&b->lock);
-               six_unlock_intent(&b->lock);
+               six_unlock_write(&b->c.lock);
+               six_unlock_intent(&b->c.lock);
                return NULL;
        }
 
        /*
-        * If the btree node wasn't cached, we can't drop our lock on
-        * the parent until after it's added to the cache - because
-        * otherwise we could race with a btree_split() freeing the node
-        * we're trying to lock.
+        * Unlock before doing IO:
         *
-        * But the deadlock described below doesn't exist in this case,
-        * so it's safe to not drop the parent lock until here:
+        * XXX: ideally should be dropping all btree node locks here
         */
-       if (btree_node_read_locked(iter, level + 1))
+       if (iter && btree_node_read_locked(iter, level + 1))
                btree_node_unlock(iter, level + 1);
 
        bch2_btree_node_read(c, b, sync);
 
-       six_unlock_write(&b->lock);
+       six_unlock_write(&b->c.lock);
 
        if (!sync) {
-               six_unlock_intent(&b->lock);
+               six_unlock_intent(&b->c.lock);
                return NULL;
        }
 
        if (lock_type == SIX_LOCK_read)
-               six_lock_downgrade(&b->lock);
+               six_lock_downgrade(&b->c.lock);
 
        return b;
 }
 
+static int lock_node_check_fn(struct six_lock *lock, void *p)
+{
+       struct btree *b = container_of(lock, struct btree, c.lock);
+       const struct bkey_i *k = p;
+
+       return b->hash_val == btree_ptr_hash_val(k) ? 0 : -1;
+}
+
 /**
  * bch2_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
@@ -649,34 +709,27 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
                                  const struct bkey_i *k, unsigned level,
                                  enum six_lock_type lock_type,
-                                 bool may_drop_locks)
+                                 unsigned long trace_ip)
 {
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
        struct bset_tree *t;
 
-       /*
-        * XXX: locking optimization
-        *
-        * we can make the locking looser here - caller can drop lock on parent
-        * node before locking child node (and potentially blocking): we just
-        * have to have bch2_btree_node_fill() call relock on the parent and
-        * return -EINTR if that fails
-        */
-       EBUG_ON(!btree_node_locked(iter, level + 1));
        EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+       b = btree_node_mem_ptr(k);
+       if (b)
+               goto lock_node;
 retry:
-       rcu_read_lock();
        b = btree_cache_find(bc, k);
-       rcu_read_unlock();
-
        if (unlikely(!b)) {
                /*
                 * We must have the parent locked to call bch2_btree_node_fill(),
                 * else we could read in a btree node from disk that's been
                 * freed:
                 */
-               b = bch2_btree_node_fill(c, iter, k, level, lock_type, true);
+               b = bch2_btree_node_fill(c, iter, k, iter->btree_id,
+                                        level, lock_type, true);
 
                /* We raced and found the btree node in the cache */
                if (!b)
@@ -685,6 +738,7 @@ retry:
                if (IS_ERR(b))
                        return b;
        } else {
+lock_node:
                /*
                 * There's a potential deadlock with splits and insertions into
                 * interior nodes we have to avoid:
@@ -706,7 +760,7 @@ retry:
                 * free it:
                 *
                 * To guard against this, btree nodes are evicted from the cache
-                * when they're freed - and PTR_HASH() is zeroed out, which we
+                * when they're freed - and b->hash_val is zeroed out, which we
                 * check for after we lock the node.
                 *
                 * Then, bch2_btree_node_relock() on the parent will fail - because
@@ -716,22 +770,26 @@ retry:
                if (btree_node_read_locked(iter, level + 1))
                        btree_node_unlock(iter, level + 1);
 
-               if (!btree_node_lock(b, k->k.p, level, iter,
-                                    lock_type, may_drop_locks))
+               if (!btree_node_lock(b, k->k.p, level, iter, lock_type,
+                                    lock_node_check_fn, (void *) k, trace_ip)) {
+                       if (b->hash_val != btree_ptr_hash_val(k))
+                               goto retry;
                        return ERR_PTR(-EINTR);
+               }
 
-               if (unlikely(PTR_HASH(&b->key) != PTR_HASH(k) ||
-                            b->level != level ||
+               if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+                            b->c.level != level ||
                             race_fault())) {
-                       six_unlock_type(&b->lock, lock_type);
+                       six_unlock_type(&b->c.lock, lock_type);
                        if (bch2_btree_node_relock(iter, level + 1))
                                goto retry;
 
-                       trans_restart();
+                       trace_trans_restart_btree_node_reused(iter->trans->ip);
                        return ERR_PTR(-EINTR);
                }
        }
 
+       /* XXX: waiting on IO with btree locks held: */
        wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                       TASK_UNINTERRUPTIBLE);
 
@@ -746,42 +804,145 @@ retry:
        }
 
        /* avoid atomic set bit if it's not needed: */
-       if (btree_node_accessed(b))
+       if (!btree_node_accessed(b))
                set_btree_node_accessed(b);
 
        if (unlikely(btree_node_read_error(b))) {
-               six_unlock_type(&b->lock, lock_type);
+               six_unlock_type(&b->c.lock, lock_type);
                return ERR_PTR(-EIO);
        }
 
-       EBUG_ON(b->btree_id != iter->btree_id ||
-               BTREE_NODE_LEVEL(b->data) != level ||
-               bkey_cmp(b->data->max_key, k->k.p));
+       EBUG_ON(b->c.btree_id != iter->btree_id);
+       EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+       EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
+       EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+               bpos_cmp(b->data->min_key,
+                        bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
 
        return b;
 }
 
+struct btree *bch2_btree_node_get_noiter(struct bch_fs *c,
+                                        const struct bkey_i *k,
+                                        enum btree_id btree_id,
+                                        unsigned level,
+                                        bool nofill)
+{
+       struct btree_cache *bc = &c->btree_cache;
+       struct btree *b;
+       struct bset_tree *t;
+       int ret;
+
+       EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+       b = btree_node_mem_ptr(k);
+       if (b)
+               goto lock_node;
+retry:
+       b = btree_cache_find(bc, k);
+       if (unlikely(!b)) {
+               if (nofill)
+                       goto out;
+
+               b = bch2_btree_node_fill(c, NULL, k, btree_id,
+                                        level, SIX_LOCK_read, true);
+
+               /* We raced and found the btree node in the cache */
+               if (!b)
+                       goto retry;
+
+               if (IS_ERR(b) &&
+                   !bch2_btree_cache_cannibalize_lock(c, NULL))
+                       goto retry;
+
+               if (IS_ERR(b))
+                       goto out;
+       } else {
+lock_node:
+               ret = six_lock_read(&b->c.lock, lock_node_check_fn, (void *) k);
+               if (ret)
+                       goto retry;
+
+               if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
+                            b->c.btree_id != btree_id ||
+                            b->c.level != level)) {
+                       six_unlock_read(&b->c.lock);
+                       goto retry;
+               }
+       }
+
+       /* XXX: waiting on IO with btree locks held: */
+       wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
+                      TASK_UNINTERRUPTIBLE);
+
+       prefetch(b->aux_data);
+
+       for_each_bset(b, t) {
+               void *p = (u64 *) b->aux_data + t->aux_data_offset;
+
+               prefetch(p + L1_CACHE_BYTES * 0);
+               prefetch(p + L1_CACHE_BYTES * 1);
+               prefetch(p + L1_CACHE_BYTES * 2);
+       }
+
+       /* avoid atomic set bit if it's not needed: */
+       if (!btree_node_accessed(b))
+               set_btree_node_accessed(b);
+
+       if (unlikely(btree_node_read_error(b))) {
+               six_unlock_read(&b->c.lock);
+               b = ERR_PTR(-EIO);
+               goto out;
+       }
+
+       EBUG_ON(b->c.btree_id != btree_id);
+       EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
+       EBUG_ON(bpos_cmp(b->data->max_key, k->k.p));
+       EBUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
+               bpos_cmp(b->data->min_key,
+                        bkey_i_to_btree_ptr_v2(&b->key)->v.min_key));
+out:
+       bch2_btree_cache_cannibalize_unlock(c);
+       return b;
+}
+
 struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
                                          struct btree_iter *iter,
                                          struct btree *b,
-                                         bool may_drop_locks,
                                          enum btree_node_sibling sib)
 {
+       struct btree_trans *trans = iter->trans;
        struct btree *parent;
        struct btree_node_iter node_iter;
        struct bkey_packed *k;
-       BKEY_PADDED(k) tmp;
+       struct bkey_buf tmp;
        struct btree *ret = NULL;
-       unsigned level = b->level;
+       unsigned level = b->c.level;
+
+       bch2_bkey_buf_init(&tmp);
 
        parent = btree_iter_node(iter, level + 1);
        if (!parent)
                return NULL;
 
-       if (!bch2_btree_node_relock(iter, level + 1))
-               goto out_upgrade;
+       /*
+        * There's a corner case where a btree_iter might have a node locked
+        * that is just outside its current pos - when
+        * bch2_btree_iter_set_pos_same_leaf() gets to the end of the node.
+        *
+        * But the lock ordering checks in __bch2_btree_node_lock() go off of
+        * iter->pos, not the node's key: so if the iterator is marked as
+        * needing to be traversed, we risk deadlock if we don't bail out here:
+        */
+       if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
+               return ERR_PTR(-EINTR);
+
+       if (!bch2_btree_node_relock(iter, level + 1)) {
+               ret = ERR_PTR(-EINTR);
+               goto out;
+       }
 
-       node_iter = iter->l[parent->level].iter;
+       node_iter = iter->l[parent->c.level].iter;
 
        k = bch2_btree_node_iter_peek_all(&node_iter, parent);
        BUG_ON(bkey_cmp_left_packed(parent, k, &b->key.k.p));
@@ -793,30 +954,30 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
        if (!k)
                goto out;
 
-       bch2_bkey_unpack(parent, &tmp.k, k);
+       bch2_bkey_buf_unpack(&tmp, c, parent, k);
 
-       ret = bch2_btree_node_get(c, iter, &tmp.k, level,
-                                 SIX_LOCK_intent, may_drop_locks);
+       ret = bch2_btree_node_get(c, iter, tmp.k, level,
+                                 SIX_LOCK_intent, _THIS_IP_);
 
-       if (PTR_ERR_OR_ZERO(ret) == -EINTR && may_drop_locks) {
+       if (PTR_ERR_OR_ZERO(ret) == -EINTR && !trans->nounlock) {
                struct btree_iter *linked;
 
                if (!bch2_btree_node_relock(iter, level + 1))
-                       goto out_upgrade;
+                       goto out;
 
                /*
                 * We might have got -EINTR because trylock failed, and we're
                 * holding other locks that would cause us to deadlock:
                 */
-               for_each_linked_btree_iter(iter, linked)
-                       if (btree_iter_cmp(iter, linked) < 0)
+               trans_for_each_iter(trans, linked)
+                       if (btree_iter_lock_cmp(iter, linked) < 0)
                                __bch2_btree_iter_unlock(linked);
 
                if (sib == btree_prev_sib)
                        btree_node_unlock(iter, level);
 
-               ret = bch2_btree_node_get(c, iter, &tmp.k, level,
-                                         SIX_LOCK_intent, may_drop_locks);
+               ret = bch2_btree_node_get(c, iter, tmp.k, level,
+                                         SIX_LOCK_intent, _THIS_IP_);
 
                /*
                 * before btree_iter_relock() calls btree_iter_verify_locks():
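
The -EINTR recovery above applies the transaction-wide deadlock-avoidance rule: node locks are totally ordered (btree_iter_lock_cmp() replaces the old btree_iter_cmp()), and before blocking on a lock a thread first releases every held lock that sorts after the one it wants, so no cycle of waiters can form. Abstractly (hypothetical ranks and do_unlock(), not the bcachefs API):

    /* drop every held lock ranking after the one we're about to block on */
    static void drop_later_locks(int want_rank, const int *held_ranks,
                                 int nr_held, void (*do_unlock)(int idx))
    {
            int i;

            for (i = 0; i < nr_held; i++)
                    if (held_ranks[i] > want_rank)
                            do_unlock(i);
    }
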
@@ -828,22 +989,21 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,
                        btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
 
                        if (!IS_ERR(ret)) {
-                               six_unlock_intent(&ret->lock);
+                               six_unlock_intent(&ret->c.lock);
                                ret = ERR_PTR(-EINTR);
                        }
                }
 
-               bch2_btree_iter_relock(iter);
+               bch2_trans_relock(trans);
        }
 out:
        if (btree_lock_want(iter, level + 1) == BTREE_NODE_UNLOCKED)
                btree_node_unlock(iter, level + 1);
 
-       bch2_btree_iter_verify_locks(iter);
+       if (PTR_ERR_OR_ZERO(ret) == -EINTR)
+               bch2_btree_iter_upgrade(iter, level + 2);
 
-       BUG_ON((!may_drop_locks || !IS_ERR(ret)) &&
-              (iter->uptodate >= BTREE_ITER_NEED_RELOCK ||
-               !btree_node_locked(iter, level)));
+       BUG_ON(!IS_ERR(ret) && !btree_node_locked(iter, level));
 
        if (!IS_ERR_OR_NULL(ret)) {
                struct btree *n1 = ret, *n2 = b;
@@ -851,36 +1011,46 @@ out:
                if (sib != btree_prev_sib)
                        swap(n1, n2);
 
-               BUG_ON(bkey_cmp(btree_type_successor(n1->btree_id,
-                                                    n1->key.k.p),
-                               n2->data->min_key));
+               if (bpos_cmp(bpos_successor(n1->key.k.p),
+                            n2->data->min_key)) {
+                       char buf1[200], buf2[200];
+
+                       bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&n1->key));
+                       bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&n2->key));
+
+                       bch2_fs_inconsistent(c, "btree topology error at btree %s level %u:\n"
+                                            "prev: %s\n"
+                                            "next: %s\n",
+                                            bch2_btree_ids[iter->btree_id], level,
+                                            buf1, buf2);
+
+                       six_unlock_intent(&ret->c.lock);
+                       ret = NULL;
+               }
        }
 
+       bch2_btree_trans_verify_locks(trans);
+
+       bch2_bkey_buf_exit(&tmp, c);
+
        return ret;
-out_upgrade:
-       if (may_drop_locks)
-               bch2_btree_iter_upgrade(iter, level + 2, true);
-       ret = ERR_PTR(-EINTR);
-       goto out;
 }
 
 void bch2_btree_node_prefetch(struct bch_fs *c, struct btree_iter *iter,
-                             const struct bkey_i *k, unsigned level)
+                             const struct bkey_i *k,
+                             enum btree_id btree_id, unsigned level)
 {
        struct btree_cache *bc = &c->btree_cache;
        struct btree *b;
 
-       BUG_ON(!btree_node_locked(iter, level + 1));
+       BUG_ON(iter && !btree_node_locked(iter, level + 1));
        BUG_ON(level >= BTREE_MAX_DEPTH);
 
-       rcu_read_lock();
        b = btree_cache_find(bc, k);
-       rcu_read_unlock();
-
        if (b)
                return;
 
-       bch2_btree_node_fill(c, iter, k, level, SIX_LOCK_read, false);
+       bch2_btree_node_fill(c, iter, k, btree_id, level, SIX_LOCK_read, false);
 }
 
 void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
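
The sibling-check hunk above downgrades a topology violation from BUG_ON() to bch2_fs_inconsistent() plus a NULL return: adjacent siblings must tile the key space exactly, i.e. the successor of the previous node's max_key equals the next node's min_key. With bpos simplified to two fields (the real struct also carries a snapshot field, which bpos_successor() increments first), the invariant amounts to:

    struct pos { u64 inode, offset; };      /* simplified bpos */

    static struct pos pos_successor(struct pos p)
    {
            if (!++p.offset)                /* carry on offset wraparound */
                    p.inode++;
            return p;
    }

    static bool siblings_contiguous(struct pos prev_max, struct pos next_min)
    {
            struct pos s = pos_successor(prev_max);

            return s.inode == next_min.inode && s.offset == next_min.offset;
    }
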
@@ -893,15 +1063,14 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
 
        bch2_btree_keys_stats(b, &stats);
 
-       pr_buf(out,
-              "l %u %llu:%llu - %llu:%llu:\n"
-              "    ptrs: ",
-              b->level,
-              b->data->min_key.inode,
-              b->data->min_key.offset,
-              b->data->max_key.inode,
-              b->data->max_key.offset);
+       pr_buf(out, "l %u ", b->c.level);
+       bch2_bpos_to_text(out, b->data->min_key);
+       pr_buf(out, " - ");
+       bch2_bpos_to_text(out, b->data->max_key);
+       pr_buf(out, ":\n"
+              "    ptrs: ");
        bch2_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+
        pr_buf(out, "\n"
               "    format: u64s %u fields %u %u %u %u %u\n"
               "    unpack fn len: %u\n"
@@ -910,9 +1079,7 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
               "    nr packed keys %u\n"
               "    nr unpacked keys %u\n"
               "    floats %zu\n"
-              "    failed unpacked %zu\n"
-              "    failed prev %zu\n"
-              "    failed overflow %zu\n",
+              "    failed unpacked %zu\n",
               f->key_u64s,
               f->bits_per_field[0],
               f->bits_per_field[1],
@@ -929,7 +1096,12 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
               b->nr.packed_keys,
               b->nr.unpacked_keys,
               stats.floats,
-              stats.failed_unpacked,
-              stats.failed_prev,
-              stats.failed_overflow);
+              stats.failed);
+}
+
+void bch2_btree_cache_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       pr_buf(out, "nr nodes:\t\t%u\n", c->btree_cache.used);
+       pr_buf(out, "nr dirty:\t\t%u\n", atomic_read(&c->btree_cache.dirty));
+       pr_buf(out, "cannibalize lock:\t%p\n", c->btree_cache.alloc_lock);
 }