Update bcachefs sources to 1a510b00b6 bcachefs: Increase BTREE_TRANS_MEM_MAX

diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index b0e64957d4930f4b6339c98cdac83d2944fe6c87..d2a8121c52c9989bcfb554acb0c3e72adf4cc92b 100644
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
+// SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
 #include "bkey_methods.h"
+#include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_iter.h"
+#include "btree_key_cache.h"
 #include "btree_locking.h"
+#include "btree_update.h"
 #include "debug.h"
+#include "error.h"
 #include "extents.h"
+#include "journal.h"
+#include "replicas.h"
 
 #include <linux/prefetch.h>
 #include <trace/events/bcachefs.h>
 
-#define BTREE_ITER_NOT_END     ((struct btree *) 1)
+static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
+static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
+static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *);
+static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
+
+static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
+{
+       EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
+
+       /* Are we iterating over keys in all snapshots? */
+       if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+               p = bpos_successor(p);
+       } else {
+               p = bpos_nosnap_successor(p);
+               p.snapshot = iter->snapshot;
+       }
+
+       return p;
+}
+
+static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
+{
+       EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);
+
+       /* Are we iterating over keys in all snapshots? */
+       if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
+               p = bpos_predecessor(p);
+       } else {
+               p = bpos_nosnap_predecessor(p);
+               p.snapshot = iter->snapshot;
+       }
+
+       return p;
+}
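+
+/*
+ * Example: with BTREE_ITER_ALL_SNAPSHOTS the snapshot field is the least
+ * significant part of the search key, so the successor of (inode 1, offset 8,
+ * snapshot 2) is (1, 8, 3); without that flag the snapshot field is stepped
+ * over - the successor of (1, 8, *) is (1, 9), with the snapshot field then
+ * forced back to iter->snapshot.
+ */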
 
 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 {
-       return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
+       return l < BTREE_MAX_DEPTH &&
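+               /* pointers below 128 are BTREE_ITER_NO_NODE_* sentinels: */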
+               (unsigned long) iter->l[l].b >= 128;
 }
 
-/* Btree node locking: */
+static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
+{
+       struct bpos pos = iter->pos;
 
-/*
- * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
- * succeed:
- */
-void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
+       if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+           bkey_cmp(pos, POS_MAX))
+               pos = bkey_successor(iter, pos);
+       return pos;
+}
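+
+/*
+ * Extents are keyed by their end position and cover [bkey_start_pos(k), k.p):
+ * searching from the successor of iter->pos is what makes the extent
+ * *containing* iter->pos - one with k.p > iter->pos - visible to the lookup.
+ */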
+
+static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
+                                             struct btree *b)
 {
-       struct btree_iter *linked;
+       return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
+}
 
-       EBUG_ON(iter->nodes[b->level] != b);
-       EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);
+static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
+                                            struct btree *b)
+{
+       return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
+}
 
-       for_each_linked_btree_node(iter, b, linked)
-               linked->lock_seq[b->level] += 2;
+static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
+                                         struct btree *b)
+{
+       return iter->btree_id == b->c.btree_id &&
+               !btree_iter_pos_before_node(iter, b) &&
+               !btree_iter_pos_after_node(iter, b);
+}
 
-       iter->lock_seq[b->level] += 2;
+/* Btree node locking: */
 
-       six_unlock_write(&b->lock);
+void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
+{
+       bch2_btree_node_unlock_write_inlined(b, iter);
 }
 
-void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
+void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
 {
        struct btree_iter *linked;
        unsigned readers = 0;
 
-       EBUG_ON(iter->nodes[b->level] != b);
-       EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);
+       EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
 
-       if (six_trylock_write(&b->lock))
-               return;
-
-       for_each_linked_btree_iter(iter, linked)
-               if (linked->nodes[b->level] == b &&
-                   btree_node_read_locked(linked, b->level))
+       trans_for_each_iter(iter->trans, linked)
+               if (linked->l[b->c.level].b == b &&
+                   btree_node_read_locked(linked, b->c.level))
                        readers++;
 
-       if (likely(!readers)) {
-               six_lock_write(&b->lock);
+       /*
+        * Must drop our read locks before calling six_lock_write() -
+        * six_unlock() won't do wakeups until the reader count
+        * goes to 0, and it's safe because we have the node intent
+        * locked:
+        */
+       atomic64_sub(__SIX_VAL(read_lock, readers),
+                    &b->c.lock.state.counter);
+       btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
+       atomic64_add(__SIX_VAL(read_lock, readers),
+                    &b->c.lock.state.counter);
+}
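+
+/*
+ * Illustration of the reader count juggling above: if linked iterators hold
+ * three read locks on b, readers == 3; subtracting them from the lock's
+ * reader count means six_lock_write() only waits for *other* threads' read
+ * locks to drain, and they're added back once the write lock is held. This
+ * is safe because our intent lock keeps anyone else from taking the write
+ * lock in the meantime.
+ */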
+
+bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+{
+       struct btree *b = btree_iter_node(iter, level);
+       int want = __btree_lock_want(iter, level);
+
+       if (!is_btree_node(iter, level))
+               return false;
+
+       if (race_fault())
+               return false;
+
+       if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
+           (btree_node_lock_seq_matches(iter, b, level) &&
+            btree_node_lock_increment(iter->trans, b, level, want))) {
+               mark_btree_node_locked(iter, level, want);
+               return true;
        } else {
-               /*
-                * Must drop our read locks before calling six_lock_write() -
-                * six_unlock() won't do wakeups until the reader count
-                * goes to 0, and it's safe because we have the node intent
-                * locked:
-                */
-               atomic64_sub(__SIX_VAL(read_lock, readers),
-                            &b->lock.state.counter);
-               six_lock_write(&b->lock);
-               atomic64_add(__SIX_VAL(read_lock, readers),
-                            &b->lock.state.counter);
+               return false;
        }
 }
 
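+/*
+ * Upgrading to an intent lock can succeed three ways, in order of preference:
+ * tryupgrade a read lock we already hold; re-take the lock by sequence number
+ * if we hold nothing; or piggyback on another iterator in this transaction
+ * that already has the node intent locked (btree_node_lock_increment()).
+ */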
-bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
+static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
 {
-       struct btree_iter *linked;
-       struct btree *b = iter->nodes[level];
-       enum btree_node_locked_type want = btree_lock_want(iter, level);
-       enum btree_node_locked_type have = btree_node_locked_type(iter, level);
+       struct btree *b = iter->l[level].b;
 
-       if (want == have)
-               return true;
+       EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);
 
        if (!is_btree_node(iter, level))
                return false;
 
+       if (btree_node_intent_locked(iter, level))
+               return true;
+
        if (race_fault())
                return false;
 
-       if (have != BTREE_NODE_UNLOCKED
-           ? six_trylock_convert(&b->lock, have, want)
-           : six_relock_type(&b->lock, want, iter->lock_seq[level]))
+       if (btree_node_locked(iter, level)
+           ? six_lock_tryupgrade(&b->c.lock)
+           : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
                goto success;
 
-       for_each_linked_btree_iter(iter, linked)
-               if (linked->nodes[level] == b &&
-                   btree_node_locked_type(linked, level) == want &&
-                   iter->lock_seq[level] == b->lock.state.seq) {
-                       btree_node_unlock(iter, level);
-                       six_lock_increment(&b->lock, want);
-                       goto success;
-               }
+       if (btree_node_lock_seq_matches(iter, b, level) &&
+           btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+               btree_node_unlock(iter, level);
+               goto success;
+       }
 
        return false;
 success:
-       mark_btree_node_unlocked(iter, level);
-       mark_btree_node_locked(iter, level, want);
+       mark_btree_node_intent_locked(iter, level);
        return true;
 }
 
-/* Slowpath: */
-bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
-                          unsigned level,
-                          struct btree_iter *iter,
-                          enum six_lock_type type)
+static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
+                                       unsigned long trace_ip)
 {
-       struct btree_iter *linked;
+       unsigned l = iter->level;
+       int fail_idx = -1;
 
-       /* Can't have children locked before ancestors: */
-       EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));
-
-       /*
-        * Can't hold any read locks while we block taking an intent lock - see
-        * below for reasoning, and we should have already dropped any read
-        * locks in the current iterator
-        */
-       EBUG_ON(type == SIX_LOCK_intent &&
-               iter->nodes_locked != iter->nodes_intent_locked);
+       do {
+               if (!btree_iter_node(iter, l))
+                       break;
 
-       for_each_linked_btree_iter(iter, linked)
-               if (linked->nodes[level] == b &&
-                   btree_node_locked_type(linked, level) == type) {
-                       six_lock_increment(&b->lock, type);
-                       return true;
+               if (!(upgrade
+                     ? bch2_btree_node_upgrade(iter, l)
+                     : bch2_btree_node_relock(iter, l))) {
+                       (upgrade
+                        ? trace_node_upgrade_fail
+                        : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+                                       iter->btree_id, &iter->real_pos,
+                                       l, iter->l[l].lock_seq,
+                                       is_btree_node(iter, l)
+                                       ? 0
+                                       : (unsigned long) iter->l[l].b,
+                                       is_btree_node(iter, l)
+                                       ? iter->l[l].b->c.lock.state.seq
+                                       : 0);
+
+                       fail_idx = l;
+                       btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
                }
 
+               l++;
+       } while (l < iter->locks_want);
+
        /*
-        * Must lock btree nodes in key order - this case hapens when locking
-        * the prev sibling in btree node merging:
+        * When we fail to get a lock, we have to ensure that any child nodes
+        * can't be relocked so bch2_btree_iter_traverse has to walk back up to
+        * the node that we failed to relock:
         */
-       if (iter->nodes_locked &&
-           __ffs(iter->nodes_locked) == level &&
-           __btree_iter_cmp(iter->btree_id, pos, iter))
-               return false;
+       while (fail_idx >= 0) {
+               btree_node_unlock(iter, fail_idx);
+               iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
+               --fail_idx;
+       }
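+       /*
+        * E.g. with locks_want == 3: if level 2 failed to relock after levels
+        * 0 and 1 succeeded, fail_idx ends up at 2 and levels 2, 1 and 0 all
+        * get unlocked and poisoned, so the next traverse descends afresh
+        * from level 3.
+        */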
+
+       if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
+               iter->uptodate = BTREE_ITER_NEED_PEEK;
 
-       for_each_linked_btree_iter(iter, linked) {
+       bch2_btree_trans_verify_locks(iter->trans);
+
+       return iter->uptodate < BTREE_ITER_NEED_RELOCK;
+}
+
+static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
+                                 enum btree_iter_type type)
+{
+       return  type != BTREE_ITER_CACHED
+               ? container_of(_b, struct btree, c)->key.k.p
+               : container_of(_b, struct bkey_cached, c)->key.pos;
+}
+
+/* Slowpath: */
+bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
+                           unsigned level, struct btree_iter *iter,
+                           enum six_lock_type type,
+                           six_lock_should_sleep_fn should_sleep_fn, void *p,
+                           unsigned long ip)
+{
+       struct btree_trans *trans = iter->trans;
+       struct btree_iter *linked, *deadlock_iter = NULL;
+       u64 start_time = local_clock();
+       unsigned reason = 9;
+       bool ret;
+
+       /* Check if it's safe to block: */
+       trans_for_each_iter(trans, linked) {
                if (!linked->nodes_locked)
                        continue;
 
@@ -161,166 +264,442 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
                 */
                if (type == SIX_LOCK_intent &&
                    linked->nodes_locked != linked->nodes_intent_locked) {
-                       linked->locks_want = max_t(unsigned,
-                                                  linked->locks_want,
-                                                  iter->locks_want);
-                       return false;
+                       deadlock_iter = linked;
+                       reason = 1;
                }
 
-               /* We have to lock btree nodes in key order: */
-               if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
-                       return false;
+               if (linked->btree_id != iter->btree_id) {
+                       if (linked->btree_id > iter->btree_id) {
+                               deadlock_iter = linked;
+                               reason = 3;
+                       }
+                       continue;
+               }
+
+               /*
+                * Within the same btree, cached iterators come before
+                * non-cached iterators:
+                */
+               if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
+                       if (btree_iter_is_cached(iter)) {
+                               deadlock_iter = linked;
+                               reason = 4;
+                       }
+                       continue;
+               }
 
                /*
                 * Interior nodes must be locked before their descendants: if
                 * another iterator has possible descendants locked of the node
                 * we're about to lock, it must have the ancestors locked too:
                 */
-               if (linked->btree_id == iter->btree_id &&
-                   level > __fls(linked->nodes_locked)) {
-                       linked->locks_want = max_t(unsigned,
-                                                  linked->locks_want,
-                                                  iter->locks_want);
-                       return false;
+               if (level > __fls(linked->nodes_locked)) {
+                       deadlock_iter = linked;
+                       reason = 5;
+               }
+
+               /* Must lock btree nodes in key order: */
+               if (btree_node_locked(linked, level) &&
+                   bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+                                                btree_iter_type(linked))) <= 0) {
+                       deadlock_iter = linked;
+                       reason = 7;
+                       BUG_ON(trans->in_traverse_all);
                }
        }
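+       /*
+        * reason codes, from the checks above: 1 == blocking on an intent
+        * lock while holding read locks, 3 == btree id ordering violated,
+        * 4 == cached iterators order before non-cached within a btree,
+        * 5 == ancestors must be locked before descendants, 7 == nodes must
+        * be locked in key order; the initial 9 is never traced, since a
+        * reason is only reported when deadlock_iter was set.
+        */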
 
-       six_lock_type(&b->lock, type);
-       return true;
+       if (unlikely(deadlock_iter)) {
+               trace_trans_restart_would_deadlock(iter->trans->ip, ip,
+                               trans->in_traverse_all, reason,
+                               deadlock_iter->btree_id,
+                               btree_iter_type(deadlock_iter),
+                               &deadlock_iter->real_pos,
+                               iter->btree_id,
+                               btree_iter_type(iter),
+                               &pos);
+               return false;
+       }
+
+       if (six_trylock_type(&b->c.lock, type))
+               return true;
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+       trans->locking_iter_idx = iter->idx;
+       trans->locking_pos      = pos;
+       trans->locking_btree_id = iter->btree_id;
+       trans->locking_level    = level;
+       trans->locking          = b;
+#endif
+
+       ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+       trans->locking = NULL;
+#endif
+       if (ret)
+               bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
+                                      start_time);
+       return ret;
 }
 
 /* Btree iterator locking: */
 
-
-static void btree_iter_drop_extra_locks(struct btree_iter *iter)
+#ifdef CONFIG_BCACHEFS_DEBUG
+static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 {
        unsigned l;
 
-       while (iter->nodes_locked &&
-              (l = __fls(iter->nodes_locked)) > iter->locks_want) {
-               if (!btree_node_locked(iter, l))
-                       panic("l %u nodes_locked %u\n", l, iter->nodes_locked);
+       if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
+               BUG_ON(iter->nodes_locked);
+               return;
+       }
 
-               if (l > iter->level) {
-                       btree_node_unlock(iter, l);
-               } else if (btree_node_intent_locked(iter, l)) {
-                       six_lock_downgrade(&iter->nodes[l]->lock);
-                       iter->nodes_intent_locked ^= 1 << l;
-               }
+       for (l = 0; is_btree_node(iter, l); l++) {
+               if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
+                   !btree_node_locked(iter, l))
+                       continue;
+
+               BUG_ON(btree_lock_want(iter, l) !=
+                      btree_node_locked_type(iter, l));
        }
 }
 
-bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
-                                    unsigned new_locks_want)
+void bch2_btree_trans_verify_locks(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
+
+       trans_for_each_iter(trans, iter)
+               bch2_btree_iter_verify_locks(iter);
+}
+#else
+static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
+#endif
+
+__flatten
+static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
+{
+       return btree_iter_get_locks(iter, false, trace_ip);
+}
+
+bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
+                              unsigned new_locks_want)
 {
        struct btree_iter *linked;
-       unsigned l;
 
-       /* Drop locks we don't want anymore: */
-       if (new_locks_want < iter->locks_want)
-               for_each_linked_btree_iter(iter, linked)
-                       if (linked->locks_want > new_locks_want) {
-                               linked->locks_want = max_t(unsigned, 1,
-                                                          new_locks_want);
-                               btree_iter_drop_extra_locks(linked);
-                       }
+       EBUG_ON(iter->locks_want >= new_locks_want);
 
        iter->locks_want = new_locks_want;
-       btree_iter_drop_extra_locks(iter);
 
-       for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
-               if (!bch2_btree_node_relock(iter, l))
-                       goto fail;
+       if (btree_iter_get_locks(iter, true, _THIS_IP_))
+               return true;
 
-       return true;
-fail:
        /*
-        * Just an optimization: ancestor nodes must be locked before child
-        * nodes, so set locks_want on iterators that might lock ancestors
-        * before us to avoid getting -EINTR later:
+        * XXX: this is ugly - we'd prefer to not be mucking with other
+        * iterators in the btree_trans here.
+        *
+        * On failure to upgrade the iterator, setting iter->locks_want and
+        * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
+        * get the locks we want on transaction restart.
+        *
+        * But if this iterator was a clone, on transaction restart what we did
+        * to this iterator isn't going to be preserved.
+        *
+        * Possibly we could add an iterator field for the parent iterator when
+        * an iterator is a copy - for now, we'll just upgrade any other
+        * iterators with the same btree id.
+        *
+        * The code below used to be needed to ensure ancestor nodes get locked
+        * before interior nodes - now that's handled by
+        * bch2_btree_iter_traverse_all().
         */
-       for_each_linked_btree_iter(iter, linked)
-               if (linked->btree_id == iter->btree_id &&
-                   btree_iter_cmp(linked, iter) <= 0)
-                       linked->locks_want = max_t(unsigned, linked->locks_want,
-                                                  new_locks_want);
+       trans_for_each_iter(iter->trans, linked)
+               if (linked != iter &&
+                   btree_iter_type(linked) == btree_iter_type(iter) &&
+                   linked->btree_id == iter->btree_id &&
+                   linked->locks_want < new_locks_want) {
+                       linked->locks_want = new_locks_want;
+                       btree_iter_get_locks(linked, true, _THIS_IP_);
+               }
+
        return false;
 }
 
-static void __bch2_btree_iter_unlock(struct btree_iter *iter)
+void __bch2_btree_iter_downgrade(struct btree_iter *iter,
+                                unsigned new_locks_want)
+{
+       unsigned l;
+
+       EBUG_ON(iter->locks_want < new_locks_want);
+
+       iter->locks_want = new_locks_want;
+
+       while (iter->nodes_locked &&
+              (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
+               if (l > iter->level) {
+                       btree_node_unlock(iter, l);
+               } else {
+                       if (btree_node_intent_locked(iter, l)) {
+                               six_lock_downgrade(&iter->l[l].b->c.lock);
+                               iter->nodes_intent_locked ^= 1 << l;
+                       }
+                       break;
+               }
+       }
+
+       bch2_btree_trans_verify_locks(iter->trans);
+}
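+
+/*
+ * E.g. a leaf iterator going from locks_want 3 to locks_want 1: the intent
+ * locks at levels 2 and 1 are dropped outright, while level 0 (still below
+ * locks_want) keeps its intent lock. The intent -> read downgrade branch
+ * applies at iter->level itself - a node iterator at level 1 whose
+ * locks_want drops to 1 keeps the node locked, but only for read.
+ */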
+
+void bch2_trans_downgrade(struct btree_trans *trans)
 {
-       iter->flags &= ~BTREE_ITER_UPTODATE;
+       struct btree_iter *iter;
 
-       while (iter->nodes_locked)
-               btree_node_unlock(iter, __ffs(iter->nodes_locked));
+       trans_for_each_iter(trans, iter)
+               bch2_btree_iter_downgrade(iter);
 }
 
-int bch2_btree_iter_unlock(struct btree_iter *iter)
+/* Btree transaction locking: */
+
+static inline bool btree_iter_should_be_locked(struct btree_trans *trans,
+                                              struct btree_iter *iter)
 {
-       struct btree_iter *linked;
+       return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
+               iter->should_be_locked;
+}
+
+bool bch2_trans_relock(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
+
+       trans_for_each_iter(trans, iter)
+               if (!bch2_btree_iter_relock(iter, _RET_IP_) &&
+                   btree_iter_should_be_locked(trans, iter)) {
+                       trace_trans_restart_relock(trans->ip, _RET_IP_,
+                                       iter->btree_id, &iter->real_pos);
+                       return false;
+               }
+       return true;
+}
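+
+/*
+ * bch2_trans_relock() is the counterpart to bch2_trans_unlock(): it's called
+ * after an operation that had to drop locks (e.g. blocking on IO), and if an
+ * iterator whose position matters can't be relocked it returns false,
+ * signalling the caller to restart the transaction.
+ */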
 
-       for_each_linked_btree_iter(iter, linked)
-               __bch2_btree_iter_unlock(linked);
-       __bch2_btree_iter_unlock(iter);
+void bch2_trans_unlock(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
 
-       return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
+       trans_for_each_iter(trans, iter)
+               __bch2_btree_iter_unlock(iter);
 }
 
 /* Btree iterator: */
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
-static void __bch2_btree_iter_verify(struct btree_iter *iter,
-                                   struct btree *b)
+static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
 {
-       struct btree_node_iter *node_iter = &iter->node_iters[b->level];
-       struct btree_node_iter tmp = *node_iter;
-       struct bkey_packed *k;
+       struct bkey_cached *ck;
+       bool locked = btree_node_locked(iter, 0);
+
+       if (!bch2_btree_node_relock(iter, 0))
+               return;
+
+       ck = (void *) iter->l[0].b;
+       BUG_ON(ck->key.btree_id != iter->btree_id ||
+              bkey_cmp(ck->key.pos, iter->pos));
+
+       if (!locked)
+               btree_node_unlock(iter, 0);
+}
+
+static void bch2_btree_iter_verify_level(struct btree_iter *iter,
+                                        unsigned level)
+{
+       struct btree_iter_level *l;
+       struct btree_node_iter tmp;
+       bool locked;
+       struct bkey_packed *p, *k;
+       char buf1[100], buf2[100], buf3[100];
+       const char *msg;
+
+       if (!bch2_debug_check_iterators)
+               return;
+
+       l       = &iter->l[level];
+       tmp     = l->iter;
+       locked  = btree_node_locked(iter, level);
+
+       if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+               if (!level)
+                       bch2_btree_iter_verify_cached(iter);
+               return;
+       }
+
+       BUG_ON(iter->level < iter->min_depth);
+
+       if (!btree_iter_node(iter, level))
+               return;
+
+       if (!bch2_btree_node_relock(iter, level))
+               return;
+
+       BUG_ON(!btree_iter_pos_in_node(iter, l->b));
 
-       bch2_btree_node_iter_verify(node_iter, b);
+       /*
+        * BTREE_ITER_NODES iterators don't use the node iterator at the
+        * leaf level:
+        */
+       if (btree_iter_type(iter) == BTREE_ITER_NODES &&
+           level <= iter->min_depth)
+               goto unlock;
+
+       bch2_btree_node_iter_verify(&l->iter, l->b);
 
        /*
         * For interior nodes, the iterator will have skipped past
         * deleted keys:
+        *
+        * For extents, the iterator may have skipped past deleted keys (but not
+        * whiteouts)
         */
-       k = b->level
-               ? bch2_btree_node_iter_prev(&tmp, b)
-               : bch2_btree_node_iter_prev_all(&tmp, b);
-       if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
-                               iter->flags & BTREE_ITER_IS_EXTENTS)) {
-               char buf[100];
-               struct bkey uk = bkey_unpack_key(b, k);
+       p = level || btree_node_type_is_extents(iter->btree_id)
+               ? bch2_btree_node_iter_prev(&tmp, l->b)
+               : bch2_btree_node_iter_prev_all(&tmp, l->b);
+       k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+
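+       /* the invariant checked below: prev < iter->real_pos <= cur */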
+       if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
+               msg = "before";
+               goto err;
+       }
 
-               bch2_bkey_to_text(buf, sizeof(buf), &uk);
-               panic("prev key should be before after pos:\n%s\n%llu:%llu\n",
-                     buf, iter->pos.inode, iter->pos.offset);
+       if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
+               msg = "after";
+               goto err;
        }
+unlock:
+       if (!locked)
+               btree_node_unlock(iter, level);
+       return;
+err:
+       strcpy(buf2, "(none)");
+       strcpy(buf3, "(none)");
 
-       k = bch2_btree_node_iter_peek_all(node_iter, b);
-       if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
-                               iter->flags & BTREE_ITER_IS_EXTENTS)) {
-               char buf[100];
-               struct bkey uk = bkey_unpack_key(b, k);
+       bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
+
+       if (p) {
+               struct bkey uk = bkey_unpack_key(l->b, p);
+               bch2_bkey_to_text(&PBUF(buf2), &uk);
+       }
 
-               bch2_bkey_to_text(buf, sizeof(buf), &uk);
-               panic("next key should be before iter pos:\n%llu:%llu\n%s\n",
-                     iter->pos.inode, iter->pos.offset, buf);
+       if (k) {
+               struct bkey uk = bkey_unpack_key(l->b, k);
+               bch2_bkey_to_text(&PBUF(buf3), &uk);
        }
+
+       panic("iterator should be %s key at level %u:\n"
+             "iter pos %s\n"
+             "prev key %s\n"
+             "cur  key %s\n",
+             msg, level, buf1, buf2, buf3);
 }
 
-void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
+static void bch2_btree_iter_verify(struct btree_iter *iter)
 {
-       struct btree_iter *linked;
+       enum btree_iter_type type = btree_iter_type(iter);
+       unsigned i;
+
+       EBUG_ON(iter->btree_id >= BTREE_ID_NR);
+
+       BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+              iter->pos.snapshot != iter->snapshot);
+
+       BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+              (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+
+       BUG_ON(type == BTREE_ITER_NODES &&
+              !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
+
+       BUG_ON(type != BTREE_ITER_NODES &&
+              (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+              !btree_type_has_snapshots(iter->btree_id));
+
+       bch2_btree_iter_verify_locks(iter);
+
+       for (i = 0; i < BTREE_MAX_DEPTH; i++)
+               bch2_btree_iter_verify_level(iter, i);
+}
+
+static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
+{
+       enum btree_iter_type type = btree_iter_type(iter);
 
-       if (iter->nodes[b->level] == b)
-               __bch2_btree_iter_verify(iter, b);
+       BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
+              iter->pos.snapshot != iter->snapshot);
 
-       for_each_linked_btree_node(iter, b, linked)
-               __bch2_btree_iter_verify(iter, b);
+       BUG_ON((type == BTREE_ITER_KEYS ||
+               type == BTREE_ITER_CACHED) &&
+              (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
+               bkey_cmp(iter->pos, iter->k.p) > 0));
 }
 
+void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
+{
+       struct btree_iter *iter;
+
+       if (!bch2_debug_check_iterators)
+               return;
+
+       trans_for_each_iter_with_node(trans, b, iter)
+               bch2_btree_iter_verify_level(iter, b->c.level);
+}
+
+#else
+
+static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
+static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
+
 #endif
 
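+/*
+ * Repoint the node iterator's position within bset @t to @k: if the iterator
+ * already has a position in @t it's overwritten and the sets re-sorted;
+ * otherwise @t is pushed as a new set.
+ */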
+static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
+                                       struct btree *b,
+                                       struct bset_tree *t,
+                                       struct bkey_packed *k)
+{
+       struct btree_node_iter_set *set;
+
+       btree_node_iter_for_each(iter, set)
+               if (set->end == t->end_offset) {
+                       set->k = __btree_node_key_to_offset(b, k);
+                       bch2_btree_node_iter_sort(iter, b);
+                       return;
+               }
+
+       bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
+}
+
+static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
+                                              struct btree *b,
+                                              struct bkey_packed *where)
+{
+       struct btree_iter_level *l = &iter->l[b->c.level];
+
+       if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
+               return;
+
+       if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
+               bch2_btree_node_iter_advance(&l->iter, l->b);
+
+       btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+}
+
+void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
+                                     struct btree *b,
+                                     struct bkey_packed *where)
+{
+       struct btree_iter *linked;
+
+       trans_for_each_iter_with_node(iter->trans, b, linked) {
+               __bch2_btree_iter_fix_key_modified(linked, b, where);
+               bch2_btree_iter_verify_level(linked, b->c.level);
+       }
+}
+
 static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
                                      struct btree *b,
                                      struct btree_node_iter *node_iter,
@@ -333,7 +712,11 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
        struct btree_node_iter_set *set;
        unsigned offset = __btree_node_key_to_offset(b, where);
        int shift = new_u64s - clobber_u64s;
-       unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;
+       unsigned old_end = t->end_offset - shift;
+       unsigned orig_iter_pos = node_iter->data[0].k;
+       bool iter_current_key_modified =
+               orig_iter_pos >= offset &&
+               orig_iter_pos <= offset + clobber_u64s;
 
        btree_node_iter_for_each(node_iter, set)
                if (set->end == old_end)
@@ -341,162 +724,182 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
 
        /* didn't find the bset in the iterator - might have to re-add it: */
        if (new_u64s &&
-           btree_iter_pos_cmp_packed(b, &iter->pos, where,
-                                     iter->flags & BTREE_ITER_IS_EXTENTS))
+           bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
                bch2_btree_node_iter_push(node_iter, b, where, end);
-       return;
+               goto fixup_done;
+       } else {
+               /* Iterator is after key that changed */
+               return;
+       }
 found:
-       set->end = (int) set->end + shift;
+       set->end = t->end_offset;
 
        /* Iterator hasn't gotten to the key that changed yet: */
        if (set->k < offset)
                return;
 
        if (new_u64s &&
-           btree_iter_pos_cmp_packed(b, &iter->pos, where,
-                               iter->flags & BTREE_ITER_IS_EXTENTS)) {
+           bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
                set->k = offset;
-               bch2_btree_node_iter_sort(node_iter, b);
        } else if (set->k < offset + clobber_u64s) {
                set->k = offset + new_u64s;
                if (set->k == set->end)
-                       *set = node_iter->data[--node_iter->used];
-               bch2_btree_node_iter_sort(node_iter, b);
+                       bch2_btree_node_iter_set_drop(node_iter, set);
        } else {
+               /* Iterator is after key that changed */
                set->k = (int) set->k + shift;
+               return;
        }
 
+       bch2_btree_node_iter_sort(node_iter, b);
+fixup_done:
+       if (node_iter->data[0].k != orig_iter_pos)
+               iter_current_key_modified = true;
+
        /*
-        * Interior nodes are special because iterators for interior nodes don't
-        * obey the usual invariants regarding the iterator position:
-        *
-        * We may have whiteouts that compare greater than the iterator
-        * position, and logically should be in the iterator, but that we
-        * skipped past to find the first live key greater than the iterator
-        * position. This becomes an issue when we insert a new key that is
-        * greater than the current iterator position, but smaller than the
-        * whiteouts we've already skipped past - this happens in the course of
-        * a btree split.
-        *
-        * We have to rewind the iterator past to before those whiteouts here,
-        * else bkey_node_iter_prev() is not going to work and who knows what
-        * else would happen. And we have to do it manually, because here we've
-        * already done the insert and the iterator is currently inconsistent:
-        *
-        * We've got multiple competing invariants, here - we have to be careful
-        * about rewinding iterators for interior nodes, because they should
-        * always point to the key for the child node the btree iterator points
-        * to.
+        * When a new key is added, and the node iterator now points to that
+        * key, the iterator might have skipped past deleted keys that should
+        * come after the key the iterator now points to. We have to rewind to
+        * before those deleted keys - otherwise
+        * bch2_btree_node_iter_prev_all() breaks:
         */
-       if (b->level && new_u64s && !bkey_deleted(where) &&
-           btree_iter_pos_cmp_packed(b, &iter->pos, where,
-                               iter->flags & BTREE_ITER_IS_EXTENTS)) {
+       if (!bch2_btree_node_iter_end(node_iter) &&
+           iter_current_key_modified &&
+           (b->c.level ||
+            btree_node_type_is_extents(iter->btree_id))) {
                struct bset_tree *t;
-               struct bkey_packed *k;
+               struct bkey_packed *k, *k2, *p;
+
+               k = bch2_btree_node_iter_peek_all(node_iter, b);
 
                for_each_bset(b, t) {
-                       if (bch2_bkey_to_bset(b, where) == t)
+                       bool set_pos = false;
+
+                       if (node_iter->data[0].end == t->end_offset)
                                continue;
 
-                       k = bch2_bkey_prev_all(b, t,
-                               bch2_btree_node_iter_bset_pos(node_iter, b, t));
-                       if (k &&
-                           __btree_node_iter_cmp(node_iter, b,
-                                                 k, where) > 0) {
-                               struct btree_node_iter_set *set;
-                               unsigned offset =
-                                       __btree_node_key_to_offset(b, bkey_next(k));
-
-                               btree_node_iter_for_each(node_iter, set)
-                                       if (set->k == offset) {
-                                               set->k = __btree_node_key_to_offset(b, k);
-                                               bch2_btree_node_iter_sort(node_iter, b);
-                                               goto next_bset;
-                                       }
-
-                               bch2_btree_node_iter_push(node_iter, b, k,
-                                               btree_bkey_last(b, t));
+                       k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
+
+                       while ((p = bch2_bkey_prev_all(b, t, k2)) &&
+                              bkey_iter_cmp(b, k, p) < 0) {
+                               k2 = p;
+                               set_pos = true;
                        }
-next_bset:
-                       t = t;
+
+                       if (set_pos)
+                               btree_node_iter_set_set_pos(node_iter,
+                                                           b, t, k2);
                }
        }
+
+       if (!b->c.level &&
+           node_iter == &iter->l[0].iter &&
+           iter_current_key_modified)
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
 
 void bch2_btree_node_iter_fix(struct btree_iter *iter,
-                            struct btree *b,
-                            struct btree_node_iter *node_iter,
-                            struct bset_tree *t,
-                            struct bkey_packed *where,
-                            unsigned clobber_u64s,
-                            unsigned new_u64s)
+                             struct btree *b,
+                             struct btree_node_iter *node_iter,
+                             struct bkey_packed *where,
+                             unsigned clobber_u64s,
+                             unsigned new_u64s)
 {
+       struct bset_tree *t = bch2_bkey_to_bset(b, where);
        struct btree_iter *linked;
 
-       if (node_iter != &iter->node_iters[b->level])
+       if (node_iter != &iter->l[b->c.level].iter) {
                __bch2_btree_node_iter_fix(iter, b, node_iter, t,
-                                         where, clobber_u64s, new_u64s);
+                                          where, clobber_u64s, new_u64s);
 
-       if (iter->nodes[b->level] == b)
-               __bch2_btree_node_iter_fix(iter, b,
-                                         &iter->node_iters[b->level], t,
-                                         where, clobber_u64s, new_u64s);
+               if (bch2_debug_check_iterators)
+                       bch2_btree_node_iter_verify(node_iter, b);
+       }
 
-       for_each_linked_btree_node(iter, b, linked)
+       trans_for_each_iter_with_node(iter->trans, b, linked) {
                __bch2_btree_node_iter_fix(linked, b,
-                                         &linked->node_iters[b->level], t,
-                                         where, clobber_u64s, new_u64s);
-
-       /* interior node iterators are... special... */
-       if (!b->level)
-               bch2_btree_iter_verify(iter, b);
+                                          &linked->l[b->c.level].iter, t,
+                                          where, clobber_u64s, new_u64s);
+               bch2_btree_iter_verify_level(linked, b->c.level);
+       }
 }
 
-/* peek_all() doesn't skip deleted keys */
-static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
+static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
+                                                 struct btree_iter_level *l,
+                                                 struct bkey *u,
+                                                 struct bkey_packed *k)
 {
-       struct btree *b = iter->nodes[iter->level];
-       struct bkey_packed *k =
-               bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
        struct bkey_s_c ret;
 
-       EBUG_ON(!btree_node_locked(iter, iter->level));
-
-       if (!k)
+       if (unlikely(!k)) {
+               /*
+                * signal to bch2_btree_iter_peek_slot() that we're currently at
+                * a hole
+                */
+               u->type = KEY_TYPE_deleted;
                return bkey_s_c_null;
+       }
 
-       ret = bkey_disassemble(b, k, &iter->k);
+       ret = bkey_disassemble(l->b, k, u);
 
-       if (debug_check_bkeys(iter->c))
-               bch2_bkey_debugcheck(iter->c, b, ret);
+       /*
+        * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
+        * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
+        * being overwritten but doesn't change k->size. But this is ok, because
+        * those keys are never written out, we just have to avoid a spurious
+        * assertion here:
+        */
+       if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
+               bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
 
        return ret;
 }
 
-static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
+/* peek_all() doesn't skip deleted keys */
+static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
+                                                       struct btree_iter_level *l)
 {
-       struct btree *b = iter->nodes[iter->level];
-       struct bkey_packed *k =
-               bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
-       struct bkey_s_c ret;
-
-       EBUG_ON(!btree_node_locked(iter, iter->level));
+       return __btree_iter_unpack(iter, l, &iter->k,
+                       bch2_btree_node_iter_peek_all(&l->iter, l->b));
+}
 
-       if (!k)
-               return bkey_s_c_null;
+static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
+                                                   struct btree_iter_level *l)
+{
+       struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
+                       bch2_btree_node_iter_peek(&l->iter, l->b));
 
-       ret = bkey_disassemble(b, k, &iter->k);
+       iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
+       return k;
+}
 
-       if (debug_check_bkeys(iter->c))
-               bch2_bkey_debugcheck(iter->c, b, ret);
+static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
+                                                   struct btree_iter_level *l)
+{
+       struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
+                       bch2_btree_node_iter_prev(&l->iter, l->b));
 
-       return ret;
+       iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
+       return k;
 }
 
-static inline void __btree_iter_advance(struct btree_iter *iter)
+static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
+                                            struct btree_iter_level *l,
+                                            int max_advance)
 {
-       bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
-                                   iter->nodes[iter->level]);
+       struct bkey_packed *k;
+       int nr_advanced = 0;
+
+       while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
+              bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
+               if (max_advance > 0 && nr_advanced >= max_advance)
+                       return false;
+
+               bch2_btree_node_iter_advance(&l->iter, l->b);
+               nr_advanced++;
+       }
+
+       return true;
 }
 
 /*
@@ -504,133 +907,122 @@ static inline void __btree_iter_advance(struct btree_iter *iter)
  */
 static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
 {
+       struct btree_iter_level *l;
+       unsigned plevel;
        bool parent_locked;
        struct bkey_packed *k;
 
-       if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
-           !iter->nodes[b->level + 1])
+       if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
                return;
 
-       parent_locked = btree_node_locked(iter, b->level + 1);
+       plevel = b->c.level + 1;
+       if (!btree_iter_node(iter, plevel))
+               return;
+
+       parent_locked = btree_node_locked(iter, plevel);
 
-       if (!bch2_btree_node_relock(iter, b->level + 1))
+       if (!bch2_btree_node_relock(iter, plevel))
                return;
 
-       k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
-                                        iter->nodes[b->level + 1]);
+       l = &iter->l[plevel];
+       k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
        if (!k ||
            bkey_deleted(k) ||
-           bkey_cmp_left_packed(iter->nodes[b->level + 1],
-                                k, &b->key.k.p)) {
-               char buf[100];
+           bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
+               char buf1[100];
+               char buf2[100];
+               char buf3[100];
+               char buf4[100];
                struct bkey uk = bkey_unpack_key(b, k);
 
-               bch2_bkey_to_text(buf, sizeof(buf), &uk);
-               panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
-                     buf, b->key.k.p.inode, b->key.k.p.offset);
+               bch2_dump_btree_node(iter->trans->c, l->b);
+               bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
+               bch2_bkey_to_text(&PBUF(buf2), &uk);
+               bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
+               bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
+               panic("parent iter doesn't point to new node:\n"
+                     "iter pos %s %s\n"
+                     "iter key %s\n"
+                     "new node %s-%s\n",
+                     bch2_btree_ids[iter->btree_id], buf1,
+                     buf2, buf3, buf4);
        }
 
        if (!parent_locked)
-               btree_node_unlock(iter, b->level + 1);
+               btree_node_unlock(iter, b->c.level + 1);
 }
 
 static inline void __btree_iter_init(struct btree_iter *iter,
-                                    struct btree *b)
+                                    unsigned level)
 {
-       bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos,
-                                 iter->flags & BTREE_ITER_IS_EXTENTS,
-                                 btree_node_is_extents(b));
+       struct btree_iter_level *l = &iter->l[level];
 
-       /* Skip to first non whiteout: */
-       if (b->level)
-               bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
-}
+       bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);
 
-static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
-                                         struct btree *b)
-{
-       return iter->btree_id == b->btree_id &&
-               bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
-               btree_iter_pos_cmp(iter->pos, &b->key.k,
-                                  iter->flags & BTREE_ITER_IS_EXTENTS);
+       /*
+        * Iterators to interior nodes should always be pointed at the first
+        * non-whiteout key:
+        */
+       if (level)
+               bch2_btree_node_iter_peek(&l->iter, l->b);
+
+       btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
 }
 
 static inline void btree_iter_node_set(struct btree_iter *iter,
                                       struct btree *b)
 {
+       BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
+
        btree_iter_verify_new_node(iter, b);
 
        EBUG_ON(!btree_iter_pos_in_node(iter, b));
-       EBUG_ON(b->lock.state.seq & 1);
+       EBUG_ON(b->c.lock.state.seq & 1);
 
-       iter->lock_seq[b->level] = b->lock.state.seq;
-       iter->nodes[b->level] = b;
-       __btree_iter_init(iter, b);
+       iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+       iter->l[b->c.level].b = b;
+       __btree_iter_init(iter, b->c.level);
 }
 
 /*
  * A btree node is being replaced - update the iterator to point to the new
  * node:
  */
-bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
+void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
 {
+       enum btree_node_locked_type t;
        struct btree_iter *linked;
 
-       for_each_linked_btree_iter(iter, linked)
-               if (btree_iter_pos_in_node(linked, b)) {
+       trans_for_each_iter(iter->trans, linked)
+               if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
+                   btree_iter_pos_in_node(linked, b)) {
                        /*
                         * bch2_btree_iter_node_drop() has already been called -
                         * the old node we're replacing has already been
                         * unlocked and the pointer invalidated
                         */
-                       BUG_ON(btree_node_locked(linked, b->level));
-
-                       /*
-                        * If @linked wants this node read locked, we don't want
-                        * to actually take the read lock now because it's not
-                        * legal to hold read locks on other nodes while we take
-                        * write locks, so the journal can make forward
-                        * progress...
-                        *
-                        * Instead, btree_iter_node_set() sets things up so
-                        * bch2_btree_node_relock() will succeed:
-                        */
+                       BUG_ON(btree_node_locked(linked, b->c.level));
 
-                       if (btree_want_intent(linked, b->level)) {
-                               six_lock_increment(&b->lock, SIX_LOCK_intent);
-                               mark_btree_node_intent_locked(linked, b->level);
+                       t = btree_lock_want(linked, b->c.level);
+                       if (t != BTREE_NODE_UNLOCKED) {
+                               six_lock_increment(&b->c.lock, t);
+                               mark_btree_node_locked(linked, b->c.level, t);
                        }
 
                        btree_iter_node_set(linked, b);
                }
-
-       if (!btree_iter_pos_in_node(iter, b)) {
-               six_unlock_intent(&b->lock);
-               return false;
-       }
-
-       mark_btree_node_intent_locked(iter, b->level);
-       btree_iter_node_set(iter, b);
-       return true;
-}
-
-void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
-{
-       struct btree_iter *linked;
-
-       for_each_linked_btree_iter(iter, linked)
-               bch2_btree_iter_node_drop(linked, b);
 }
 
 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
 {
-       unsigned level = b->level;
+       struct btree_iter *linked;
+       unsigned level = b->c.level;
 
-       if (iter->nodes[level] == b) {
-               iter->flags &= ~BTREE_ITER_UPTODATE;
-               btree_node_unlock(iter, level);
-               iter->nodes[level] = BTREE_ITER_NOT_END;
-       }
+       trans_for_each_iter(iter->trans, linked)
+               if (linked->l[level].b == b) {
+                       btree_node_unlock(linked, level);
+                       linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
+               }
 }
 
 /*
@@ -641,24 +1033,32 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
 
-       for_each_linked_btree_node(iter, b, linked)
-               __btree_iter_init(linked, b);
-       __btree_iter_init(iter, b);
+       trans_for_each_iter_with_node(iter->trans, b, linked)
+               __btree_iter_init(linked, b->c.level);
+}
+
+static int lock_root_check_fn(struct six_lock *lock, void *p)
+{
+       struct btree *b = container_of(lock, struct btree, c.lock);
+       struct btree **rootp = p;
+
+       return b == *rootp ? 0 : -1;
 }
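+
+/*
+ * lock_root_check_fn() is the should_sleep callback passed down to
+ * six_lock_type(): if the btree root changes while we're waiting for the
+ * lock, it returns -1 so we fail with -EINTR instead of sleeping on a node
+ * that's no longer the root.
+ */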
 
 static inline int btree_iter_lock_root(struct btree_iter *iter,
-                                      unsigned depth_want)
+                                      unsigned depth_want,
+                                      unsigned long trace_ip)
 {
-       struct bch_fs *c = iter->c;
-       struct btree *b;
+       struct bch_fs *c = iter->trans->c;
+       struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
        enum six_lock_type lock_type;
        unsigned i;
 
        EBUG_ON(iter->nodes_locked);
 
        while (1) {
-               b = READ_ONCE(c->btree_roots[iter->btree_id].b);
-               iter->level = READ_ONCE(b->level);
+               b = READ_ONCE(*rootp);
+               iter->level = READ_ONCE(b->c.level);
 
                if (unlikely(iter->level < depth_want)) {
                        /*
@@ -668,102 +1068,185 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
                         * that depth
                         */
                        iter->level = depth_want;
-                       iter->nodes[iter->level] = NULL;
-                       return 0;
+                       for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
+                               iter->l[i].b = NULL;
+                       return 1;
                }
 
-               lock_type = btree_lock_want(iter, iter->level);
+               lock_type = __btree_lock_want(iter, iter->level);
                if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
-                                             iter, lock_type)))
+                                             iter, lock_type,
+                                             lock_root_check_fn, rootp,
+                                             trace_ip)))
                        return -EINTR;
 
-               if (likely(b == c->btree_roots[iter->btree_id].b &&
-                          b->level == iter->level &&
+               if (likely(b == READ_ONCE(*rootp) &&
+                          b->c.level == iter->level &&
                           !race_fault())) {
                        for (i = 0; i < iter->level; i++)
-                               iter->nodes[i] = BTREE_ITER_NOT_END;
-                       iter->nodes[iter->level] = b;
+                               iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
+                       iter->l[iter->level].b = b;
+                       for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
+                               iter->l[i].b = NULL;
 
                        mark_btree_node_locked(iter, iter->level, lock_type);
                        btree_iter_node_set(iter, b);
                        return 0;
-
                }
 
-               six_unlock_type(&b->lock, lock_type);
+               six_unlock_type(&b->c.lock, lock_type);
        }
 }
 
 noinline
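+/*
+ * Prefetch siblings of the node we're descending into. The counts below
+ * suggest the intent (an inference, not stated in the code): prefetch much
+ * more aggressively before BCH_FS_STARTED is set - i.e. during recovery,
+ * when btree access tends to be sequential - and barely at all on interior
+ * levels afterwards.
+ */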
 static void btree_iter_prefetch(struct btree_iter *iter)
 {
-       struct btree *b = iter->nodes[iter->level + 1];
-       struct btree_node_iter node_iter = iter->node_iters[iter->level + 1];
+       struct bch_fs *c = iter->trans->c;
+       struct btree_iter_level *l = &iter->l[iter->level];
+       struct btree_node_iter node_iter = l->iter;
        struct bkey_packed *k;
-       BKEY_PADDED(k) tmp;
-       unsigned nr = iter->level ? 1 : 8;
-       bool was_locked = btree_node_locked(iter, iter->level + 1);
+       struct bkey_buf tmp;
+       unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
+               ? (iter->level > 1 ? 0 :  2)
+               : (iter->level > 1 ? 1 : 16);
+       bool was_locked = btree_node_locked(iter, iter->level);
+
+       bch2_bkey_buf_init(&tmp);
 
        while (nr) {
-               if (!bch2_btree_node_relock(iter, iter->level + 1))
-                       return;
+               if (!bch2_btree_node_relock(iter, iter->level))
+                       break;
 
-               bch2_btree_node_iter_advance(&node_iter, b);
-               k = bch2_btree_node_iter_peek(&node_iter, b);
+               bch2_btree_node_iter_advance(&node_iter, l->b);
+               k = bch2_btree_node_iter_peek(&node_iter, l->b);
                if (!k)
                        break;
 
-               bch2_bkey_unpack(b, &tmp.k, k);
-               bch2_btree_node_prefetch(iter->c, &tmp.k,
-                                        iter->level, iter->btree_id);
+               bch2_bkey_buf_unpack(&tmp, c, l->b, k);
+               bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
+                                        iter->level - 1);
        }
 
        if (!was_locked)
-               btree_node_unlock(iter, iter->level + 1);
+               btree_node_unlock(iter, iter->level);
+
+       bch2_bkey_buf_exit(&tmp, c);
 }
 
-static inline int btree_iter_down(struct btree_iter *iter)
+static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
+                                           unsigned plevel, struct btree *b)
 {
+       struct btree_iter_level *l = &iter->l[plevel];
+       bool locked = btree_node_locked(iter, plevel);
+       struct bkey_packed *k;
+       struct bch_btree_ptr_v2 *bp;
+
+       if (!bch2_btree_node_relock(iter, plevel))
+               return;
+
+       k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+       BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+
+       bp = (void *) bkeyp_val(&l->b->format, k);
+       bp->mem_ptr = (unsigned long)b;
+
+       if (!locked)
+               btree_node_unlock(iter, plevel);
+}
+
+static __always_inline int btree_iter_down(struct btree_iter *iter,
+                                          unsigned long trace_ip)
+{
+       struct bch_fs *c = iter->trans->c;
+       struct btree_iter_level *l = &iter->l[iter->level];
        struct btree *b;
-       struct bkey_s_c k = __btree_iter_peek(iter);
        unsigned level = iter->level - 1;
-       enum six_lock_type lock_type = btree_lock_want(iter, level);
-       BKEY_PADDED(k) tmp;
+       enum six_lock_type lock_type = __btree_lock_want(iter, level);
+       struct bkey_buf tmp;
+       int ret;
 
-       bkey_reassemble(&tmp.k, k);
+       EBUG_ON(!btree_node_locked(iter, iter->level));
 
-       b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type);
-       if (unlikely(IS_ERR(b)))
-               return PTR_ERR(b);
+       bch2_bkey_buf_init(&tmp);
+       bch2_bkey_buf_unpack(&tmp, c, l->b,
+                        bch2_btree_node_iter_peek(&l->iter, l->b));
+
+       b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
+       ret = PTR_ERR_OR_ZERO(b);
+       if (unlikely(ret))
+               goto err;
 
-       iter->level = level;
        mark_btree_node_locked(iter, level, lock_type);
        btree_iter_node_set(iter, b);
 
+       if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
+           unlikely(b != btree_node_mem_ptr(tmp.k)))
+               btree_node_mem_ptr_set(iter, level + 1, b);
+
        if (iter->flags & BTREE_ITER_PREFETCH)
                btree_iter_prefetch(iter);
 
-       return 0;
-}
+       if (btree_node_read_locked(iter, level + 1))
+               btree_node_unlock(iter, level + 1);
+       iter->level = level;
 
-static void btree_iter_up(struct btree_iter *iter)
-{
-       btree_node_unlock(iter, iter->level++);
+       bch2_btree_iter_verify_locks(iter);
+err:
+       bch2_bkey_buf_exit(&tmp, c);
+       return ret;
 }
 
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
+static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
 
-static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
+                                    unsigned long trace_ip)
 {
-       struct bch_fs *c = iter->c;
-       struct btree_iter *linked, *sorted_iters, **i;
+       struct bch_fs *c = trans->c;
+       struct btree_iter *iter;
+       u8 sorted[BTREE_ITER_MAX];
+       int i, nr_sorted = 0;
+       bool relock_fail;
+
+       if (trans->in_traverse_all)
+               return -EINTR;
+
+       trans->in_traverse_all = true;
 retry_all:
-       bch2_btree_iter_unlock(iter);
+       nr_sorted = 0;
+       relock_fail = false;
+
+       trans_for_each_iter(trans, iter) {
+               if (!bch2_btree_iter_relock(iter, _THIS_IP_))
+                       relock_fail = true;
+               sorted[nr_sorted++] = iter->idx;
+       }
+
+       if (!relock_fail) {
+               trans->in_traverse_all = false;
+               return 0;
+       }
+
+#define btree_iter_cmp_by_idx(_l, _r)                          \
+               btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])
+
+       bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
+#undef btree_iter_cmp_by_idx
+
+       for (i = nr_sorted - 2; i >= 0; --i) {
+               struct btree_iter *iter1 = trans->iters + sorted[i];
+               struct btree_iter *iter2 = trans->iters + sorted[i + 1];
+
+               if (iter1->btree_id == iter2->btree_id &&
+                   iter1->locks_want < iter2->locks_want)
+                       __bch2_btree_iter_upgrade(iter1, iter2->locks_want);
+               else if (!iter1->locks_want && iter2->locks_want)
+                       __bch2_btree_iter_upgrade(iter1, 1);
+       }
 
-       if (ret != -ENOMEM && ret != -EINTR)
-               goto io_error;
+       bch2_trans_unlock(trans);
+       cond_resched();
 
-       if (ret == -ENOMEM) {
+       if (unlikely(ret == -ENOMEM)) {
                struct closure cl;
 
                closure_init_stack(&cl);
@@ -774,57 +1257,78 @@ retry_all:
                } while (ret);
        }
 
-       /*
-        * Linked iters are normally a circular singly linked list - break cycle
-        * while we sort them:
-        */
-       linked = iter->next;
-       iter->next = NULL;
-       sorted_iters = NULL;
-
-       while (linked) {
-               iter = linked;
-               linked = linked->next;
-
-               i = &sorted_iters;
-               while (*i && btree_iter_cmp(iter, *i) > 0)
-                       i = &(*i)->next;
-
-               iter->next = *i;
-               *i = iter;
+       if (unlikely(ret == -EIO)) {
+               trans->error = true;
+               goto out;
        }
 
-       /* Make list circular again: */
-       iter = sorted_iters;
-       while (iter->next)
-               iter = iter->next;
-       iter->next = sorted_iters;
+       BUG_ON(ret && ret != -EINTR);
 
        /* Now, redo traversals in correct order: */
+       for (i = 0; i < nr_sorted; i++) {
+               unsigned idx = sorted[i];
 
-       iter = sorted_iters;
-       do {
-retry:
-               ret = __bch2_btree_iter_traverse(iter);
-               if (unlikely(ret)) {
-                       if (ret == -EINTR)
-                               goto retry;
-                       goto retry_all;
-               }
+               /*
+                * successfully traversing one iterator can cause another to be
+                * unlinked, in btree_key_cache_fill()
+                */
+               if (!(trans->iters_linked & (1ULL << idx)))
+                       continue;
 
-               iter = iter->next;
-       } while (iter != sorted_iters);
+               ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
+               if (ret)
+                       goto retry_all;
+       }
 
-       ret = btree_iter_linked(iter) ? -EINTR : 0;
+       if (hweight64(trans->iters_live) > 1)
+               ret = -EINTR;
+       else
+               trans_for_each_iter(trans, iter)
+                       if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
+                               ret = -EINTR;
+                               break;
+                       }
 out:
        bch2_btree_cache_cannibalize_unlock(c);
+
+       trans->in_traverse_all = false;
+
+       trace_trans_traverse_all(trans->ip, trace_ip);
        return ret;
-io_error:
-       BUG_ON(ret != -EIO);
+}
 
-       iter->flags |= BTREE_ITER_ERROR;
-       iter->nodes[iter->level] = NULL;
-       goto out;
+int bch2_btree_iter_traverse_all(struct btree_trans *trans)
+{
+       return __btree_iter_traverse_all(trans, 0, _RET_IP_);
+}
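
Note the technique here: relocking failed, so instead of retrying in whatever order the iterators happen to be in, __btree_iter_traverse_all() sorts them (bubble_sort() on btree_iter_lock_cmp()) and retraverses in that order, so every transaction acquires node locks in one global order - the standard deadlock-avoidance scheme. A self-contained sketch of the same idea, ordering plain mutexes by address before taking them (hypothetical names):

	#include <pthread.h>
	#include <stdlib.h>

	static int cmp_lock(const void *l, const void *r)
	{
		const pthread_mutex_t *a = *(pthread_mutex_t *const *) l;
		const pthread_mutex_t *b = *(pthread_mutex_t *const *) r;

		return a < b ? -1 : a > b ? 1 : 0;
	}

	/* Take every lock in one global (address) order, so two threads can
	 * never each hold a lock the other is waiting on: */
	static void lock_all(pthread_mutex_t **locks, size_t nr)
	{
		size_t i;

		qsort(locks, nr, sizeof(*locks), cmp_lock);
		for (i = 0; i < nr; i++)
			pthread_mutex_lock(locks[i]);
	}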
+
+static inline bool btree_iter_good_node(struct btree_iter *iter,
+                                       unsigned l, int check_pos)
+{
+       if (!is_btree_node(iter, l) ||
+           !bch2_btree_node_relock(iter, l))
+               return false;
+
+       if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
+               return false;
+       if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
+               return false;
+       return true;
+}
+
+static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
+                                                    int check_pos)
+{
+       unsigned l = iter->level;
+
+       while (btree_iter_node(iter, l) &&
+              !btree_iter_good_node(iter, l, check_pos)) {
+               btree_node_unlock(iter, l);
+               iter->l[l].b = BTREE_ITER_NO_NODE_UP;
+               l++;
+       }
+
+       return l;
 }
 
 /*
@@ -834,57 +1338,34 @@ io_error:
  * Returns 0 on success, -EIO on error (error reading in a btree node).
  *
  * On error, caller (peek_node()/peek_key()) must return NULL; the error is
- * stashed in the iterator and returned from bch2_btree_iter_unlock().
+ * stashed in the iterator and returned from bch2_trans_exit().
  */
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
+static int btree_iter_traverse_one(struct btree_iter *iter,
+                                  unsigned long trace_ip)
 {
        unsigned depth_want = iter->level;
+       int ret = 0;
 
-       if (unlikely(!iter->nodes[iter->level]))
-               return 0;
-
-       iter->flags &= ~(BTREE_ITER_UPTODATE|BTREE_ITER_AT_END_OF_LEAF);
-
-       /* make sure we have all the intent locks we need - ugh */
-       if (unlikely(iter->nodes[iter->level] &&
-                    iter->level + 1 < iter->locks_want)) {
-               unsigned i;
+       /*
+        * if we need interior nodes locked, call btree_iter_relock() to make
+        * sure we walk back up enough that we lock them:
+        */
+       if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
+           iter->locks_want > 1)
+               bch2_btree_iter_relock(iter, _THIS_IP_);
 
-               for (i = iter->level + 1;
-                    i < iter->locks_want && iter->nodes[i];
-                    i++)
-                       if (!bch2_btree_node_relock(iter, i)) {
-                               while (iter->nodes[iter->level] &&
-                                      iter->level + 1 < iter->locks_want)
-                                       btree_iter_up(iter);
-                               break;
-                       }
+       if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+               ret = bch2_btree_iter_traverse_cached(iter);
+               goto out;
        }
 
-       /*
-        * If the current node isn't locked, go up until we have a locked node
-        * or run out of nodes:
-        */
-       while (iter->nodes[iter->level] &&
-              !(is_btree_node(iter, iter->level) &&
-                bch2_btree_node_relock(iter, iter->level) &&
-                btree_iter_pos_cmp(iter->pos,
-                                   &iter->nodes[iter->level]->key.k,
-                                   iter->flags & BTREE_ITER_IS_EXTENTS)))
-               btree_iter_up(iter);
+       if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
+               goto out;
 
-       /*
-        * If we've got a btree node locked (i.e. we aren't about to relock the
-        * root) - advance its node iterator if necessary:
-        */
-       if (iter->nodes[iter->level]) {
-               struct bkey_s_c k;
+       if (unlikely(iter->level >= BTREE_MAX_DEPTH))
+               goto out;
 
-               while ((k = __btree_iter_peek_all(iter)).k &&
-                      !btree_iter_pos_cmp(iter->pos, k.k,
-                                          iter->flags & BTREE_ITER_IS_EXTENTS))
-                       __btree_iter_advance(iter);
-       }
+       iter->level = btree_iter_up_until_good_node(iter, 0);
 
        /*
         * Note: iter->nodes[iter->level] may be temporarily NULL here - that
@@ -893,30 +1374,85 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
         * btree_iter_lock_root() comes next and that it can't fail
         */
        while (iter->level > depth_want) {
-               int ret = iter->nodes[iter->level]
-                       ? btree_iter_down(iter)
-                       : btree_iter_lock_root(iter, depth_want);
+               ret = btree_iter_node(iter, iter->level)
+                       ? btree_iter_down(iter, trace_ip)
+                       : btree_iter_lock_root(iter, depth_want, trace_ip);
                if (unlikely(ret)) {
+                       if (ret == 1) {
+                               /*
+                                * Got to the end of the btree (in
+                                * BTREE_ITER_NODES mode)
+                                */
+                               ret = 0;
+                               goto out;
+                       }
+
                        iter->level = depth_want;
-                       iter->nodes[iter->level] = BTREE_ITER_NOT_END;
-                       return ret;
+
+                       if (ret == -EIO) {
+                               iter->flags |= BTREE_ITER_ERROR;
+                               iter->l[iter->level].b =
+                                       BTREE_ITER_NO_NODE_ERROR;
+                       } else {
+                               iter->l[iter->level].b =
+                                       BTREE_ITER_NO_NODE_DOWN;
+                       }
+                       goto out;
                }
        }
 
-       return 0;
+       iter->uptodate = BTREE_ITER_NEED_PEEK;
+out:
+       trace_iter_traverse(iter->trans->ip, trace_ip,
+                           iter->btree_id, &iter->real_pos, ret);
+       bch2_btree_iter_verify(iter);
+       return ret;
 }
 
-int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
+static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 {
+       struct btree_trans *trans = iter->trans;
        int ret;
 
-       ret = __bch2_btree_iter_traverse(iter);
+       ret =   bch2_trans_cond_resched(trans) ?:
+               btree_iter_traverse_one(iter, _RET_IP_);
        if (unlikely(ret))
-               ret = btree_iter_traverse_error(iter, ret);
+               ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
 
        return ret;
 }
 
+/*
+ * Note:
+ * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
+ * for internal btree iterator users
+ *
+ * bch2_btree_iter_traverse() sets iter->real_pos to iter->pos,
+ * btree_iter_traverse() does not:
+ */
+static inline int __must_check
+btree_iter_traverse(struct btree_iter *iter)
+{
+       return iter->uptodate >= BTREE_ITER_NEED_RELOCK
+               ? __bch2_btree_iter_traverse(iter)
+               : 0;
+}
+
+int __must_check
+bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+       int ret;
+
+       btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
+
+       ret = btree_iter_traverse(iter);
+       if (ret)
+               return ret;
+
+       iter->should_be_locked = true;
+       return 0;
+}
+
 /* Iterate across nodes (leaf and interior nodes) */
 
 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
@@ -924,306 +1460,1096 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
        struct btree *b;
        int ret;
 
-       EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
+       EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+       bch2_btree_iter_verify(iter);
 
-       ret = bch2_btree_iter_traverse(iter);
+       ret = btree_iter_traverse(iter);
        if (ret)
-               return ERR_PTR(ret);
+               return NULL;
 
-       b = iter->nodes[iter->level];
+       b = btree_iter_node(iter, iter->level);
+       if (!b)
+               return NULL;
 
-       if (b) {
-               EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
-               iter->pos = b->key.k.p;
-       }
+       BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
+
+       iter->pos = iter->real_pos = b->key.k.p;
+
+       bch2_btree_iter_verify(iter);
+       iter->should_be_locked = true;
 
        return b;
 }
 
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
+struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 {
        struct btree *b;
        int ret;
 
-       EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
+       EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+       bch2_btree_iter_verify(iter);
 
-       btree_iter_up(iter);
-
-       if (!iter->nodes[iter->level])
+       /* already got to end? */
+       if (!btree_iter_node(iter, iter->level))
                return NULL;
 
-       /* parent node usually won't be locked: redo traversal if necessary */
-       ret = bch2_btree_iter_traverse(iter);
+       bch2_trans_cond_resched(iter->trans);
+
+       btree_node_unlock(iter, iter->level);
+       iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
+       iter->level++;
+
+       btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+       ret = btree_iter_traverse(iter);
        if (ret)
                return NULL;
 
-       b = iter->nodes[iter->level];
+       /* got to end? */
+       b = btree_iter_node(iter, iter->level);
        if (!b)
-               return b;
+               return NULL;
+
+       if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
+               /*
+                * Haven't gotten to the end of the parent node: go back down to
+                * the next child node
+                */
+               btree_iter_set_search_pos(iter, bpos_successor(iter->pos));
 
-       if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
-               /* Haven't gotten to the end of the parent node: */
+               /* Unlock to avoid screwing up our lock invariants: */
+               btree_node_unlock(iter, iter->level);
 
-               /* ick: */
-               iter->pos       = iter->btree_id == BTREE_ID_INODES
-                       ? btree_type_successor(iter->btree_id, iter->pos)
-                       : bkey_successor(iter->pos);
-               iter->level     = depth;
+               iter->level = iter->min_depth;
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+               bch2_btree_iter_verify(iter);
 
-               ret = bch2_btree_iter_traverse(iter);
+               ret = btree_iter_traverse(iter);
                if (ret)
                        return NULL;
 
-               b = iter->nodes[iter->level];
+               b = iter->l[iter->level].b;
        }
 
-       iter->pos = b->key.k.p;
+       iter->pos = iter->real_pos = b->key.k.p;
+
+       bch2_btree_iter_verify(iter);
+       iter->should_be_locked = true;
 
        return b;
 }
 
 /* Iterate across keys (in leaf nodes only) */
 
-void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
+static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
 {
-       struct btree *b = iter->nodes[0];
-       struct btree_node_iter *node_iter = &iter->node_iters[0];
-       struct bkey_packed *k;
+#ifdef CONFIG_BCACHEFS_DEBUG
+       struct bpos old_pos = iter->real_pos;
+#endif
+       int cmp = bpos_cmp(new_pos, iter->real_pos);
+       unsigned l = iter->level;
+
+       if (!cmp)
+               goto out;
 
-       EBUG_ON(iter->level != 0);
-       EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
-       EBUG_ON(!btree_node_locked(iter, 0));
-       EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);
+       iter->real_pos = new_pos;
+       iter->should_be_locked = false;
 
-       while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
-              !btree_iter_pos_cmp_packed(b, &new_pos, k,
-                                         iter->flags & BTREE_ITER_IS_EXTENTS))
-               bch2_btree_node_iter_advance(node_iter, b);
+       if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
+               btree_node_unlock(iter, 0);
+               iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+               return;
+       }
 
-       if (!k &&
-           !btree_iter_pos_cmp(new_pos, &b->key.k,
-                               iter->flags & BTREE_ITER_IS_EXTENTS))
-               iter->flags |= BTREE_ITER_AT_END_OF_LEAF;
+       l = btree_iter_up_until_good_node(iter, cmp);
+
+       if (btree_iter_node(iter, l)) {
+               /*
+                * We might have to skip over many keys, or just a few: try
+                * advancing the node iterator, and if we have to skip over too
+                * many keys just reinit it (or if we're rewinding, since that
+                * is expensive).
+                */
+               if (cmp < 0 ||
+                   !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
+                       __btree_iter_init(iter, l);
 
-       iter->pos = new_pos;
-       iter->flags &= ~BTREE_ITER_UPTODATE;
+               /* Don't leave it locked if we're not supposed to: */
+               if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
+                       btree_node_unlock(iter, l);
+       }
+out:
+       if (l != iter->level)
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+       else
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
+
+       bch2_btree_iter_verify(iter);
+#ifdef CONFIG_BCACHEFS_DEBUG
+       trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
+                                 iter->btree_id,
+                                 &old_pos, &new_pos, l);
+#endif
 }
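
The comment in btree_iter_set_search_pos() above describes a classic locality heuristic: when the new position is close to the old one, stepping the node iterator forward a bounded number of keys beats re-running a full search, but rewinding or a long jump is better served by reinitializing. A self-contained sketch over a sorted array - hypothetical names, with binary_search_ge() playing the role of __btree_iter_init():

	#include <stddef.h>

	static size_t binary_search_ge(const int *a, size_t nr, int target)
	{
		size_t l = 0, r = nr;

		while (l < r) {
			size_t m = l + (r - l) / 2;

			if (a[m] < target)
				l = m + 1;
			else
				r = m;
		}
		return l;
	}

	/* Starting from a previous position cur, step forward at most max_steps
	 * keys; if that doesn't reach target - or target is behind us -
	 * reinitialize with a full binary search: */
	static size_t search_from(const int *a, size_t nr, size_t cur,
				  int target, unsigned max_steps)
	{
		if (cur < nr && a[cur] < target)
			while (max_steps-- && cur < nr) {
				if (a[cur] >= target)
					return cur;
				cur++;
			}

		return binary_search_ge(a, nr, target);
	}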
 
-void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
-       EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
-       iter->pos = new_pos;
-       iter->flags &= ~BTREE_ITER_UPTODATE;
+       struct bpos pos = iter->k.p;
+       bool ret = bpos_cmp(pos, POS_MAX) != 0;
+
+       if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+               pos = bkey_successor(iter, pos);
+       bch2_btree_iter_set_pos(iter, pos);
+       return ret;
 }
 
-void bch2_btree_iter_advance_pos(struct btree_iter *iter)
+inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
-       if (iter->flags & BTREE_ITER_UPTODATE &&
-           !(iter->flags & BTREE_ITER_WITH_HOLES)) {
-               struct bkey_s_c k;
+       struct bpos pos = bkey_start_pos(&iter->k);
+       bool ret = bpos_cmp(pos, POS_MIN) != 0;
 
-               __btree_iter_advance(iter);
-               k = __btree_iter_peek(iter);
-               if (likely(k.k)) {
-                       iter->pos = bkey_start_pos(k.k);
-                       return;
-               }
-       }
+       if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
+               pos = bkey_predecessor(iter, pos);
+       bch2_btree_iter_set_pos(iter, pos);
+       return ret;
+}
+
+static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
+{
+       struct bpos next_pos = iter->l[0].b->key.k.p;
+       bool ret = bpos_cmp(next_pos, POS_MAX) != 0;
 
        /*
-        * We use iter->k instead of iter->pos for extents: iter->pos will be
-        * equal to the start of the extent we returned, but we need to advance
-        * to the end of the extent we returned.
+        * Typically, we don't want to modify iter->pos here, since that
+        * indicates where we searched from - unless we got to the end of the
+        * btree, in which case we want iter->pos to reflect that:
         */
-       bch2_btree_iter_set_pos(iter,
-               btree_type_successor(iter->btree_id, iter->k.p));
+       if (ret)
+               btree_iter_set_search_pos(iter, bpos_successor(next_pos));
+       else
+               bch2_btree_iter_set_pos(iter, POS_MAX);
+
+       return ret;
 }
 
-/* XXX: expensive */
-void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
+static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
 {
-       /* incapable of rewinding across nodes: */
-       BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);
+       struct bpos next_pos = iter->l[0].b->data->min_key;
+       bool ret = bpos_cmp(next_pos, POS_MIN) != 0;
+
+       if (ret)
+               btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
+       else
+               bch2_btree_iter_set_pos(iter, POS_MIN);
+
+       return ret;
+}
+
+static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter,
+                                                     struct bpos pos)
+{
+       struct btree_insert_entry *i;
+
+       if (!(iter->flags & BTREE_ITER_WITH_UPDATES))
+               return NULL;
+
+       trans_for_each_update(iter->trans, i)
+               if ((cmp_int(iter->btree_id,    i->iter->btree_id) ?:
+                    bkey_cmp(pos,              i->k->k.p)) <= 0) {
+                       if (iter->btree_id ==   i->iter->btree_id)
+                               return i->k;
+                       break;
+               }
 
-       iter->pos = pos;
-       iter->flags &= ~BTREE_ITER_UPTODATE;
-       __btree_iter_init(iter, iter->nodes[iter->level]);
+       return NULL;
 }
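
This is how uncommitted updates become visible to reads within a transaction: the update list is kept sorted by (btree_id, pos), btree_trans_peek_updates() finds the first pending update at or after the search position, and bch2_btree_iter_peek() below returns whichever candidate sorts first, preferring the update on a tie since it will overwrite the committed key. The tie-break in miniature - a sketch with a hypothetical key type:

	struct sketch_key {
		unsigned long long pos;
	};

	/* Given the next committed key and the next pending update (either may
	 * be missing), return what peek should see: */
	static const struct sketch_key *
	overlay_peek(const struct sketch_key *committed,
		     const struct sketch_key *update)
	{
		if (!update)
			return committed;
		if (!committed)
			return update;

		return update->pos <= committed->pos ? update : committed;
	}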
 
+/**
+ * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
+ * current position
+ */
 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
 {
+       struct bpos search_key = btree_iter_search_key(iter);
+       struct bkey_i *next_update;
        struct bkey_s_c k;
        int ret;
 
-       EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
-               (iter->btree_id == BTREE_ID_EXTENTS));
+       EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+       bch2_btree_iter_verify(iter);
+       bch2_btree_iter_verify_entry_exit(iter);
+start:
+       next_update = btree_trans_peek_updates(iter, search_key);
+       btree_iter_set_search_pos(iter, search_key);
 
-       if (iter->flags & BTREE_ITER_UPTODATE) {
-               struct btree *b = iter->nodes[0];
-               struct bkey_packed *k =
-                       __bch2_btree_node_iter_peek_all(&iter->node_iters[0], b);
-               struct bkey_s_c ret = {
-                       .k = &iter->k,
-                       .v = bkeyp_val(&b->format, k)
-               };
+       while (1) {
+               ret = btree_iter_traverse(iter);
+               if (unlikely(ret))
+                       return bkey_s_c_err(ret);
 
-               EBUG_ON(!btree_node_locked(iter, 0));
+               k = btree_iter_level_peek(iter, &iter->l[0]);
 
-               if (debug_check_bkeys(iter->c))
-                       bch2_bkey_debugcheck(iter->c, b, ret);
-               return ret;
+               if (next_update &&
+                   bpos_cmp(next_update->k.p, iter->real_pos) <= 0) {
+                       iter->k = next_update->k;
+                       k = bkey_i_to_s_c(next_update);
+               }
+
+               if (likely(k.k)) {
+                       if (bkey_deleted(k.k)) {
+                               search_key = bkey_successor(iter, k.k->p);
+                               goto start;
+                       }
+
+                       break;
+               }
+
+               if (!btree_iter_set_pos_to_next_leaf(iter))
+                       return bkey_s_c_null;
        }
 
+       /*
+        * iter->pos should be monotonically increasing, and always be equal to
+        * the key we just returned - except extents can straddle iter->pos:
+        */
+       if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+               iter->pos = k.k->p;
+       else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+               iter->pos = bkey_start_pos(k.k);
+
+       bch2_btree_iter_verify_entry_exit(iter);
+       bch2_btree_iter_verify(iter);
+       iter->should_be_locked = true;
+       return k;
+}
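
A typical consumer of this function - a sketch assuming the bch2_trans_get_iter()/bch2_trans_iter_put() wrappers declared in btree_iter.h (not part of this hunk), an already-initialized transaction, and whatever btree id the caller wants:

	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
				   BTREE_ITER_PREFETCH);

	while ((k = bch2_btree_iter_peek(iter)).k) {
		ret = bkey_err(k);
		if (ret)
			break;		/* e.g. -EINTR: restart the transaction */

		/* ... use k ... */

		bch2_btree_iter_advance(iter);
	}

	ret = bch2_trans_iter_put(&trans, iter) ?: ret;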
+
+/**
+ * bch2_btree_iter_next: returns first key greater than iterator's current
+ * position
+ */
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+{
+       if (!bch2_btree_iter_advance(iter))
+               return bkey_s_c_null;
+
+       return bch2_btree_iter_peek(iter);
+}
+
+/**
+ * bch2_btree_iter_peek_prev: returns first key less than or equal to
+ * iterator's current position
+ */
+struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
+{
+       struct btree_iter_level *l = &iter->l[0];
+       struct bkey_s_c k;
+       int ret;
+
+       EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+       EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
+       bch2_btree_iter_verify(iter);
+       bch2_btree_iter_verify_entry_exit(iter);
+
+       btree_iter_set_search_pos(iter, iter->pos);
+
        while (1) {
-               ret = bch2_btree_iter_traverse(iter);
+               ret = btree_iter_traverse(iter);
                if (unlikely(ret)) {
-                       iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
-                       return bkey_s_c_err(ret);
+                       k = bkey_s_c_err(ret);
+                       goto no_key;
                }
 
-               k = __btree_iter_peek(iter);
-               if (likely(k.k)) {
-                       /*
-                        * iter->pos should always be equal to the key we just
-                        * returned - except extents can straddle iter->pos:
-                        */
-                       if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
-                           bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
-                               iter->pos = bkey_start_pos(k.k);
+               k = btree_iter_level_peek(iter, l);
+               if (!k.k ||
+                   ((iter->flags & BTREE_ITER_IS_EXTENTS)
+                    ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
+                    : bkey_cmp(k.k->p, iter->pos) > 0))
+                       k = btree_iter_level_prev(iter, l);
 
-                       iter->flags |= BTREE_ITER_UPTODATE;
-                       return k;
+               if (likely(k.k))
+                       break;
+
+               if (!btree_iter_set_pos_to_prev_leaf(iter)) {
+                       k = bkey_s_c_null;
+                       goto no_key;
                }
+       }
 
-               iter->pos = iter->nodes[0]->key.k.p;
+       EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
 
-               if (!bkey_cmp(iter->pos, POS_MAX)) {
-                       iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
-                       bch2_btree_iter_unlock(iter);
-                       return bkey_s_c_null;
-               }
+       /* Extents can straddle iter->pos: */
+       if (bkey_cmp(k.k->p, iter->pos) < 0)
+               iter->pos = k.k->p;
+out:
+       bch2_btree_iter_verify_entry_exit(iter);
+       bch2_btree_iter_verify(iter);
+       iter->should_be_locked = true;
+       return k;
+no_key:
+       /*
+        * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
+        * then we errored going to the previous leaf - make sure it's
+        * consistent with iter->pos:
+        */
+       bkey_init(&iter->k);
+       iter->k.p = iter->pos;
+       goto out;
+}
 
-               iter->pos = btree_type_successor(iter->btree_id, iter->pos);
-       }
+/**
+ * bch2_btree_iter_prev: returns first key less than iterator's current
+ * position
+ */
+struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
+{
+       if (!bch2_btree_iter_rewind(iter))
+               return bkey_s_c_null;
+
+       return bch2_btree_iter_peek_prev(iter);
 }
 
-struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 {
+       struct bpos search_key;
        struct bkey_s_c k;
-       struct bkey n;
        int ret;
 
-       EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
-               (iter->btree_id == BTREE_ID_EXTENTS));
+       EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS &&
+               btree_iter_type(iter) != BTREE_ITER_CACHED);
+       bch2_btree_iter_verify(iter);
+       bch2_btree_iter_verify_entry_exit(iter);
 
-       iter->flags &= ~BTREE_ITER_UPTODATE;
+       /* extents can't span inode numbers: */
+       if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
+           unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
+               if (iter->pos.inode == KEY_INODE_MAX)
+                       return bkey_s_c_null;
 
-       while (1) {
-               ret = bch2_btree_iter_traverse(iter);
-               if (unlikely(ret)) {
-                       iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
-                       return bkey_s_c_err(ret);
+               bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+       }
+
+       search_key = btree_iter_search_key(iter);
+       btree_iter_set_search_pos(iter, search_key);
+
+       ret = btree_iter_traverse(iter);
+       if (unlikely(ret))
+               return bkey_s_c_err(ret);
+
+       if (btree_iter_type(iter) == BTREE_ITER_CACHED ||
+           !(iter->flags & BTREE_ITER_IS_EXTENTS)) {
+               struct bkey_i *next_update;
+               struct bkey_cached *ck;
+
+               switch (btree_iter_type(iter)) {
+               case BTREE_ITER_KEYS:
+                       k = btree_iter_level_peek_all(iter, &iter->l[0]);
+                       EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
+                       break;
+               case BTREE_ITER_CACHED:
+                       ck = (void *) iter->l[0].b;
+                       EBUG_ON(iter->btree_id != ck->key.btree_id ||
+                               bkey_cmp(iter->pos, ck->key.pos));
+                       BUG_ON(!ck->valid);
+
+                       k = bkey_i_to_s_c(ck->k);
+                       break;
+               case BTREE_ITER_NODES:
+                       BUG();
                }
 
-               k = __btree_iter_peek_all(iter);
-recheck:
-               if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
-                       /* hole */
-                       bkey_init(&n);
-                       n.p = iter->pos;
-
-                       if (iter->flags & BTREE_ITER_IS_EXTENTS) {
-                               if (n.p.offset == KEY_OFFSET_MAX) {
-                                       iter->pos = bkey_successor(iter->pos);
-                                       goto recheck;
-                               }
+               next_update = btree_trans_peek_updates(iter, search_key);
+               if (next_update &&
+                   (!k.k || bpos_cmp(next_update->k.p, k.k->p) <= 0)) {
+                       iter->k = next_update->k;
+                       k = bkey_i_to_s_c(next_update);
+               }
+       } else {
+               if ((iter->flags & BTREE_ITER_INTENT)) {
+                       struct btree_iter *child =
+                               btree_iter_child_alloc(iter, _THIS_IP_);
 
-                               if (!k.k)
-                                       k.k = &iter->nodes[0]->key.k;
+                       btree_iter_copy(child, iter);
+                       k = bch2_btree_iter_peek(child);
 
-                               bch2_key_resize(&n,
-                                      min_t(u64, KEY_SIZE_MAX,
-                                            (k.k->p.inode == n.p.inode
-                                             ? bkey_start_offset(k.k)
-                                             : KEY_OFFSET_MAX) -
-                                            n.p.offset));
+                       if (k.k && !bkey_err(k))
+                               iter->k = child->k;
+               } else {
+                       struct bpos pos = iter->pos;
 
-                               EBUG_ON(!n.size);
-                       }
+                       k = bch2_btree_iter_peek(iter);
+                       iter->pos = pos;
+               }
 
-                       iter->k = n;
-                       return (struct bkey_s_c) { &iter->k, NULL };
-               } else if (!bkey_deleted(k.k)) {
+               if (unlikely(bkey_err(k)))
                        return k;
-               } else {
-                       __btree_iter_advance(iter);
+       }
+
+       if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) {
+               if (!k.k ||
+                   ((iter->flags & BTREE_ITER_ALL_SNAPSHOTS)
+                    ? bpos_cmp(iter->pos, k.k->p)
+                    : bkey_cmp(iter->pos, k.k->p))) {
+                       bkey_init(&iter->k);
+                       iter->k.p = iter->pos;
+                       k = (struct bkey_s_c) { &iter->k, NULL };
+               }
+       } else {
+               struct bpos next = k.k ? bkey_start_pos(k.k) : POS_MAX;
+
+               if (bkey_cmp(iter->pos, next) < 0) {
+                       bkey_init(&iter->k);
+                       iter->k.p = iter->pos;
+                       bch2_key_resize(&iter->k,
+                                       min_t(u64, KEY_SIZE_MAX,
+                                             (next.inode == iter->pos.inode
+                                              ? next.offset
+                                              : KEY_OFFSET_MAX) -
+                                             iter->pos.offset));
+
+                       k = (struct bkey_s_c) { &iter->k, NULL };
+                       EBUG_ON(!k.k->size);
                }
        }
+
+       bch2_btree_iter_verify_entry_exit(iter);
+       bch2_btree_iter_verify(iter);
+       iter->should_be_locked = true;
+
+       return k;
 }
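
Unlike bch2_btree_iter_peek(), peek_slot() always returns a key for exactly iter->pos, synthesizing one when nothing is stored there. A sketch of how a caller distinguishes the two cases - the hole test via bkey_deleted() is an assumption based on the bkey_init() calls above, which leave the synthesized key deleted with a NULL value:

	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_deleted(k.k)) {
		/* hole: iter->k was synthesized, k.v is NULL */
	} else {
		/* real key stored at iter->pos */
	}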
 
-void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
-                           enum btree_id btree_id, struct bpos pos,
-                           unsigned locks_want, unsigned depth,
-                           unsigned flags)
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 {
-       EBUG_ON(depth >= BTREE_MAX_DEPTH);
-       EBUG_ON(locks_want > BTREE_MAX_DEPTH);
+       if (!bch2_btree_iter_advance(iter))
+               return bkey_s_c_null;
+
+       return bch2_btree_iter_peek_slot(iter);
+}
 
-       iter->c                         = c;
-       iter->pos                       = pos;
-       iter->flags                     = flags;
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
+{
+       if (!bch2_btree_iter_rewind(iter))
+               return bkey_s_c_null;
+
+       return bch2_btree_iter_peek_slot(iter);
+}
+
+static inline void bch2_btree_iter_init(struct btree_trans *trans,
+                       struct btree_iter *iter, enum btree_id btree_id)
+{
+       struct bch_fs *c = trans->c;
+       unsigned i;
+
+       iter->trans                     = trans;
+       iter->uptodate                  = BTREE_ITER_NEED_TRAVERSE;
        iter->btree_id                  = btree_id;
-       iter->level                     = depth;
-       iter->locks_want                = locks_want;
+       iter->real_pos                  = POS_MIN;
+       iter->level                     = 0;
+       iter->min_depth                 = 0;
+       iter->locks_want                = 0;
        iter->nodes_locked              = 0;
        iter->nodes_intent_locked       = 0;
-       memset(iter->nodes, 0, sizeof(iter->nodes));
-       iter->nodes[iter->level]        = BTREE_ITER_NOT_END;
-       iter->next                      = iter;
+       for (i = 0; i < ARRAY_SIZE(iter->l); i++)
+               iter->l[i].b            = BTREE_ITER_NO_NODE_INIT;
 
        prefetch(c->btree_roots[btree_id].b);
 }
 
-void bch2_btree_iter_unlink(struct btree_iter *iter)
+/* new transactional stuff: */
+
+static void btree_iter_child_free(struct btree_iter *iter)
 {
-       struct btree_iter *linked;
+       struct btree_iter *child = btree_iter_child(iter);
 
-       __bch2_btree_iter_unlock(iter);
+       if (child) {
+               bch2_trans_iter_free(iter->trans, child);
+               iter->child_idx = U8_MAX;
+       }
+}
 
-       if (!btree_iter_linked(iter))
-               return;
+static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
+                                                unsigned long ip)
+{
+       struct btree_trans *trans = iter->trans;
+       struct btree_iter *child = btree_iter_child(iter);
 
-       for_each_linked_btree_iter(iter, linked) {
+       if (!child) {
+               child = btree_trans_iter_alloc(trans);
+               child->ip_allocated     = ip;
+               iter->child_idx         = child->idx;
 
-               if (linked->next == iter) {
-                       linked->next = iter->next;
-                       return;
-               }
+               trans->iters_live       |= 1ULL << child->idx;
+               trans->iters_touched    |= 1ULL << child->idx;
        }
 
-       BUG();
+       return child;
+}
+
+static inline void __bch2_trans_iter_free(struct btree_trans *trans,
+                                         unsigned idx)
+{
+       btree_iter_child_free(&trans->iters[idx]);
+
+       __bch2_btree_iter_unlock(&trans->iters[idx]);
+       trans->iters_linked             &= ~(1ULL << idx);
+       trans->iters_live               &= ~(1ULL << idx);
+       trans->iters_touched            &= ~(1ULL << idx);
+}
+
+int bch2_trans_iter_put(struct btree_trans *trans,
+                       struct btree_iter *iter)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(iter))
+               return 0;
+
+       BUG_ON(trans->iters + iter->idx != iter);
+       BUG_ON(!btree_iter_live(trans, iter));
+
+       ret = btree_iter_err(iter);
+
+       if (!(trans->iters_touched & (1ULL << iter->idx)) &&
+           !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
+               __bch2_trans_iter_free(trans, iter->idx);
+
+       trans->iters_live       &= ~(1ULL << iter->idx);
+       return ret;
 }
 
-void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
+int bch2_trans_iter_free(struct btree_trans *trans,
+                        struct btree_iter *iter)
 {
-       BUG_ON(btree_iter_linked(new));
+       if (IS_ERR_OR_NULL(iter))
+               return 0;
 
-       new->next = iter->next;
-       iter->next = new;
+       set_btree_iter_dontneed(trans, iter);
 
-       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
-               unsigned nr_iters = 1;
+       return bch2_trans_iter_put(trans, iter);
+}
 
-               for_each_linked_btree_iter(iter, new)
-                       nr_iters++;
+noinline __cold
+static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
+       struct btree_iter *iter;
+       struct btree_insert_entry *i;
+       char buf[100];
+
+       trans_for_each_iter(trans, iter)
+               printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
+                      bch2_btree_ids[iter->btree_id],
+                      (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf),
+                      btree_iter_live(trans, iter) ? " live" : "",
+                      (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
+                      iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
+                      (void *) iter->ip_allocated);
+
+       trans_for_each_update(trans, i) {
+               char buf[300];
+
+               bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
+               printk(KERN_ERR "update: btree %s %s\n",
+                      bch2_btree_ids[i->iter->btree_id], buf);
        }
+       panic("trans iter oveflow\n");
+}
+
+static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
+       unsigned idx;
+
+       if (unlikely(trans->iters_linked ==
+                    ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
+               btree_trans_iter_alloc_fail(trans);
+
+       idx = __ffs64(~trans->iters_linked);
+       iter = &trans->iters[idx];
+
+       iter->trans             = trans;
+       iter->idx               = idx;
+       iter->child_idx         = U8_MAX;
+       iter->flags             = 0;
+       iter->nodes_locked      = 0;
+       iter->nodes_intent_locked = 0;
+       trans->iters_linked     |= 1ULL << idx;
+       return iter;
 }
 
-void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
+static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
 {
+       unsigned i;
+
        __bch2_btree_iter_unlock(dst);
-       memcpy(dst, src, offsetof(struct btree_iter, next));
-       dst->nodes_locked = dst->nodes_intent_locked = 0;
+       btree_iter_child_free(dst);
+
+       memcpy(&dst->flags, &src->flags,
+              sizeof(struct btree_iter) - offsetof(struct btree_iter, flags));
+
+       for (i = 0; i < BTREE_MAX_DEPTH; i++)
+               if (btree_node_locked(dst, i))
+                       six_lock_increment(&dst->l[i].b->c.lock,
+                                          __btree_lock_want(dst, i));
+
+       dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+       dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
+}
+
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
+                                        unsigned btree_id, struct bpos pos,
+                                        unsigned locks_want,
+                                        unsigned depth,
+                                        unsigned flags)
+{
+       struct btree_iter *iter, *best = NULL;
+       struct bpos real_pos, pos_min = POS_MIN;
+
+       if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
+           btree_node_type_is_extents(btree_id) &&
+           !(flags & BTREE_ITER_NOT_EXTENTS) &&
+           !(flags & BTREE_ITER_ALL_SNAPSHOTS))
+               flags |= BTREE_ITER_IS_EXTENTS;
+
+       if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
+           !btree_type_has_snapshots(btree_id))
+               flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
+
+       if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
+               pos.snapshot = btree_type_has_snapshots(btree_id)
+                       ? U32_MAX : 0;
+
+       real_pos = pos;
+
+       if ((flags & BTREE_ITER_IS_EXTENTS) &&
+           bkey_cmp(pos, POS_MAX))
+               real_pos = bpos_nosnap_successor(pos);
+
+       trans_for_each_iter(trans, iter) {
+               if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
+                       continue;
+
+               if (iter->btree_id != btree_id)
+                       continue;
+
+               if (best) {
+                       int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
+                                          bpos_diff(iter->real_pos, real_pos));
+
+                       if (cmp < 0 ||
+                           (cmp == 0 && btree_iter_keep(trans, iter)))
+                               continue;
+               }
+
+               best = iter;
+       }
+
+       if (!best) {
+               iter = btree_trans_iter_alloc(trans);
+               bch2_btree_iter_init(trans, iter, btree_id);
+       } else if (btree_iter_keep(trans, best)) {
+               iter = btree_trans_iter_alloc(trans);
+               btree_iter_copy(iter, best);
+       } else {
+               iter = best;
+       }
+
+       trans->iters_live       |= 1ULL << iter->idx;
+       trans->iters_touched    |= 1ULL << iter->idx;
+
+       iter->flags = flags;
+
+       iter->snapshot = pos.snapshot;
+
+       /*
+        * If the iterator has locks_want greater than requested, we explicitly
+        * do not downgrade it here - on transaction restart because btree node
+        * split needs to upgrade locks, we might be putting/getting the
+        * iterator again. Downgrading iterators only happens via an explicit
+        * bch2_trans_downgrade().
+        */
+
+       locks_want = min(locks_want, BTREE_MAX_DEPTH);
+       if (locks_want > iter->locks_want) {
+               iter->locks_want = locks_want;
+               btree_iter_get_locks(iter, true, _THIS_IP_);
+       }
+
+       while (iter->level != depth) {
+               btree_node_unlock(iter, iter->level);
+               iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
+               iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
+               if (iter->level < depth)
+                       iter->level++;
+               else
+                       iter->level--;
+       }
+
+       iter->min_depth = depth;
+
+       bch2_btree_iter_set_pos(iter, pos);
+       btree_iter_set_search_pos(iter, real_pos);
+
+       trace_trans_get_iter(_RET_IP_, trans->ip,
+                            btree_id,
+                            &real_pos, locks_want, iter->uptodate,
+                            best ? &best->real_pos     : &pos_min,
+                            best ? best->locks_want    : U8_MAX,
+                            best ? best->uptodate      : U8_MAX);
+
+       return iter;
+}
+
+struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
+                                           enum btree_id btree_id,
+                                           struct bpos pos,
+                                           unsigned locks_want,
+                                           unsigned depth,
+                                           unsigned flags)
+{
+       struct btree_iter *iter =
+               __bch2_trans_get_iter(trans, btree_id, pos,
+                                     locks_want, depth,
+                                     BTREE_ITER_NODES|
+                                     BTREE_ITER_NOT_EXTENTS|
+                                     BTREE_ITER_ALL_SNAPSHOTS|
+                                     flags);
+
+       BUG_ON(bkey_cmp(iter->pos, pos));
+       BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
+       BUG_ON(iter->level      != depth);
+       BUG_ON(iter->min_depth  != depth);
+       iter->ip_allocated = _RET_IP_;
+
+       return iter;
+}
+
+struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
+                                       struct btree_iter *src)
+{
+       struct btree_iter *iter;
+
+       iter = btree_trans_iter_alloc(trans);
+       btree_iter_copy(iter, src);
+
+       trans->iters_live |= 1ULL << iter->idx;
+       /*
+        * We don't need to preserve this iter since it's cheap to copy it
+        * again - this will cause trans_iter_put() to free it right away:
+        */
+       set_btree_iter_dontneed(trans, iter);
+
+       return iter;
+}
+
+void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
+{
+       size_t new_top = trans->mem_top + size;
+       void *p;
+
+       if (new_top > trans->mem_bytes) {
+               size_t old_bytes = trans->mem_bytes;
+               size_t new_bytes = roundup_pow_of_two(new_top);
+               void *new_mem;
+
+               WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
+
+               new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+               if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+                       new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+                       new_bytes = BTREE_TRANS_MEM_MAX;
+                       kfree(trans->mem);
+               }
+
+               if (!new_mem)
+                       return ERR_PTR(-ENOMEM);
+
+               trans->mem = new_mem;
+               trans->mem_bytes = new_bytes;
+
+               if (old_bytes) {
+                       trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
+                       return ERR_PTR(-EINTR);
+               }
+       }
+
+       p = trans->mem + trans->mem_top;
+       trans->mem_top += size;
+       memset(p, 0, size);
+       return p;
+}
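
bch2_trans_kmalloc() is a bump allocator whose memory lives until the transaction is reset; the -EINTR above signals that the buffer was reallocated mid-transaction, invalidating pointers handed out earlier, so the whole transaction must restart. A sketch of the expected calling convention:

	struct bkey_i *new_key;
	int ret;

	new_key = bch2_trans_kmalloc(&trans, sizeof(*new_key));
	ret = PTR_ERR_OR_ZERO(new_key);
	if (ret)
		return ret;	/* -ENOMEM, or -EINTR => restart the transaction */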
+
+inline void bch2_trans_unlink_iters(struct btree_trans *trans)
+{
+       u64 iters = trans->iters_linked &
+               ~trans->iters_touched &
+               ~trans->iters_live;
+
+       while (iters) {
+               unsigned idx = __ffs64(iters);
+
+               iters &= ~(1ULL << idx);
+               __bch2_trans_iter_free(trans, idx);
+       }
+}
+
+void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
+{
+       struct btree_iter *iter;
+
+       trans_for_each_iter(trans, iter) {
+               iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
+                                BTREE_ITER_SET_POS_AFTER_COMMIT);
+               iter->should_be_locked = false;
+       }
+
+       bch2_trans_unlink_iters(trans);
+
+       trans->iters_touched &= trans->iters_live;
+
+       trans->extra_journal_res        = 0;
+       trans->nr_updates               = 0;
+       trans->mem_top                  = 0;
+
+       trans->hooks                    = NULL;
+       trans->extra_journal_entries    = NULL;
+       trans->extra_journal_entry_u64s = 0;
+
+       if (trans->fs_usage_deltas) {
+               trans->fs_usage_deltas->used = 0;
+               memset(&trans->fs_usage_deltas->memset_start, 0,
+                      (void *) &trans->fs_usage_deltas->memset_end -
+                      (void *) &trans->fs_usage_deltas->memset_start);
+       }
+
+       if (!(flags & TRANS_RESET_NOUNLOCK))
+               bch2_trans_cond_resched(trans);
+
+       if (!(flags & TRANS_RESET_NOTRAVERSE) &&
+           trans->iters_linked)
+               bch2_btree_iter_traverse_all(trans);
+}
+
+static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
+{
+       size_t iters_bytes      = sizeof(struct btree_iter) * BTREE_ITER_MAX;
+       size_t updates_bytes    = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
+       void *p = NULL;
+
+       BUG_ON(trans->used_mempool);
+
+#ifdef __KERNEL__
+       p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
+#endif
+       if (!p)
+               p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
+
+       trans->iters            = p; p += iters_bytes;
+       trans->updates          = p; p += updates_bytes;
+}
+
+void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
+                    unsigned expected_nr_iters,
+                    size_t expected_mem_bytes)
+       __acquires(&c->btree_trans_barrier)
+{
+       memset(trans, 0, sizeof(*trans));
+       trans->c                = c;
+       trans->ip               = _RET_IP_;
+
+       /*
+        * reallocating iterators currently completely breaks
+        * bch2_trans_iter_put(), so we always allocate the max:
+        */
+       bch2_trans_alloc_iters(trans, c);
+
+       if (expected_mem_bytes) {
+               trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
+               trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
+
+               if (unlikely(!trans->mem)) {
+                       trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
+                       trans->mem_bytes = BTREE_TRANS_MEM_MAX;
+               }
+       }
+
+       trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+       trans->pid = current->pid;
+       mutex_lock(&c->btree_trans_lock);
+       list_add(&trans->list, &c->btree_trans_list);
+       mutex_unlock(&c->btree_trans_lock);
+#endif
+}
+
+int bch2_trans_exit(struct btree_trans *trans)
+       __releases(&c->btree_trans_barrier)
+{
+       struct bch_fs *c = trans->c;
+
+       bch2_trans_unlock(trans);
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+       if (trans->iters_live) {
+               struct btree_iter *iter;
+
+               trans_for_each_iter(trans, iter)
+                       btree_iter_child_free(iter);
+       }
+
+       if (trans->iters_live) {
+               struct btree_iter *iter;
+
+               bch_err(c, "btree iterators leaked!");
+               trans_for_each_iter(trans, iter)
+                       if (btree_iter_live(trans, iter))
+                               printk(KERN_ERR "  btree %s allocated at %pS\n",
+                                      bch2_btree_ids[iter->btree_id],
+                                      (void *) iter->ip_allocated);
+               /* Be noisy about this: */
+               bch2_fatal_error(c);
+       }
+
+       mutex_lock(&c->btree_trans_lock);
+       list_del(&trans->list);
+       mutex_unlock(&c->btree_trans_lock);
+#endif
+
+       srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+
+       bch2_journal_preres_put(&c->journal, &trans->journal_preres);
+
+       if (trans->fs_usage_deltas) {
+               /* Max-sized delta lists come from the mempool: */
+               if (trans->fs_usage_deltas->size + sizeof(*trans->fs_usage_deltas) ==
+                   REPLICAS_DELTA_LIST_MAX)
+                       mempool_free(trans->fs_usage_deltas,
+                                    &c->replicas_delta_pool);
+               else
+                       kfree(trans->fs_usage_deltas);
+       }
+
+       if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
+               mempool_free(trans->mem, &c->btree_trans_mem_pool);
+       else
+               kfree(trans->mem);
+
+#ifdef __KERNEL__
+       /*
+        * Stash the iterator buffer in the per-cpu cache for the next
+        * transaction to reuse; kernel only, as userspace doesn't have a
+        * real percpu implementation:
+        */
+       trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
+#endif
+
+       if (trans->iters)
+               mempool_free(trans->iters, &c->btree_iters_pool);
+
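+       /* Poison the pointers, to catch use-after-exit: */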
+       trans->mem      = (void *) 0x1;
+       trans->iters    = (void *) 0x1;
+
+       return trans->error ? -EIO : 0;
+}
+
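+/*
+ * Print a locked node's level, btree id and position, for the lock debugging
+ * output below:
+ */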
+static void __maybe_unused
+bch2_btree_iter_node_to_text(struct printbuf *out,
+                            struct btree_bkey_cached_common *_b,
+                            enum btree_iter_type type)
+{
+       pr_buf(out, "    l=%u %s:",
+              _b->level, bch2_btree_ids[_b->btree_id]);
+       bch2_bpos_to_text(out, btree_node_pos(_b, type));
+}
+
+#ifdef CONFIG_BCACHEFS_DEBUG
+static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
+
+       trans_for_each_iter(trans, iter)
+               if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
+                   iter->nodes_locked)
+                       return true;
+       return false;
+}
+#endif
+
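+/*
+ * Dump every transaction currently holding btree node locks - a debugging
+ * aid for lock-related hangs (in debug builds this is assumed to be exported
+ * via debugfs):
+ */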
+void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+       struct btree_trans *trans;
+       struct btree_iter *iter;
+       struct btree *b;
+       unsigned l;
+
+       mutex_lock(&c->btree_trans_lock);
+       list_for_each_entry(trans, &c->btree_trans_list, list) {
+               if (!trans_has_btree_nodes_locked(trans))
+                       continue;
+
+               pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
+
+               trans_for_each_iter(trans, iter) {
+                       if (!iter->nodes_locked)
+                               continue;
+
+                       pr_buf(out, "  iter %u %c %s:",
+                              iter->idx,
+                              btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
+                              bch2_btree_ids[iter->btree_id]);
+                       bch2_bpos_to_text(out, iter->pos);
+                       pr_buf(out, "\n");
+
+                       for (l = 0; l < BTREE_MAX_DEPTH; l++) {
+                               if (btree_node_locked(iter, l)) {
+                                       pr_buf(out, "    %s l=%u ",
+                                              btree_node_intent_locked(iter, l) ? "i" : "r", l);
+                                       bch2_btree_iter_node_to_text(out,
+                                                       (void *) iter->l[l].b,
+                                                       btree_iter_type(iter));
+                                       pr_buf(out, "\n");
+                               }
+                       }
+               }
+
+               b = READ_ONCE(trans->locking);
+               if (b) {
+                       iter = &trans->iters[trans->locking_iter_idx];
+                       pr_buf(out, "  locking iter %u %c l=%u %s:",
+                              trans->locking_iter_idx,
+                              btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
+                              trans->locking_level,
+                              bch2_btree_ids[trans->locking_btree_id]);
+                       bch2_bpos_to_text(out, trans->locking_pos);
+
+                       pr_buf(out, " node ");
+                       bch2_btree_iter_node_to_text(out,
+                                       (void *) b,
+                                       btree_iter_type(iter));
+                       pr_buf(out, "\n");
+               }
+       }
+       mutex_unlock(&c->btree_trans_lock);
+#endif
+}
+
+void bch2_fs_btree_iter_exit(struct bch_fs *c)
+{
+       mempool_exit(&c->btree_trans_mem_pool);
+       mempool_exit(&c->btree_iters_pool);
+       cleanup_srcu_struct(&c->btree_trans_barrier);
+}
+
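+/*
+ * Note: the btree_iters_pool element size must match the layout carved up by
+ * bch2_trans_alloc_iters(): an array of BTREE_ITER_MAX iterators followed by
+ * an array of BTREE_ITER_MAX insert entries.
+ */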
+int bch2_fs_btree_iter_init(struct bch_fs *c)
+{
+       unsigned nr = BTREE_ITER_MAX;
+
+       INIT_LIST_HEAD(&c->btree_trans_list);
+       mutex_init(&c->btree_trans_lock);
+
+       return  init_srcu_struct(&c->btree_trans_barrier) ?:
+               mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
+                       sizeof(struct btree_iter) * nr +
+                       sizeof(struct btree_insert_entry) * nr) ?:
+               mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
+                                         BTREE_TRANS_MEM_MAX);
 }