Update bcachefs sources to 4837f82ee1 bcachefs: Use cached iterators for alloc btree
diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index 6abcbe3debe5ea0e9c5ffa4e60e7b6e9be5d8f19..e620088d3116b6b1a3021872a5b8917f9dbd52ad 100644
@@ -4,22 +4,16 @@
 #include "bkey_methods.h"
 #include "btree_cache.h"
 #include "btree_iter.h"
+#include "btree_key_cache.h"
 #include "btree_locking.h"
 #include "btree_update.h"
 #include "debug.h"
 #include "extents.h"
+#include "journal.h"
 
 #include <linux/prefetch.h>
 #include <trace/events/bcachefs.h>
 
-#define BTREE_ITER_NO_NODE_GET_LOCKS   ((struct btree *) 1)
-#define BTREE_ITER_NO_NODE_DROP                ((struct btree *) 2)
-#define BTREE_ITER_NO_NODE_LOCK_ROOT   ((struct btree *) 3)
-#define BTREE_ITER_NO_NODE_UP          ((struct btree *) 4)
-#define BTREE_ITER_NO_NODE_DOWN                ((struct btree *) 5)
-#define BTREE_ITER_NO_NODE_INIT                ((struct btree *) 6)
-#define BTREE_ITER_NO_NODE_ERROR       ((struct btree *) 7)
-
 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 {
        return l < BTREE_MAX_DEPTH &&
@@ -51,7 +45,7 @@ static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
 static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
                                          struct btree *b)
 {
-       return iter->btree_id == b->btree_id &&
+       return iter->btree_id == b->c.btree_id &&
                !btree_iter_pos_before_node(iter, b) &&
                !btree_iter_pos_after_node(iter, b);
 }
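
A note on the pervasive b->level to b->c.level and b->lock to b->c.lock renames in this patch: the fields shared between real btree nodes and the new cached keys appear to have moved into a common embedded struct, so the locking code can treat either uniformly; see lock_root_check_fn() below recovering a node via container_of(lock, struct btree, c.lock), and bch2_btree_iter_peek_cached() casting iter->l[0].b straight to a struct bkey_cached. A sketch of the assumed layout:

struct btree_bkey_cached_common {
	struct six_lock		lock;
	u8			level;
	u8			btree_id;
};

struct btree {
	struct btree_bkey_cached_common c;	/* must be the first field */
	/* ... fields only real btree nodes have ... */
};

struct bkey_cached {
	struct btree_bkey_cached_common c;	/* must be the first field */
	/* ... key cache fields: key, valid, k, ... */
};
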
@@ -68,11 +62,11 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
        struct btree_iter *linked;
        unsigned readers = 0;
 
-       EBUG_ON(!btree_node_intent_locked(iter, b->level));
+       EBUG_ON(!btree_node_intent_locked(iter, b->c.level));
 
        trans_for_each_iter(iter->trans, linked)
-               if (linked->l[b->level].b == b &&
-                   btree_node_read_locked(linked, b->level))
+               if (linked->l[b->c.level].b == b &&
+                   btree_node_read_locked(linked, b->c.level))
                        readers++;
 
        /*
@@ -82,10 +76,10 @@ void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
         * locked:
         */
        atomic64_sub(__SIX_VAL(read_lock, readers),
-                    &b->lock.state.counter);
+                    &b->c.lock.state.counter);
        btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
        atomic64_add(__SIX_VAL(read_lock, readers),
-                    &b->lock.state.counter);
+                    &b->c.lock.state.counter);
 }
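
The __SIX_VAL() arithmetic here is worth spelling out: a six-lock packs its reader count and the rest of its state into one atomic 64-bit word, and blocking on SIX_LOCK_write would deadlock if this transaction's other iterators hold read locks on b. So the code subtracts our own readers from the state word, blocks until the write lock is acquired, then adds them back. A simplified sketch of the idea, with a hypothetical state layout:

/* Hypothetical six-lock state layout, for illustration only: */
union six_lock_state_sketch {
	atomic64_t	counter;
	struct {
		u32	read_count;	/* readers currently holding the lock */
		u32	rest;		/* intent/write bits, seq, waiters */
	};
};

/*
 * Take the write lock on a node while this transaction itself holds
 * 'readers' read locks on it: hide our own readers so the write lock
 * does not wait for us, then restore the count afterwards.
 */
static void lock_write_despite_own_readers(union six_lock_state_sketch *s,
					   unsigned readers)
{
	atomic64_sub(readers, &s->counter);	/* assumes read_count is the low field */
	/* ... acquire SIX_LOCK_write, waiting out everyone else's readers ... */
	atomic64_add(readers, &s->counter);
}
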
 
 bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
@@ -99,9 +93,9 @@ bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
        if (race_fault())
                return false;
 
-       if (six_relock_type(&b->lock, want, iter->l[level].lock_seq) ||
+       if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(iter, b, level) &&
-            btree_node_lock_increment(iter, b, level, want))) {
+            btree_node_lock_increment(iter->trans, b, level, want))) {
                mark_btree_node_locked(iter, level, want);
                return true;
        } else {
@@ -125,12 +119,12 @@ static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
                return false;
 
        if (btree_node_locked(iter, level)
-           ? six_lock_tryupgrade(&b->lock)
-           : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
+           ? six_lock_tryupgrade(&b->c.lock)
+           : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
                goto success;
 
        if (btree_node_lock_seq_matches(iter, b, level) &&
-           btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
+           btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(iter, level);
                goto success;
        }
@@ -162,7 +156,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
                                                ? 0
                                                : (unsigned long) iter->l[l].b,
                                                is_btree_node(iter, l)
-                                               ? iter->l[l].b->lock.state.seq
+                                               ? iter->l[l].b->c.lock.state.seq
                                                : 0);
 
                        fail_idx = l;
@@ -193,23 +187,21 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
 
 /* Slowpath: */
 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
-                          unsigned level,
-                          struct btree_iter *iter,
-                          enum six_lock_type type)
+                           unsigned level, struct btree_iter *iter,
+                           enum six_lock_type type,
+                           six_lock_should_sleep_fn should_sleep_fn,
+                           void *p)
 {
+       struct btree_trans *trans = iter->trans;
        struct btree_iter *linked;
+       u64 start_time = local_clock();
        bool ret = true;
 
        /* Check if it's safe to block: */
-       trans_for_each_iter(iter->trans, linked) {
+       trans_for_each_iter(trans, linked) {
                if (!linked->nodes_locked)
                        continue;
 
-               /* Must lock btree nodes in key order: */
-               if ((cmp_int(iter->btree_id, linked->btree_id) ?:
-                    bkey_cmp(pos, linked->pos)) < 0)
-                       ret = false;
-
                /*
                 * Can't block taking an intent lock if we have _any_ nodes read
                 * locked:
@@ -224,13 +216,15 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
                 */
                if (type == SIX_LOCK_intent &&
                    linked->nodes_locked != linked->nodes_intent_locked) {
-                       if (!(iter->trans->nounlock)) {
+                       if (!(trans->nounlock)) {
                                linked->locks_want = max_t(unsigned,
                                                linked->locks_want,
                                                __fls(linked->nodes_locked) + 1);
-                               btree_iter_get_locks(linked, true, false);
+                               if (!btree_iter_get_locks(linked, true, false))
+                                       ret = false;
+                       } else {
+                               ret = false;
                        }
-                       ret = false;
                }
 
                /*
@@ -240,14 +234,37 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
                 */
                if (linked->btree_id == iter->btree_id &&
                    level > __fls(linked->nodes_locked)) {
-                       if (!(iter->trans->nounlock)) {
+                       if (!(trans->nounlock)) {
                                linked->locks_want =
                                        max(level + 1, max_t(unsigned,
                                            linked->locks_want,
                                            iter->locks_want));
-                               btree_iter_get_locks(linked, true, false);
+                               if (!btree_iter_get_locks(linked, true, false))
+                                       ret = false;
+                       } else {
+                               ret = false;
                        }
+               }
+
+               /* Must lock btree nodes in key order: */
+               if ((cmp_int(iter->btree_id, linked->btree_id) ?:
+                    -cmp_int(btree_iter_type(iter), btree_iter_type(linked))) < 0)
+                       ret = false;
+
+               if (iter->btree_id == linked->btree_id &&
+                   btree_node_locked(linked, level) &&
+                   bkey_cmp(pos, linked->l[level].b->key.k.p) <= 0)
                        ret = false;
+
+               /*
+                * Recheck if this is a node we already have locked - since one
+                * of the get_locks() calls might've successfully
+                * upgraded/relocked it:
+                */
+               if (linked->l[level].b == b &&
+                   btree_node_locked_type(linked, level) >= type) {
+                       six_lock_increment(&b->c.lock, type);
+                       return true;
                }
        }
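
Two behavioural changes in this function are easy to miss. First, the "must lock btree nodes in key order" check moved below the lock-upgrade attempts and got stricter: ordering is now by btree_id, then iterator type (note the negated comparison), and the position check compares against the last key of the node the linked iterator actually has locked rather than against linked->pos. Second, before giving up, the loop rechecks whether a linked iterator already holds this node at least as strongly, since one of the btree_iter_get_locks() calls above may have just relocked it; if so, the lock is simply incremented. Written out as a comparator, the effective lock order is (a sketch; btree_iter_cmp() used by the traverse-all path below is believed to match):

static int lock_order_cmp(const struct btree_iter *a,
			  const struct btree_iter *b)
{
	return  cmp_int(a->btree_id, b->btree_id) ?:
		-cmp_int(btree_iter_type(a), btree_iter_type(b)) ?:
		bkey_cmp(a->pos, b->pos);
}
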
 
@@ -256,7 +273,14 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
                return false;
        }
 
-       __btree_node_lock_type(iter->trans->c, b, type);
+       if (six_trylock_type(&b->c.lock, type))
+               return true;
+
+       if (six_lock_type(&b->c.lock, type, should_sleep_fn, p))
+               return false;
+
+       bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
+                              start_time);
        return true;
 }
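
__btree_node_lock_type() is gone in favour of an open-coded fast path: try the lock; only on contention call six_lock_type(), whose new should_sleep_fn hook can abort the wait (a nonzero return fails the whole lock attempt); and charge the contended wait to the filesystem's time stats. lock_to_time_stat() is not defined in this file; presumably it maps the lock type onto per-type contention counters, along these lines:

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}
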
 
@@ -267,7 +291,12 @@ static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 {
        unsigned l;
 
-       for (l = 0; btree_iter_node(iter, l); l++) {
+       if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
+               BUG_ON(iter->nodes_locked);
+               return;
+       }
+
+       for (l = 0; is_btree_node(iter, l); l++) {
                if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
                    !btree_node_locked(iter, l))
                        continue;
@@ -281,7 +310,7 @@ void bch2_btree_trans_verify_locks(struct btree_trans *trans)
 {
        struct btree_iter *iter;
 
-       trans_for_each_iter(trans, iter)
+       trans_for_each_iter_all(trans, iter)
                bch2_btree_iter_verify_locks(iter);
 }
 #else
@@ -289,7 +318,7 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif
 
 __flatten
-static bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
+bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
 {
        return btree_iter_get_locks(iter, false, trace);
 }
@@ -349,31 +378,20 @@ bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
 void __bch2_btree_iter_downgrade(struct btree_iter *iter,
                                 unsigned downgrade_to)
 {
-       struct btree_iter *linked;
-       unsigned l;
-
-       /*
-        * We downgrade linked iterators as well because btree_iter_upgrade
-        * might have had to modify locks_want on linked iterators due to lock
-        * ordering:
-        */
-       trans_for_each_iter(iter->trans, linked) {
-               unsigned new_locks_want = downgrade_to ?:
-                       (linked->flags & BTREE_ITER_INTENT ? 1 : 0);
-
-               if (linked->locks_want <= new_locks_want)
-                       continue;
+       unsigned l, new_locks_want = downgrade_to ?:
+               (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
 
-               linked->locks_want = new_locks_want;
+       if (iter->locks_want < downgrade_to) {
+               iter->locks_want = new_locks_want;
 
-               while (linked->nodes_locked &&
-                      (l = __fls(linked->nodes_locked)) >= linked->locks_want) {
-                       if (l > linked->level) {
-                               btree_node_unlock(linked, l);
+               while (iter->nodes_locked &&
+                      (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
+                       if (l > iter->level) {
+                               btree_node_unlock(iter, l);
                        } else {
-                               if (btree_node_intent_locked(linked, l)) {
-                                       six_lock_downgrade(&linked->l[l].b->lock);
-                                       linked->nodes_intent_locked ^= 1 << l;
+                               if (btree_node_intent_locked(iter, l)) {
+                                       six_lock_downgrade(&iter->l[l].b->c.lock);
+                                       iter->nodes_intent_locked ^= 1 << l;
                                }
                                break;
                        }
@@ -383,6 +401,14 @@ void __bch2_btree_iter_downgrade(struct btree_iter *iter,
        bch2_btree_trans_verify_locks(iter->trans);
 }
 
+void bch2_trans_downgrade(struct btree_trans *trans)
+{
+       struct btree_iter *iter;
+
+       trans_for_each_iter(trans, iter)
+               bch2_btree_iter_downgrade(iter);
+}
+
 /* Btree transaction locking: */
 
 bool bch2_trans_relock(struct btree_trans *trans)
@@ -514,7 +540,7 @@ void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
                return;
 
        trans_for_each_iter_with_node(trans, b, iter)
-               bch2_btree_iter_verify_level(iter, b->level);
+               bch2_btree_iter_verify_level(iter, b->c.level);
 }
 
 #else
@@ -545,7 +571,7 @@ static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
                                               struct btree *b,
                                               struct bkey_packed *where)
 {
-       struct btree_iter_level *l = &iter->l[b->level];
+       struct btree_iter_level *l = &iter->l[b->c.level];
        struct bpos pos = btree_iter_search_key(iter);
 
        if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
@@ -565,7 +591,7 @@ void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
 
        trans_for_each_iter_with_node(iter->trans, b, linked) {
                __bch2_btree_iter_fix_key_modified(linked, b, where);
-               bch2_btree_iter_verify_level(linked, b->level);
+               bch2_btree_iter_verify_level(linked, b->c.level);
        }
 }
 
@@ -635,7 +661,7 @@ fixup_done:
         */
        if (!bch2_btree_node_iter_end(node_iter) &&
            iter_current_key_modified &&
-           (b->level ||
+           (b->c.level ||
             btree_node_type_is_extents(iter->btree_id))) {
                struct bset_tree *t;
                struct bkey_packed *k, *k2, *p;
@@ -662,7 +688,7 @@ fixup_done:
                }
        }
 
-       if (!b->level &&
+       if (!b->c.level &&
            node_iter == &iter->l[0].iter &&
            iter_current_key_modified)
                btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
@@ -678,7 +704,7 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
        struct bset_tree *t = bch2_bkey_to_bset(b, where);
        struct btree_iter *linked;
 
-       if (node_iter != &iter->l[b->level].iter) {
+       if (node_iter != &iter->l[b->c.level].iter) {
                __bch2_btree_node_iter_fix(iter, b, node_iter, t,
                                           where, clobber_u64s, new_u64s);
 
@@ -688,9 +714,9 @@ void bch2_btree_node_iter_fix(struct btree_iter *iter,
 
        trans_for_each_iter_with_node(iter->trans, b, linked) {
                __bch2_btree_node_iter_fix(linked, b,
-                                          &linked->l[b->level].iter, t,
+                                          &linked->l[b->c.level].iter, t,
                                           where, clobber_u64s, new_u64s);
-               bch2_btree_iter_verify_level(linked, b->level);
+               bch2_btree_iter_verify_level(linked, b->c.level);
        }
 }
 
@@ -774,7 +800,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
        if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
                return;
 
-       plevel = b->level + 1;
+       plevel = b->c.level + 1;
        if (!btree_iter_node(iter, plevel))
                return;
 
@@ -797,7 +823,7 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
        }
 
        if (!parent_locked)
-               btree_node_unlock(iter, b->level + 1);
+               btree_node_unlock(iter, b->c.level + 1);
 }
 
 static inline void __btree_iter_init(struct btree_iter *iter,
@@ -814,14 +840,16 @@ static inline void __btree_iter_init(struct btree_iter *iter,
 static inline void btree_iter_node_set(struct btree_iter *iter,
                                       struct btree *b)
 {
+       BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);
+
        btree_iter_verify_new_node(iter, b);
 
        EBUG_ON(!btree_iter_pos_in_node(iter, b));
-       EBUG_ON(b->lock.state.seq & 1);
+       EBUG_ON(b->c.lock.state.seq & 1);
 
-       iter->l[b->level].lock_seq = b->lock.state.seq;
-       iter->l[b->level].b = b;
-       __btree_iter_init(iter, b->level);
+       iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
+       iter->l[b->c.level].b = b;
+       __btree_iter_init(iter, b->c.level);
 }
 
 /*
@@ -834,18 +862,19 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
        struct btree_iter *linked;
 
        trans_for_each_iter(iter->trans, linked)
-               if (btree_iter_pos_in_node(linked, b)) {
+               if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
+                   btree_iter_pos_in_node(linked, b)) {
                        /*
                         * bch2_btree_iter_node_drop() has already been called -
                         * the old node we're replacing has already been
                         * unlocked and the pointer invalidated
                         */
-                       BUG_ON(btree_node_locked(linked, b->level));
+                       BUG_ON(btree_node_locked(linked, b->c.level));
 
-                       t = btree_lock_want(linked, b->level);
+                       t = btree_lock_want(linked, b->c.level);
                        if (t != BTREE_NODE_UNLOCKED) {
-                               six_lock_increment(&b->lock, t);
-                               mark_btree_node_locked(linked, b->level, t);
+                               six_lock_increment(&b->c.lock, t);
+                               mark_btree_node_locked(linked, b->c.level, t);
                        }
 
                        btree_iter_node_set(linked, b);
@@ -855,7 +884,7 @@ void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
 void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
 {
        struct btree_iter *linked;
-       unsigned level = b->level;
+       unsigned level = b->c.level;
 
        trans_for_each_iter(iter->trans, linked)
                if (linked->l[level].b == b) {
@@ -873,22 +902,30 @@ void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
        struct btree_iter *linked;
 
        trans_for_each_iter_with_node(iter->trans, b, linked)
-               __btree_iter_init(linked, b->level);
+               __btree_iter_init(linked, b->c.level);
+}
+
+static int lock_root_check_fn(struct six_lock *lock, void *p)
+{
+       struct btree *b = container_of(lock, struct btree, c.lock);
+       struct btree **rootp = p;
+
+       return b == *rootp ? 0 : -1;
 }
 
 static inline int btree_iter_lock_root(struct btree_iter *iter,
                                       unsigned depth_want)
 {
        struct bch_fs *c = iter->trans->c;
-       struct btree *b;
+       struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
        enum six_lock_type lock_type;
        unsigned i;
 
        EBUG_ON(iter->nodes_locked);
 
        while (1) {
-               b = READ_ONCE(c->btree_roots[iter->btree_id].b);
-               iter->level = READ_ONCE(b->level);
+               b = READ_ONCE(*rootp);
+               iter->level = READ_ONCE(b->c.level);
 
                if (unlikely(iter->level < depth_want)) {
                        /*
@@ -905,11 +942,12 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
 
                lock_type = __btree_lock_want(iter, iter->level);
                if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
-                                             iter, lock_type)))
+                                             iter, lock_type,
+                                             lock_root_check_fn, rootp)))
                        return -EINTR;
 
-               if (likely(b == c->btree_roots[iter->btree_id].b &&
-                          b->level == iter->level &&
+               if (likely(b == READ_ONCE(*rootp) &&
+                          b->c.level == iter->level &&
                           !race_fault())) {
                        for (i = 0; i < iter->level; i++)
                                iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
@@ -922,7 +960,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
                        return 0;
                }
 
-               six_unlock_type(&b->lock, lock_type);
+               six_unlock_type(&b->c.lock, lock_type);
        }
 }
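
lock_root_check_fn() is the first user of the should_sleep_fn hook threaded through btree_node_lock() above: while this iterator sleeps waiting for the root's lock, the root can be split and replaced, and the callback lets the waiter notice and abort rather than block on a stale node (btree_iter_lock_root() then rereads *rootp and retries). The same pattern fits any "lock whatever node this pointer currently references" situation; a hypothetical second user might look like:

/* Hypothetical: abort the wait once *slot no longer points at b. */
struct stale_node_check {
	struct btree	**slot;	/* location the node pointer was read from */
	struct btree	*b;	/* the node we are trying to lock */
};

static int stale_node_check_fn(struct six_lock *lock, void *p)
{
	struct stale_node_check *s = p;

	return READ_ONCE(*s->slot) == s->b ? 0 : -1;
}
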
 
@@ -1017,24 +1055,28 @@ static void btree_iter_up(struct btree_iter *iter)
 
 static int btree_iter_traverse_one(struct btree_iter *);
 
-static int __btree_iter_traverse_all(struct btree_trans *trans,
-                                  struct btree_iter *orig_iter, int ret)
+static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
 {
        struct bch_fs *c = trans->c;
        struct btree_iter *iter;
        u8 sorted[BTREE_ITER_MAX];
        unsigned i, nr_sorted = 0;
 
+       if (trans->in_traverse_all)
+               return -EINTR;
+
+       trans->in_traverse_all = true;
+retry_all:
+       nr_sorted = 0;
+
        trans_for_each_iter(trans, iter)
-               sorted[nr_sorted++] = iter - trans->iters;
+               sorted[nr_sorted++] = iter->idx;
 
 #define btree_iter_cmp_by_idx(_l, _r)                          \
                btree_iter_cmp(&trans->iters[_l], &trans->iters[_r])
 
        bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
 #undef btree_iter_cmp_by_idx
-
-retry_all:
        bch2_trans_unlock(trans);
 
        if (unlikely(ret == -ENOMEM)) {
@@ -1050,11 +1092,6 @@ retry_all:
 
        if (unlikely(ret == -EIO)) {
                trans->error = true;
-               if (orig_iter) {
-                       orig_iter->flags |= BTREE_ITER_ERROR;
-                       orig_iter->l[orig_iter->level].b =
-                               BTREE_ITER_NO_NODE_ERROR;
-               }
                goto out;
        }
 
@@ -1062,9 +1099,16 @@ retry_all:
 
        /* Now, redo traversals in correct order: */
        for (i = 0; i < nr_sorted; i++) {
-               iter = &trans->iters[sorted[i]];
+               unsigned idx = sorted[i];
+
+               /*
+                * successfully traversing one iterator can cause another to be
+                * unlinked, in btree_key_cache_fill()
+                */
+               if (!(trans->iters_linked & (1ULL << idx)))
+                       continue;
 
-               ret = btree_iter_traverse_one(iter);
+               ret = btree_iter_traverse_one(&trans->iters[idx]);
                if (ret)
                        goto retry_all;
        }
@@ -1079,12 +1123,14 @@ retry_all:
                        }
 out:
        bch2_btree_cache_cannibalize_unlock(c);
+
+       trans->in_traverse_all = false;
        return ret;
 }
 
 int bch2_btree_iter_traverse_all(struct btree_trans *trans)
 {
-       return __btree_iter_traverse_all(trans, NULL, 0);
+       return __btree_iter_traverse_all(trans, 0);
 }
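
The new trans->in_traverse_all flag is a recursion guard: traversing one iterator can fill key cache entries, and btree_key_cache_fill() (referenced in the comment above) re-enters traversal; returning -EINTR from the nested call unwinds to the outermost attempt, which also re-sorts on each retry because a traversal can unlink other iterators. The guard shape, reduced to a sketch (do_traversals_in_lock_order() is a stand-in for the loop above):

int traverse_all_sketch(struct btree_trans *trans)
{
	int ret;

	if (trans->in_traverse_all)		/* nested call: back off */
		return -EINTR;

	trans->in_traverse_all = true;
	ret = do_traversals_in_lock_order(trans);	/* hypothetical body */
	trans->in_traverse_all = false;
	return ret;
}
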
 
 static inline bool btree_iter_good_node(struct btree_iter *iter,
@@ -1129,9 +1175,6 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
 {
        unsigned depth_want = iter->level;
 
-       if (unlikely(iter->level >= BTREE_MAX_DEPTH))
-               return 0;
-
        /*
         * if we need interior nodes locked, call btree_iter_relock() to make
         * sure we walk back up enough that we lock them:
@@ -1140,9 +1183,15 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
            iter->locks_want > 1)
                bch2_btree_iter_relock(iter, false);
 
+       if (btree_iter_type(iter) == BTREE_ITER_CACHED)
+               return bch2_btree_iter_traverse_cached(iter);
+
        if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
                return 0;
 
+       if (unlikely(iter->level >= BTREE_MAX_DEPTH))
+               return 0;
+
        /*
         * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
         * here unnecessary
@@ -1176,7 +1225,15 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
                                return 0;
 
                        iter->level = depth_want;
-                       iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN;
+
+                       if (ret == -EIO) {
+                               iter->flags |= BTREE_ITER_ERROR;
+                               iter->l[iter->level].b =
+                                       BTREE_ITER_NO_NODE_ERROR;
+                       } else {
+                               iter->l[iter->level].b =
+                                       BTREE_ITER_NO_NODE_DOWN;
+                       }
                        return ret;
                }
        }
@@ -1189,12 +1246,13 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
 
 int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 {
+       struct btree_trans *trans = iter->trans;
        int ret;
 
-       ret =   bch2_trans_cond_resched(iter->trans) ?:
+       ret =   bch2_trans_cond_resched(trans) ?:
                btree_iter_traverse_one(iter);
        if (unlikely(ret))
-               ret = __btree_iter_traverse_all(iter->trans, iter, ret);
+               ret = __btree_iter_traverse_all(trans, ret);
 
        return ret;
 }
@@ -1343,6 +1401,13 @@ static void btree_iter_pos_changed(struct btree_iter *iter, int cmp)
        if (!cmp)
                goto out;
 
+       if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
+               btree_node_unlock(iter, 0);
+               iter->l[0].b = BTREE_ITER_NO_NODE_UP;
+               btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+               return;
+       }
+
        l = btree_iter_up_until_good_node(iter, cmp);
 
        if (btree_iter_node(iter, l)) {
@@ -1774,6 +1839,26 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
        return bch2_btree_iter_peek_slot(iter);
 }
 
+struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
+{
+       struct bkey_cached *ck;
+       int ret;
+
+       bch2_btree_iter_checks(iter, BTREE_ITER_CACHED);
+
+       ret = bch2_btree_iter_traverse(iter);
+       if (unlikely(ret))
+               return bkey_s_c_err(ret);
+
+       ck = (void *) iter->l[0].b;
+
+       EBUG_ON(iter->btree_id != ck->key.btree_id ||
+               bkey_cmp(iter->pos, ck->key.pos));
+       BUG_ON(!ck->valid);
+
+       return bkey_i_to_s_c(ck->k);
+}
+
 static inline void bch2_btree_iter_init(struct btree_trans *trans,
                        struct btree_iter *iter, enum btree_id btree_id,
                        struct bpos pos, unsigned flags)
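
bch2_btree_iter_peek_cached() is the read side of the key cache: for a BTREE_ITER_CACHED iterator, l[0].b holds not a btree node but a struct bkey_cached, which bch2_btree_iter_traverse_cached() (from btree_key_cache.h, included at the top of this patch) is responsible for filling and marking valid. A hedged usage sketch in the spirit of this commit's subject, using cached iterators for the alloc btree; identifiers outside this file (ca->dev_idx, bucket_nr) are illustrative:

struct btree_iter *iter;
struct bkey_s_c k;

iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC,
			   POS(ca->dev_idx, bucket_nr),
			   BTREE_ITER_CACHED|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_cached(iter);
if (!bkey_err(k)) {
	/* k is the cached, possibly not-yet-flushed, alloc key */
}
bch2_trans_iter_put(trans, iter);
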
@@ -1959,10 +2044,11 @@ static inline void btree_iter_copy(struct btree_iter *dst,
 
        *dst = *src;
        dst->idx = idx;
+       dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
 
        for (i = 0; i < BTREE_MAX_DEPTH; i++)
                if (btree_node_locked(dst, i))
-                       six_lock_increment(&dst->l[i].b->lock,
+                       six_lock_increment(&dst->l[i].b->c.lock,
                                           __btree_lock_want(dst, i));
 
        dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
@@ -2017,8 +2103,9 @@ static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
                iter = best;
        }
 
-       iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
-       iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+       iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+       iter->flags &= ~BTREE_ITER_USER_FLAGS;
+       iter->flags |= flags & BTREE_ITER_USER_FLAGS;
 
        if (iter->flags & BTREE_ITER_INTENT)
                bch2_btree_iter_upgrade(iter, 1);
@@ -2222,6 +2309,8 @@ int bch2_trans_exit(struct btree_trans *trans)
        mutex_unlock(&trans->c->btree_trans_lock);
 #endif
 
+       bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);
+
        kfree(trans->fs_usage_deltas);
        kfree(trans->mem);
        if (trans->used_mempool)
@@ -2244,13 +2333,15 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 
        mutex_lock(&c->btree_trans_lock);
        list_for_each_entry(trans, &c->btree_trans_list, list) {
-               pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
+               pr_buf(out, "%i %px %ps\n", trans->pid, trans, (void *) trans->ip);
 
                trans_for_each_iter(trans, iter) {
                        if (!iter->nodes_locked)
                                continue;
 
-                       pr_buf(out, "  iter %s:", bch2_btree_ids[iter->btree_id]);
+                       pr_buf(out, "  iter %u %s:",
+                              iter->idx,
+                              bch2_btree_ids[iter->btree_id]);
                        bch2_bpos_to_text(out, iter->pos);
                        pr_buf(out, "\n");
 
@@ -2258,8 +2349,8 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
                                if (btree_node_locked(iter, l)) {
                                        b = iter->l[l].b;
 
-                                       pr_buf(out, "    %p l=%u %s ",
-                                              b, l, btree_node_intent_locked(iter, l) ? "i" : "r");
+                                       pr_buf(out, "    %px %s l=%u ",
+                                              b, btree_node_intent_locked(iter, l) ? "i" : "r", l);
                                        bch2_bpos_to_text(out, b->key.k.p);
                                        pr_buf(out, "\n");
                                }
@@ -2268,9 +2359,15 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 
                b = READ_ONCE(trans->locking);
                if (b) {
-                       pr_buf(out, "  locking %px l=%u %s:",
-                              b, b->level,
-                              bch2_btree_ids[b->btree_id]);
+                       pr_buf(out, "  locking iter %u l=%u %s:",
+                              trans->locking_iter_idx,
+                              trans->locking_level,
+                              bch2_btree_ids[trans->locking_btree_id]);
+                       bch2_bpos_to_text(out, trans->locking_pos);
+
+                       pr_buf(out, " node %px l=%u %s:",
+                              b, b->c.level,
+                              bch2_btree_ids[b->c.btree_id]);
                        bch2_bpos_to_text(out, b->key.k.p);
                        pr_buf(out, "\n");
                }