Update bcachefs sources to f7670cba39 bcachefs: Fix for building in userspace

diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c
index 280dcf3e14795eb162022e56d461e2aa5c16e3f7..7e572f5ffd706c7697a87d3d261fef1ff1fdffba 100644
--- a/libbcachefs/bset.c
+++ b/libbcachefs/bset.c
@@ -6,6 +6,7 @@
  */
 
 #include "bcachefs.h"
+#include "btree_cache.h"
 #include "bset.h"
 #include "eytzinger.h"
 #include "util.h"
 #include "alloc_types.h"
 #include <trace/events/bcachefs.h>
 
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
+                                                 struct btree *);
+
 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
 {
+       unsigned offset = __btree_node_key_to_offset(b, k);
        struct bset_tree *t;
 
        for_each_bset(b, t)
-               if (k >= btree_bkey_first(b, t) &&
-                   k < btree_bkey_last(b, t))
+               if (offset <= t->end_offset) {
+                       EBUG_ON(offset < btree_bkey_first_offset(t));
                        return t;
+               }
 
        BUG();
 }
@@ -62,9 +68,9 @@ void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
             _k = _n, k = n) {
                _n = bkey_next(_k);
 
-               bch2_bkey_to_text(buf, sizeof(buf), &k);
-               printk(KERN_ERR "block %u key %zi/%u: %s\n", set,
-                      _k->_data - i->_data, i->u64s, buf);
+               bch2_bkey_to_text(&PBUF(buf), &k);
+               printk(KERN_ERR "block %u key %5u: %s\n", set,
+                      __btree_node_key_to_offset(b, _k), buf);
 
                if (_n == vstruct_last(i))
                        continue;
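
The bch2_bkey_to_text() conversion above (and throughout this patch) replaces the old (buf, size) pair with a printbuf: PBUF() wraps a stack buffer in a small cursor struct that pr_buf() appends to with bounds checking, so callers no longer thread buffer sizes around. A minimal sketch of the interface this assumes, roughly as it appears in util.h at this revision:

    struct printbuf {
    	char	*pos;
    	char	*end;
    };

    #define _PBUF(_buf, _len)						\
    	((struct printbuf) { .pos = (_buf), .end = (_buf) + (_len) })
    #define PBUF(_buf) _PBUF(_buf, sizeof(_buf))

    static inline void pr_buf(struct printbuf *out, const char *fmt, ...)
    {
    	va_list args;

    	/* vscnprintf() never writes past out->end - out->pos bytes */
    	va_start(args, fmt);
    	out->pos += vscnprintf(out->pos, out->end - out->pos, fmt, args);
    	va_end(args);
    }
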
@@ -112,7 +118,7 @@ void bch2_dump_btree_node_iter(struct btree *b,
                struct bkey uk = bkey_unpack_key(b, k);
                char buf[100];
 
-               bch2_bkey_to_text(buf, sizeof(buf), &uk);
+               bch2_bkey_to_text(&PBUF(buf), &uk);
                printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set,
                       k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf);
        }
@@ -120,20 +126,6 @@ void bch2_dump_btree_node_iter(struct btree *b,
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
-static bool keys_out_of_order(struct btree *b,
-                             const struct bkey_packed *prev,
-                             const struct bkey_packed *next,
-                             bool is_extents)
-{
-       struct bkey nextu = bkey_unpack_key(b, next);
-
-       return bkey_cmp_left_packed_byval(b, prev, bkey_start_pos(&nextu)) > 0 ||
-               ((is_extents
-                 ? !bkey_deleted(next)
-                 : !bkey_deleted(prev)) &&
-                !bkey_cmp_packed(b, prev, next));
-}
-
 void __bch2_verify_btree_nr_keys(struct btree *b)
 {
        struct bset_tree *t;
@@ -150,123 +142,126 @@ void __bch2_verify_btree_nr_keys(struct btree *b)
        BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
 }
 
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
-                                          struct btree *b,
-                                          struct bkey_packed *k)
+static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
+                                           struct btree *b)
 {
-       const struct bkey_packed *n = bch2_btree_node_iter_peek_all(iter, b);
+       struct btree_node_iter iter = *_iter;
+       const struct bkey_packed *k, *n;
+
+       k = bch2_btree_node_iter_peek_all(&iter, b);
+       __bch2_btree_node_iter_advance(&iter, b);
+       n = bch2_btree_node_iter_peek_all(&iter, b);
 
        bkey_unpack_key(b, k);
 
        if (n &&
-           keys_out_of_order(b, k, n, iter->is_extents)) {
+           bkey_iter_cmp(b, k, n) > 0) {
+               struct btree_node_iter_set *set;
                struct bkey ku = bkey_unpack_key(b, k);
                struct bkey nu = bkey_unpack_key(b, n);
                char buf1[80], buf2[80];
 
                bch2_dump_btree_node(b);
-               bch2_bkey_to_text(buf1, sizeof(buf1), &ku);
-               bch2_bkey_to_text(buf2, sizeof(buf2), &nu);
-               panic("out of order/overlapping:\n%s\n%s\n", buf1, buf2);
+               bch2_bkey_to_text(&PBUF(buf1), &ku);
+               bch2_bkey_to_text(&PBUF(buf2), &nu);
+               printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
+                      buf1, buf2);
+               printk(KERN_ERR "iter was:");
+
+               btree_node_iter_for_each(_iter, set) {
+                       struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
+                       struct bset_tree *t = bch2_bkey_to_bset(b, k);
+                       printk(" [%zi %zi]", t - b->set,
+                              k->_data - bset(b, t)->_data);
+               }
+               panic("\n");
        }
 }
 
 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
-                               struct btree *b)
+                                struct btree *b)
 {
-       struct btree_node_iter_set *set;
+       struct btree_node_iter_set *set, *s2;
        struct bset_tree *t;
-       struct bkey_packed *k, *first;
 
-       BUG_ON(iter->used > MAX_BSETS);
-
-       if (!iter->used)
-               return;
+       /* Verify no duplicates: */
+       btree_node_iter_for_each(iter, set)
+               btree_node_iter_for_each(iter, s2)
+                       BUG_ON(set != s2 && set->end == s2->end);
 
+       /* Verify that set->end is correct: */
        btree_node_iter_for_each(iter, set) {
-               k = __btree_node_offset_to_key(b, set->k);
-               t = bch2_bkey_to_bset(b, k);
-
-               BUG_ON(__btree_node_offset_to_key(b, set->end) !=
-                      btree_bkey_last(b, t));
-
-               BUG_ON(set + 1 < iter->data + iter->used &&
-                      btree_node_iter_cmp(iter, b, set[0], set[1]) > 0);
+               for_each_bset(b, t)
+                       if (set->end == t->end_offset)
+                               goto found;
+               BUG();
+found:
+               BUG_ON(set->k < btree_bkey_first_offset(t) ||
+                      set->k >= t->end_offset);
        }
 
-       first = __btree_node_offset_to_key(b, iter->data[0].k);
-
-       for_each_bset(b, t)
-               if (bch2_btree_node_iter_bset_pos(iter, b, t) ==
-                   btree_bkey_last(b, t) &&
-                   (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))))
-                       BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
-                                                    k, first) > 0);
+       /* Verify iterator is sorted: */
+       btree_node_iter_for_each(iter, set)
+               BUG_ON(set != iter->data &&
+                      btree_node_iter_cmp(b, set[-1], set[0]) > 0);
 }
 
-void bch2_verify_key_order(struct btree *b,
-                         struct btree_node_iter *iter,
-                         struct bkey_packed *where)
+void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
+                           struct bkey_packed *insert, unsigned clobber_u64s)
 {
        struct bset_tree *t = bch2_bkey_to_bset(b, where);
-       struct bkey_packed *k, *prev;
-       struct bkey uk, uw = bkey_unpack_key(b, where);
-
-       k = bch2_bkey_prev_all(b, t, where);
-       if (k &&
-           keys_out_of_order(b, k, where, iter->is_extents)) {
-               char buf1[100], buf2[100];
+       struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
+       struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
+#if 0
+       BUG_ON(prev &&
+              bkey_iter_cmp(b, prev, insert) > 0);
+#else
+       if (prev &&
+           bkey_iter_cmp(b, prev, insert) > 0) {
+               struct bkey k1 = bkey_unpack_key(b, prev);
+               struct bkey k2 = bkey_unpack_key(b, insert);
+               char buf1[100];
+               char buf2[100];
 
                bch2_dump_btree_node(b);
-               uk = bkey_unpack_key(b, k);
-               bch2_bkey_to_text(buf1, sizeof(buf1), &uk);
-               bch2_bkey_to_text(buf2, sizeof(buf2), &uw);
-               panic("out of order with prev:\n%s\n%s\n",
-                     buf1, buf2);
+               bch2_bkey_to_text(&PBUF(buf1), &k1);
+               bch2_bkey_to_text(&PBUF(buf2), &k2);
+
+               panic("prev > insert:\n"
+                     "prev    key %5u %s\n"
+                     "insert  key %5u %s\n",
+                      __btree_node_key_to_offset(b, prev), buf1,
+                      __btree_node_key_to_offset(b, insert), buf2);
        }
+#endif
+#if 0
+       BUG_ON(next != btree_bkey_last(b, t) &&
+              bkey_iter_cmp(b, insert, next) > 0);
+#else
+       if (next != btree_bkey_last(b, t) &&
+           bkey_iter_cmp(b, insert, next) > 0) {
+               struct bkey k1 = bkey_unpack_key(b, insert);
+               struct bkey k2 = bkey_unpack_key(b, next);
+               char buf1[100];
+               char buf2[100];
 
-       k = bkey_next(where);
-       BUG_ON(k != btree_bkey_last(b, t) &&
-              keys_out_of_order(b, where, k, iter->is_extents));
-
-       for_each_bset(b, t) {
-               if (where >= btree_bkey_first(b, t) ||
-                   where < btree_bkey_last(b, t))
-                       continue;
-
-               k = bch2_btree_node_iter_bset_pos(iter, b, t);
-
-               if (k == btree_bkey_last(b, t))
-                       k = bch2_bkey_prev_all(b, t, k);
-
-               while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 &&
-                      (prev = bch2_bkey_prev_all(b, t, k)))
-                       k = prev;
-
-               for (;
-                    k != btree_bkey_last(b, t);
-                    k = bkey_next(k)) {
-                       uk = bkey_unpack_key(b, k);
-
-                       if (iter->is_extents) {
-                               BUG_ON(!(bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0 ||
-                                        bkey_cmp(uk.p, bkey_start_pos(&uw)) <= 0));
-                       } else {
-                               BUG_ON(!bkey_cmp(uw.p, uk.p) &&
-                                      !bkey_deleted(&uk));
-                       }
-
-                       if (bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0)
-                               break;
-               }
+               bch2_dump_btree_node(b);
+               bch2_bkey_to_text(&PBUF(buf1), &k1);
+               bch2_bkey_to_text(&PBUF(buf2), &k2);
+
+               panic("insert > next:\n"
+                     "insert  key %5u %s\n"
+                     "next    key %5u %s\n",
+                      __btree_node_key_to_offset(b, insert), buf1,
+                      __btree_node_key_to_offset(b, next), buf2);
        }
+#endif
 }
 
 #else
 
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
-                                          struct btree *b,
-                                          struct bkey_packed *k) {}
+static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
+                                                  struct btree *b) {}
 
 #endif
 
@@ -438,6 +433,10 @@ void bch2_btree_keys_free(struct btree *b)
        b->aux_data = NULL;
 }
 
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
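
The PAGE_KERNEL_EXEC fallback is part of the "building in userspace" fix in the commit subject: bch2_btree_keys_alloc() below allocates b->aux_data with __vmalloc(..., gfp, PAGE_KERNEL_EXEC), presumably because aux_data holds the dynamically generated bkey unpack function and must be executable in the kernel; the userspace shims don't provide PAGE_KERNEL_EXEC, so it is aliased to PAGE_KERNEL there (page protections are a no-op in userspace anyway).
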
+
 int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
 {
        b->page_order   = page_order;
@@ -473,7 +472,7 @@ void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
  * in one cacheline in t->set (BSET_CACHELINE bytes).
  *
  * This means we don't have to store the full index of the key that a node in
- * the binary tree points to; eytzinger_to_inorder() gives us the cacheline, and
+ * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline, and
  * then bkey_float->m gives us the offset within that cacheline, in units of 8
  * bytes.
  *
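
Concretely, resolving an eytzinger node j to a key is two lookups: __eytzinger1_to_inorder() turns j into a cacheline index, and bkey_float->key_offset locates the key inside that cacheline in units of u64s. The static helpers this relies on have roughly this shape (a sketch; the real ones are further down in this file):

    static void *bset_cacheline(const struct btree *b, const struct bset_tree *t,
    			    unsigned cacheline)
    {
    	/* cachelines are counted from the line containing the first key */
    	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
    				   L1_CACHE_BYTES) +
    		cacheline * BSET_CACHELINE;
    }

    static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
    					     const struct bset_tree *t,
    					     unsigned cacheline, unsigned offset)
    {
    	/* key_offset is in units of 8 bytes, per the comment above */
    	return bset_cacheline(b, t, cacheline) + offset * 8;
    }
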
@@ -534,7 +533,7 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
                                               unsigned j)
 {
        return cacheline_to_bkey(b, t,
-                       __eytzinger_to_inorder(j, t->size, t->extra),
+                       __eytzinger1_to_inorder(j, t->size, t->extra),
                        bkey_float(b, t, j)->key_offset);
 }
 
@@ -569,7 +568,7 @@ static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
                            unsigned j, struct bkey_packed *k)
 {
-       BUG_ON(k >= btree_bkey_last(b, t));
+       EBUG_ON(k >= btree_bkey_last(b, t));
 
        rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
                .offset = __btree_node_key_to_offset(b, k),
@@ -617,28 +616,30 @@ static unsigned rw_aux_tree_bsearch(struct btree *b,
                                    struct bset_tree *t,
                                    unsigned offset)
 {
-       unsigned l = 0, r = t->size;
-
-       BUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
+       unsigned bset_offs = offset - btree_bkey_first_offset(t);
+       unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
+       unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
 
-       while (l < r) {
-               unsigned m = (l + r) >> 1;
+       EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
+       EBUG_ON(!t->size);
+       EBUG_ON(idx > t->size);
 
-               if (rw_aux_tree(b, t)[m].offset < offset)
-                       l = m + 1;
-               else
-                       r = m;
-       }
+       while (idx < t->size &&
+              rw_aux_tree(b, t)[idx].offset < offset)
+               idx++;
 
-       BUG_ON(l < t->size &&
-              rw_aux_tree(b, t)[l].offset < offset);
-       BUG_ON(l &&
-              rw_aux_tree(b, t)[l - 1].offset >= offset);
+       while (idx &&
+              rw_aux_tree(b, t)[idx - 1].offset >= offset)
+               idx--;
 
-       BUG_ON(l > r);
-       BUG_ON(l > t->size);
+       EBUG_ON(idx < t->size &&
+               rw_aux_tree(b, t)[idx].offset < offset);
+       EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
+       EBUG_ON(idx + 1 < t->size &&
+               rw_aux_tree(b, t)[idx].offset ==
+               rw_aux_tree(b, t)[idx + 1].offset);
 
-       return l;
+       return idx;
 }
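
The rewrite above drops the binary search in favor of interpolation: rw aux tree entries index keys at roughly even spacing, so bset_offs * t->size / bset_u64s lands near the right slot and a short linear scan settles it exactly. The postcondition is unchanged (return the first entry >= offset, asserted at the bottom), but the asserts become EBUG_ON so non-debug builds skip them. A self-contained sketch of the same idea on a plain sorted array:

    /*
     * Sketch: first index with tbl[idx] >= target, starting from an
     * interpolated guess instead of binary searching. Assumes tbl[] holds
     * `size` sorted values spanning `span`, starting at `first`.
     */
    static unsigned interp_search(const unsigned *tbl, unsigned size,
    			      unsigned first, unsigned span, unsigned target)
    {
    	unsigned idx = span ? (target - first) * size / span : 0;

    	if (idx > size)
    		idx = size;

    	while (idx < size && tbl[idx] < target)
    		idx++;
    	while (idx && tbl[idx - 1] >= target)
    		idx--;

    	return idx;
    }
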
 
 static inline unsigned bfloat_mantissa(const struct bkey_float *f,
@@ -672,7 +673,7 @@ static inline unsigned bkey_mantissa(const struct bkey_packed *k,
         * (and then the bits we want are at the high end, so we shift them
         * back down):
         */
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        v >>= f->exponent & 7;
 #else
        v >>= 64 - (f->exponent & 7) - (idx < BFLOAT_32BIT_NR ? 32 : 16);
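
This macro change (here and in make_bfloat() below) is another userspace-build fix: the kernel defines exactly one of __LITTLE_ENDIAN/__BIG_ENDIAN, but glibc's <endian.h> defines both as constants on every architecture, so `#ifdef __LITTLE_ENDIAN` silently takes the little-endian branch even on big-endian userspace. __BYTE_ORDER__ and __ORDER_LITTLE_ENDIAN__ are predefined by the compiler itself and test correctly in both environments.
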
@@ -691,7 +692,7 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
        struct bkey_packed *l, *r;
        unsigned bits = j < BFLOAT_32BIT_NR ? 32 : 16;
        unsigned mantissa;
-       int shift, exponent;
+       int shift, exponent, high_bit;
 
        EBUG_ON(bkey_next(p) != m);
 
@@ -737,7 +738,8 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
         */
 
        if (!bkey_packed(l) || !bkey_packed(r) ||
-           !bkey_packed(p) || !bkey_packed(m)) {
+           !bkey_packed(p) || !bkey_packed(m) ||
+           !b->nr_key_bits) {
                f->exponent = BFLOAT_FAILED_UNPACKED;
                return;
        }
@@ -752,13 +754,15 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
         * Note that this may be negative - we may be running off the low end
         * of the key: we handle this later:
         */
-       exponent = (int) bch2_bkey_greatest_differing_bit(b, l, r) - (bits - 1);
+       high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
+                      min_t(unsigned, bits, b->nr_key_bits) - 1);
+       exponent = high_bit - (bits - 1);
 
        /*
         * Then we calculate the actual shift value, from the start of the key
         * (k->_data), to get the key bits starting at exponent:
         */
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
 
        EBUG_ON(shift + bits > b->format.key_u64s * 64);
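
The new high_bit clamp addresses the "may be negative" case the comment above warns about. A worked example: with 16 bit mantissas (bits == 16), if l and r first differ at bit 5, the old code computed exponent = 5 - 15 = -10, putting most of the mantissa window below bit 0 of the key; clamping high_bit up to min(bits, b->nr_key_bits) - 1 = 15 keeps exponent >= 0 whenever the format has at least `bits` key bits, so the window stays over real key bits. The !b->nr_key_bits bail-out added above covers the fully degenerate format with no key bits to compare at all.
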
@@ -882,11 +886,12 @@ retry:
        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
 
        /* First we figure out where the first key in each cacheline is */
-       eytzinger_for_each(j, t->size) {
+       eytzinger1_for_each(j, t->size) {
                while (bkey_to_cacheline(b, t, k) < cacheline)
                        prev = k, k = bkey_next(k);
 
                if (k >= btree_bkey_last(b, t)) {
+                       /* XXX: this path sucks */
                        t->size--;
                        goto retry;
                }
@@ -895,8 +900,8 @@ retry:
                bkey_float(b, t, j)->key_offset =
                        bkey_to_cacheline_offset(b, t, cacheline++, k);
 
-               BUG_ON(tree_to_prev_bkey(b, t, j) != prev);
-               BUG_ON(tree_to_bkey(b, t, j) != k);
+               EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
+               EBUG_ON(tree_to_bkey(b, t, j) != k);
        }
 
        while (bkey_next(k) != btree_bkey_last(b, t))
@@ -905,7 +910,7 @@ retry:
        t->max_key = bkey_unpack_pos(b, k);
 
        /* Then we build the tree */
-       eytzinger_for_each(j, t->size)
+       eytzinger1_for_each(j, t->size)
                make_bfloat(b, t, j, &min_key, &max_key);
 }
 
@@ -960,10 +965,14 @@ void bch2_bset_init_first(struct btree *b, struct bset *i)
        set_btree_bset(b, t, i);
 }
 
-void bch2_bset_init_next(struct btree *b, struct bset *i)
+void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
+                        struct btree_node_entry *bne)
 {
+       struct bset *i = &bne->keys;
        struct bset_tree *t;
 
+       BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
+       BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
        BUG_ON(b->nsets >= MAX_BSETS);
 
        memset(i, 0, sizeof(*i));
@@ -974,6 +983,10 @@ void bch2_bset_init_next(struct btree *b, struct bset *i)
        set_btree_bset(b, t, i);
 }
 
+/*
+ * find _some_ key in the same bset as @k that precedes @k - not necessarily the
+ * immediate predecessor:
+ */
 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
                                       struct bkey_packed *k)
 {
@@ -996,7 +1009,7 @@ static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
 
                do {
                        p = j ? tree_to_bkey(b, t,
-                                       __inorder_to_eytzinger(j--,
+                                       __inorder_to_eytzinger1(j--,
                                                        t->size, t->extra))
                              : btree_bkey_first(b, t);
                } while (p >= k);
@@ -1012,40 +1025,31 @@ static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
        return p;
 }
 
-struct bkey_packed *bch2_bkey_prev_all(struct btree *b, struct bset_tree *t,
-                                      struct bkey_packed *k)
-{
-       struct bkey_packed *p;
-
-       p = __bkey_prev(b, t, k);
-       if (!p)
-               return NULL;
-
-       while (bkey_next(p) != k)
-               p = bkey_next(p);
-
-       return p;
-}
-
-struct bkey_packed *bch2_bkey_prev(struct btree *b, struct bset_tree *t,
-                                  struct bkey_packed *k)
+struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
+                                         struct bset_tree *t,
+                                         struct bkey_packed *k,
+                                         unsigned min_key_type)
 {
-       while (1) {
-               struct bkey_packed *p, *i, *ret = NULL;
-
-               p = __bkey_prev(b, t, k);
-               if (!p)
-                       return NULL;
+       struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
+       while ((p = __bkey_prev(b, t, k)) && !ret) {
                for (i = p; i != k; i = bkey_next(i))
-                       if (!bkey_deleted(i))
+                       if (i->type >= min_key_type)
                                ret = i;
 
-               if (ret)
-                       return ret;
-
                k = p;
        }
+
+       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+               BUG_ON(ret >= orig_k);
+
+               for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
+                    i != orig_k;
+                    i = bkey_next(i))
+                       BUG_ON(i->type >= min_key_type);
+       }
+
+       return ret;
 }
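
bch2_bkey_prev_all() and bch2_bkey_prev() don't go away - callers in this file still use them - so they presumably survive as inline wrappers over the new filter variant in bset.h, along these lines (a sketch: the exact min_key_type thresholds depend on the key type enum, with KEY_TYPE_DELETED == 0):

    static inline struct bkey_packed *
    bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
    {
    	/* min_key_type 0: return whatever precedes k, deleted keys included */
    	return bch2_bkey_prev_filter(b, t, k, 0);
    }

    static inline struct bkey_packed *
    bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
    {
    	/* skip deleted keys, matching the old !bkey_deleted() test */
    	return bch2_bkey_prev_filter(b, t, k, KEY_TYPE_DELETED + 1);
    }
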
 
 /* Insert */
@@ -1071,7 +1075,7 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b,
        struct bkey_packed min_key, max_key;
        unsigned inorder, j;
 
-       BUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
+       EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
 
        /* signal to make_bfloat() that they're uninitialized: */
        min_key.u64s = max_key.u64s = 0;
@@ -1087,30 +1091,30 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b,
 
        if (inorder &&
            inorder < t->size) {
-               j = __inorder_to_eytzinger(inorder, t->size, t->extra);
+               j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
 
                if (k == tree_to_bkey(b, t, j)) {
                        /* Fix the node this key corresponds to */
                        make_bfloat(b, t, j, &min_key, &max_key);
 
                        /* Children for which this key is the right boundary */
-                       for (j = eytzinger_left_child(j);
+                       for (j = eytzinger1_left_child(j);
                             j < t->size;
-                            j = eytzinger_right_child(j))
+                            j = eytzinger1_right_child(j))
                                make_bfloat(b, t, j, &min_key, &max_key);
                }
        }
 
        if (inorder + 1 < t->size) {
-               j = __inorder_to_eytzinger(inorder + 1, t->size, t->extra);
+               j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);
 
                if (k == tree_to_prev_bkey(b, t, j)) {
                        make_bfloat(b, t, j, &min_key, &max_key);
 
                        /* Children for which this key is the left boundary */
-                       for (j = eytzinger_right_child(j);
+                       for (j = eytzinger1_right_child(j);
                             j < t->size;
-                            j = eytzinger_left_child(j))
+                            j = eytzinger1_left_child(j))
                                make_bfloat(b, t, j, &min_key, &max_key);
                }
        }
@@ -1121,9 +1125,10 @@ static void ro_aux_tree_fix_invalidated_key(struct btree *b,
  * modified, fix any auxiliary search tree by remaking all the nodes in the
  * auxiliary search tree that @k corresponds to
  */
-void bch2_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
-                                  struct bkey_packed *k)
+void bch2_bset_fix_invalidated_key(struct btree *b, struct bkey_packed *k)
 {
+       struct bset_tree *t = bch2_bkey_to_bset(b, k);
+
        switch (bset_aux_tree_type(t)) {
        case BSET_NO_AUX_TREE:
                break;
@@ -1145,18 +1150,14 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
        int shift = new_u64s - clobber_u64s;
        unsigned l, j, where = __btree_node_key_to_offset(b, _where);
 
-       BUG_ON(bset_has_ro_aux_tree(t));
+       EBUG_ON(bset_has_ro_aux_tree(t));
 
        if (!bset_has_rw_aux_tree(t))
                return;
 
+       /* returns first entry >= where */
        l = rw_aux_tree_bsearch(b, t, where);
 
-       /* l is first >= than @where */
-
-       BUG_ON(l < t->size && rw_aux_tree(b, t)[l].offset < where);
-       BUG_ON(l && rw_aux_tree(b, t)[l - 1].offset >= where);
-
        if (!l) /* never delete first entry */
                l++;
        else if (l < t->size &&
@@ -1186,9 +1187,9 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
        for (j = l; j < t->size; j++)
               rw_aux_tree(b, t)[j].offset += shift;
 
-       BUG_ON(l < t->size &&
-              rw_aux_tree(b, t)[l].offset ==
-              rw_aux_tree(b, t)[l - 1].offset);
+       EBUG_ON(l < t->size &&
+               rw_aux_tree(b, t)[l].offset ==
+               rw_aux_tree(b, t)[l - 1].offset);
 
        if (t->size < bset_rw_tree_capacity(b, t) &&
            (l < t->size
@@ -1234,6 +1235,7 @@ void bch2_bset_insert(struct btree *b,
        struct bkey_packed packed, *src = bkey_to_packed(insert);
 
        bch2_bset_verify_rw_aux_tree(b, t);
+       bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
 
        if (bch2_bkey_pack_key(&packed, &insert->k, f))
                src = &packed;
@@ -1245,8 +1247,8 @@ void bch2_bset_insert(struct btree *b,
                u64 *src_p = where->_data + clobber_u64s;
                u64 *dst_p = where->_data + src->u64s;
 
-               BUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
-                      (int) clobber_u64s - src->u64s);
+               EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
+                       (int) clobber_u64s - src->u64s);
 
                memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
                le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
@@ -1260,7 +1262,6 @@ void bch2_bset_insert(struct btree *b,
 
        bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
 
-       bch2_verify_key_order(b, iter, where);
        bch2_verify_btree_nr_keys(b);
 }
 
@@ -1274,7 +1275,7 @@ void bch2_bset_delete(struct btree *b,
 
        bch2_bset_verify_rw_aux_tree(b, t);
 
-       BUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
+       EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
 
        memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
        le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
@@ -1288,7 +1289,7 @@ void bch2_bset_delete(struct btree *b,
 __flatten
 static struct bkey_packed *bset_search_write_set(const struct btree *b,
                                struct bset_tree *t,
-                               struct bpos search,
+                               struct bpos *search,
                                const struct bkey_packed *packed_search)
 {
        unsigned l = 0, r = t->size;
@@ -1296,7 +1297,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
        while (l + 1 != r) {
                unsigned m = (l + r) >> 1;
 
-               if (bkey_cmp(rw_aux_tree(b, t)[m].k, search) < 0)
+               if (bkey_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
                        l = m;
                else
                        r = m;
@@ -1318,7 +1319,7 @@ static int bset_search_tree_slowpath(const struct btree *b,
 __flatten
 static struct bkey_packed *bset_search_tree(const struct btree *b,
                                struct bset_tree *t,
-                               struct bpos search,
+                               struct bpos *search,
                                const struct bkey_packed *packed_search)
 {
        struct ro_aux_tree *base = ro_aux_tree_base(b, t);
@@ -1331,7 +1332,7 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
                        p = bkey_float_get(base, n << 4);
                        prefetch(p);
                } else if (n << 3 < t->size) {
-                       inorder = __eytzinger_to_inorder(n, t->size, t->extra);
+                       inorder = __eytzinger1_to_inorder(n, t->size, t->extra);
                        p = bset_cacheline(b, t, inorder);
 #ifdef CONFIG_X86_64
                        asm(".intel_syntax noprefix;"
@@ -1359,10 +1360,10 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
                                     bkey_mantissa(packed_search, f, n));
                else
                        n = n * 2 + bset_search_tree_slowpath(b, t,
-                                               &search, packed_search, n);
+                                               search, packed_search, n);
        } while (n < t->size);
 
-       inorder = __eytzinger_to_inorder(n >> 1, t->size, t->extra);
+       inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
 
        /*
         * n would have been the node we recursed to - the low bit tells us if
@@ -1372,7 +1373,7 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
                return cacheline_to_bkey(b, t, inorder, f->key_offset);
        } else {
                if (--inorder) {
-                       n = eytzinger_prev(n >> 1, t->size);
+                       n = eytzinger1_prev(n >> 1, t->size);
                        f = bkey_float_get(base, n);
                        return cacheline_to_bkey(b, t, inorder, f->key_offset);
                } else
@@ -1386,10 +1387,9 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
 __always_inline __flatten
 static struct bkey_packed *bch2_bset_search(struct btree *b,
                                struct bset_tree *t,
-                               struct bpos search,
+                               struct bpos *search,
                                struct bkey_packed *packed_search,
-                               const struct bkey_packed *lossy_packed_search,
-                               bool strictly_greater)
+                               const struct bkey_packed *lossy_packed_search)
 {
        struct bkey_packed *m;
 
@@ -1423,7 +1423,7 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
                 * start and end - handle that here:
                 */
 
-               if (bkey_cmp(search, t->max_key) > 0)
+               if (bkey_cmp(*search, t->max_key) > 0)
                        return btree_bkey_last(b, t);
 
                m = bset_search_tree(b, t, search, lossy_packed_search);
@@ -1432,21 +1432,21 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 
        if (lossy_packed_search)
                while (m != btree_bkey_last(b, t) &&
-                      !btree_iter_pos_cmp_p_or_unp(b, search, lossy_packed_search,
-                                                   m, strictly_greater))
+                      bkey_iter_cmp_p_or_unp(b, search, lossy_packed_search,
+                                             m) > 0)
                        m = bkey_next(m);
 
        if (!packed_search)
                while (m != btree_bkey_last(b, t) &&
-                      !btree_iter_pos_cmp_packed(b, &search, m, strictly_greater))
+                      bkey_iter_pos_cmp(b, search, m) > 0)
                        m = bkey_next(m);
 
-       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+       if (btree_keys_expensive_checks(b)) {
                struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
 
                BUG_ON(prev &&
-                      btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
-                                                  prev, strictly_greater));
+                      bkey_iter_cmp_p_or_unp(b, search, packed_search,
+                                             prev) <= 0);
        }
 
        return m;
@@ -1454,33 +1454,37 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 
 /* Btree node iterator */
 
-void bch2_btree_node_iter_push(struct btree_node_iter *iter,
-                              struct btree *b,
-                              const struct bkey_packed *k,
-                              const struct bkey_packed *end)
+static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
+                             struct btree *b,
+                             const struct bkey_packed *k,
+                             const struct bkey_packed *end)
 {
        if (k != end) {
-               struct btree_node_iter_set *pos, n =
-                       ((struct btree_node_iter_set) {
-                                __btree_node_key_to_offset(b, k),
-                                __btree_node_key_to_offset(b, end)
-                        });
+               struct btree_node_iter_set *pos;
 
                btree_node_iter_for_each(iter, pos)
-                       if (btree_node_iter_cmp(iter, b, n, *pos) <= 0)
-                               break;
+                       ;
 
-               memmove(pos + 1, pos,
-                       (void *) (iter->data + iter->used) - (void *) pos);
-               iter->used++;
-               *pos = n;
+               BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
+               *pos = (struct btree_node_iter_set) {
+                       __btree_node_key_to_offset(b, k),
+                       __btree_node_key_to_offset(b, end)
+               };
        }
 }
 
+void bch2_btree_node_iter_push(struct btree_node_iter *iter,
+                              struct btree *b,
+                              const struct bkey_packed *k,
+                              const struct bkey_packed *end)
+{
+       __bch2_btree_node_iter_push(iter, b, k, end);
+       bch2_btree_node_iter_sort(iter, b);
+}
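
This patch also retires the iterator's `used` count in favor of a sentinel encoding: unused slots in iter->data are zeroed, and end == 0 marks the first dead set, which is what the empty-bodied btree_node_iter_for_each loop above exploits to find the insertion slot, and what bch2_btree_node_iter_set_drop() below maintains by zeroing the vacated last slot. A sketch of the predicates this assumes on the bset.h side of the patch:

    static inline bool __btree_node_iter_set_end(struct btree_node_iter *iter,
    					     unsigned i)
    {
    	/* a zeroed set marks the end of the live sets */
    	return !iter->data[i].end;
    }

    #define btree_node_iter_for_each(_iter, _set)			\
    	for (_set = (_iter)->data;					\
    	     (_set) < (_iter)->data + ARRAY_SIZE((_iter)->data) &&	\
    	     (_set)->end;						\
    	     (_set)++)
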
+
 noinline __flatten __attribute__((cold))
 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
-                             struct btree *b, struct bpos search,
-                             bool strictly_greater, bool is_extents)
+                             struct btree *b, struct bpos *search)
 {
        struct bset_tree *t;
 
@@ -1488,8 +1492,7 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
 
        for_each_bset(b, t)
                __bch2_btree_node_iter_push(iter, b,
-                       bch2_bset_search(b, t, search, NULL, NULL,
-                                       strictly_greater),
+                       bch2_bset_search(b, t, search, NULL, NULL),
                        btree_bkey_last(b, t));
 
        bch2_btree_node_iter_sort(iter, b);
@@ -1536,21 +1539,17 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
  *    past any extents that compare equal to the position we searched for.
  */
 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
-                              struct btree *b, struct bpos search,
-                              bool strictly_greater, bool is_extents)
+                              struct btree *b, struct bpos *search)
 {
        struct bset_tree *t;
        struct bkey_packed p, *packed_search = NULL;
 
-       EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
+       EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
        bset_aux_tree_verify(b);
 
-       __bch2_btree_node_iter_init(iter, is_extents);
+       memset(iter, 0, sizeof(*iter));
 
-       //if (bkey_cmp(search, b->curr_max_key) > 0)
-       //      return;
-
-       switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
+       switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
        case BKEY_PACK_POS_EXACT:
                packed_search = &p;
                break;
@@ -1558,28 +1557,25 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
                packed_search = NULL;
                break;
        case BKEY_PACK_POS_FAIL:
-               btree_node_iter_init_pack_failed(iter, b, search,
-                                       strictly_greater, is_extents);
+               btree_node_iter_init_pack_failed(iter, b, search);
                return;
        }
 
        for_each_bset(b, t)
                __bch2_btree_node_iter_push(iter, b,
                                           bch2_bset_search(b, t, search,
-                                                          packed_search, &p,
-                                                          strictly_greater),
+                                                           packed_search, &p),
                                           btree_bkey_last(b, t));
 
        bch2_btree_node_iter_sort(iter, b);
 }
 
 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
-                                         struct btree *b,
-                                         bool is_extents)
+                                         struct btree *b)
 {
        struct bset_tree *t;
 
-       __bch2_btree_node_iter_init(iter, is_extents);
+       memset(iter, 0, sizeof(*iter));
 
        for_each_bset(b, t)
                __bch2_btree_node_iter_push(iter, b,
@@ -1594,8 +1590,6 @@ struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
 {
        struct btree_node_iter_set *set;
 
-       BUG_ON(iter->used > MAX_BSETS);
-
        btree_node_iter_for_each(iter, set)
                if (set->end == t->end_offset)
                        return __btree_node_offset_to_key(b, set->k);
@@ -1603,134 +1597,149 @@ struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
        return btree_bkey_last(b, t);
 }
 
-static inline void btree_node_iter_sift(struct btree_node_iter *iter,
-                                       struct btree *b,
-                                       unsigned start)
-{
-       unsigned i;
-
-       EBUG_ON(iter->used > MAX_BSETS);
-
-       for (i = start;
-            i + 1 < iter->used &&
-            btree_node_iter_cmp(iter, b, iter->data[i], iter->data[i + 1]) > 0;
-            i++)
-               swap(iter->data[i], iter->data[i + 1]);
-}
-
-static inline void btree_node_iter_sort_two(struct btree_node_iter *iter,
+static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
                                            struct btree *b,
                                            unsigned first)
 {
-       if (btree_node_iter_cmp(iter, b,
-                               iter->data[first],
-                               iter->data[first + 1]) > 0)
+       bool ret;
+
+       if ((ret = (btree_node_iter_cmp(b,
+                                       iter->data[first],
+                                       iter->data[first + 1]) > 0)))
                swap(iter->data[first], iter->data[first + 1]);
+       return ret;
 }
 
 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
                               struct btree *b)
 {
-       EBUG_ON(iter->used > 3);
-
        /* unrolled bubble sort: */
 
-       if (iter->used > 2) {
+       if (!__btree_node_iter_set_end(iter, 2)) {
                btree_node_iter_sort_two(iter, b, 0);
                btree_node_iter_sort_two(iter, b, 1);
        }
 
-       if (iter->used > 1)
+       if (!__btree_node_iter_set_end(iter, 1))
                btree_node_iter_sort_two(iter, b, 0);
 }
 
-/**
- * bch_btree_node_iter_advance - advance @iter by one key
- *
- * Doesn't do debugchecks - for cases where (insert_fixup_extent()) a bset might
- * momentarily have out of order extents.
- */
-void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
-                                 struct btree *b)
+void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
+                                  struct btree_node_iter_set *set)
 {
-       struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b);
+       struct btree_node_iter_set *last =
+               iter->data + ARRAY_SIZE(iter->data) - 1;
+
+       memmove(&set[0], &set[1], (void *) last - (void *) set);
+       *last = (struct btree_node_iter_set) { 0, 0 };
+}
 
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+                                                 struct btree *b)
+{
        iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
 
-       BUG_ON(iter->data->k > iter->data->end);
+       EBUG_ON(iter->data->k > iter->data->end);
 
-       if (iter->data->k == iter->data->end) {
-               BUG_ON(iter->used == 0);
-               iter->data[0] = iter->data[--iter->used];
+       if (unlikely(__btree_node_iter_set_end(iter, 0))) {
+               bch2_btree_node_iter_set_drop(iter, iter->data);
+               return;
        }
 
-       btree_node_iter_sift(iter, b, 0);
+       if (__btree_node_iter_set_end(iter, 1))
+               return;
+
+       if (!btree_node_iter_sort_two(iter, b, 0))
+               return;
+
+       if (__btree_node_iter_set_end(iter, 2))
+               return;
 
-       bch2_btree_node_iter_next_check(iter, b, k);
+       btree_node_iter_sort_two(iter, b, 1);
+}
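
Restoring sort order after bumping the head set needs at most two compare-and-swaps: only data[0] changed, so one swap sinks it past data[1] if needed, and only when that swap happened can the old head at data[1] still be out of order with data[2]. The early returns on __btree_node_iter_set_end() avoid comparing against dead slots. This is the sift-down step of the unrolled bubble sort in bch2_btree_node_iter_sort() above, specialized to the MAX_BSETS (three) sets an iterator can hold.
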
+
+void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+                                 struct btree *b)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+       bch2_btree_node_iter_verify(iter, b);
+       bch2_btree_node_iter_next_check(iter, b);
+#endif
+       __bch2_btree_node_iter_advance(iter, b);
+}
+
+static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
+{
+       unsigned n = ARRAY_SIZE(iter->data);
+
+       while (n && __btree_node_iter_set_end(iter, n - 1))
+               --n;
+
+       return n;
 }
 
 /*
  * Expensive:
  */
-struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
-                                                 struct btree *b)
+struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
+                                                    struct btree *b,
+                                                    unsigned min_key_type)
 {
        struct bkey_packed *k, *prev = NULL;
+       struct bkey_packed *orig_pos = bch2_btree_node_iter_peek_all(iter, b);
        struct btree_node_iter_set *set;
        struct bset_tree *t;
-       struct bset_tree *prev_t;
-       unsigned end;
+       unsigned end = 0;
 
        bch2_btree_node_iter_verify(iter, b);
 
        for_each_bset(b, t) {
-               k = bch2_bkey_prev_all(b, t,
-                       bch2_btree_node_iter_bset_pos(iter, b, t));
+               k = bch2_bkey_prev_filter(b, t,
+                       bch2_btree_node_iter_bset_pos(iter, b, t),
+                       min_key_type);
                if (k &&
-                   (!prev || __btree_node_iter_cmp(iter->is_extents, b,
-                                                   k, prev) > 0)) {
+                   (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
                        prev = k;
-                       prev_t = t;
+                       end = t->end_offset;
                }
        }
 
        if (!prev)
-               return NULL;
+               goto out;
 
        /*
         * We're manually memmoving instead of just calling sort() to ensure the
         * prev we picked ends up in slot 0 - sort won't necessarily put it
         * there because of duplicate deleted keys:
         */
-       end = __btree_node_key_to_offset(b, btree_bkey_last(b, prev_t));
        btree_node_iter_for_each(iter, set)
-               if (set->end == end) {
-                       memmove(&iter->data[1],
-                               &iter->data[0],
-                               (void *) set - (void *) &iter->data[0]);
-                       goto out;
-               }
+               if (set->end == end)
+                       goto found;
+
+       BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
+found:
+       BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
 
        memmove(&iter->data[1],
                &iter->data[0],
-               (void *) &iter->data[iter->used] - (void *) &iter->data[0]);
-       iter->used++;
-out:
+               (void *) set - (void *) &iter->data[0]);
+
        iter->data[0].k = __btree_node_key_to_offset(b, prev);
        iter->data[0].end = end;
-       return prev;
-}
+out:
+       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+               struct btree_node_iter iter2 = *iter;
 
-struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
-                                             struct btree *b)
-{
-       struct bkey_packed *k;
+               if (prev)
+                       __bch2_btree_node_iter_advance(&iter2, b);
 
-       do {
-               k = bch2_btree_node_iter_prev_all(iter, b);
-       } while (k && bkey_deleted(k));
+               while ((k = bch2_btree_node_iter_peek_all(&iter2, b)) != orig_pos) {
+                       BUG_ON(k->type >= min_key_type);
+                       __bch2_btree_node_iter_advance(&iter2, b);
+               }
+       }
 
-       return k;
+       return prev;
 }
 
 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
@@ -1775,69 +1784,73 @@ void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
        }
 }
 
-int bch2_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
-                          char *buf, size_t size)
+void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
+                        struct bkey_packed *k)
 {
        struct bset_tree *t = bch2_bkey_to_bset(b, k);
        struct bkey_packed *l, *r, *p;
        struct bkey uk, up;
        char buf1[200], buf2[200];
-       unsigned j;
+       unsigned j, inorder;
 
-       if (!size)
-               return 0;
+       if (out->pos != out->end)
+               *out->pos = '\0';
 
        if (!bset_has_ro_aux_tree(t))
-               goto out;
+               return;
 
-       j = __inorder_to_eytzinger(bkey_to_cacheline(b, t, k), t->size, t->extra);
-       if (j &&
-           j < t->size &&
-           k == tree_to_bkey(b, t, j))
-               switch (bkey_float(b, t, j)->exponent) {
-               case BFLOAT_FAILED_UNPACKED:
-                       uk = bkey_unpack_key(b, k);
-                       return scnprintf(buf, size,
-                                        "    failed unpacked at depth %u\n"
-                                        "\t%llu:%llu\n",
-                                        ilog2(j),
-                                        uk.p.inode, uk.p.offset);
-               case BFLOAT_FAILED_PREV:
-                       p = tree_to_prev_bkey(b, t, j);
-                       l = is_power_of_2(j)
-                               ? btree_bkey_first(b, t)
-                               : tree_to_prev_bkey(b, t, j >> ffs(j));
-                       r = is_power_of_2(j + 1)
-                               ? bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))
-                               : tree_to_bkey(b, t, j >> (ffz(j) + 1));
-
-                       up = bkey_unpack_key(b, p);
-                       uk = bkey_unpack_key(b, k);
-                       bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
-                       bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
-
-                       return scnprintf(buf, size,
-                                        "    failed prev at depth %u\n"
-                                        "\tkey starts at bit %u but first differing bit at %u\n"
-                                        "\t%llu:%llu\n"
-                                        "\t%llu:%llu\n"
-                                        "\t%s\n"
-                                        "\t%s\n",
-                                        ilog2(j),
-                                        bch2_bkey_greatest_differing_bit(b, l, r),
-                                        bch2_bkey_greatest_differing_bit(b, p, k),
-                                        uk.p.inode, uk.p.offset,
-                                        up.p.inode, up.p.offset,
-                                        buf1, buf2);
-               case BFLOAT_FAILED_OVERFLOW:
-                       uk = bkey_unpack_key(b, k);
-                       return scnprintf(buf, size,
-                                        "    failed overflow at depth %u\n"
-                                        "\t%llu:%llu\n",
-                                        ilog2(j),
-                                        uk.p.inode, uk.p.offset);
-               }
-out:
-       *buf = '\0';
-       return 0;
+       inorder = bkey_to_cacheline(b, t, k);
+       if (!inorder || inorder >= t->size)
+               return;
+
+       j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
+       if (k != tree_to_bkey(b, t, j))
+               return;
+
+       switch (bkey_float(b, t, j)->exponent) {
+       case BFLOAT_FAILED_UNPACKED:
+               uk = bkey_unpack_key(b, k);
+               pr_buf(out,
+                      "    failed unpacked at depth %u\n"
+                      "\t%llu:%llu\n",
+                      ilog2(j),
+                      uk.p.inode, uk.p.offset);
+               break;
+       case BFLOAT_FAILED_PREV:
+               p = tree_to_prev_bkey(b, t, j);
+               l = is_power_of_2(j)
+                       ? btree_bkey_first(b, t)
+                       : tree_to_prev_bkey(b, t, j >> ffs(j));
+               r = is_power_of_2(j + 1)
+                       ? bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))
+                       : tree_to_bkey(b, t, j >> (ffz(j) + 1));
+
+               up = bkey_unpack_key(b, p);
+               uk = bkey_unpack_key(b, k);
+               bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
+               bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
+
+               pr_buf(out,
+                      "    failed prev at depth %u\n"
+                      "\tkey starts at bit %u but first differing bit at %u\n"
+                      "\t%llu:%llu\n"
+                      "\t%llu:%llu\n"
+                      "\t%s\n"
+                      "\t%s\n",
+                      ilog2(j),
+                      bch2_bkey_greatest_differing_bit(b, l, r),
+                      bch2_bkey_greatest_differing_bit(b, p, k),
+                      uk.p.inode, uk.p.offset,
+                      up.p.inode, up.p.offset,
+                      buf1, buf2);
+               break;
+       case BFLOAT_FAILED_OVERFLOW:
+               uk = bkey_unpack_key(b, k);
+               pr_buf(out,
+                      "    failed overflow at depth %u\n"
+                      "\t%llu:%llu\n",
+                      ilog2(j),
+                      uk.p.inode, uk.p.offset);
+               break;
+       }
 }