diff --git a/libbcachefs/bset.c b/libbcachefs/bset.c
index 10f3f3f353a60a7e26956b826fa93dab88725ef8..bb73ba9017b006e7fe181e19b7cccfe8494c1339 100644
--- a/libbcachefs/bset.c
+++ b/libbcachefs/bset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Code for working with individual keys, and sorted sets of keys within a
  * btree node
@@ -6,30 +7,33 @@
  */
 
 #include "bcachefs.h"
+#include "btree_cache.h"
 #include "bset.h"
 #include "eytzinger.h"
+#include "trace.h"
 #include "util.h"
 
 #include <asm/unaligned.h>
-#include <linux/dynamic_fault.h>
 #include <linux/console.h>
 #include <linux/random.h>
 #include <linux/prefetch.h>
 
-/* hack.. */
-#include "alloc_types.h"
-#include <trace/events/bcachefs.h>
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
+                                                 struct btree *);
 
-struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
+static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
 {
-       struct bset_tree *t;
+       unsigned n = ARRAY_SIZE(iter->data);
 
-       for_each_bset(b, t)
-               if (k >= btree_bkey_first(b, t) &&
-                   k < btree_bkey_last(b, t))
-                       return t;
+       while (n && __btree_node_iter_set_end(iter, n - 1))
+               --n;
+
+       return n;
+}
 
-       BUG();
+struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
+{
+       return bch2_bkey_to_bset_inlined(b, k);
 }
 
 /*
@@ -48,54 +52,56 @@ struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
  * by the time we actually do the insert will all be deleted.
  */
 
-void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
+void bch2_dump_bset(struct bch_fs *c, struct btree *b,
+                   struct bset *i, unsigned set)
 {
        struct bkey_packed *_k, *_n;
-       struct bkey k, n;
-       char buf[120];
+       struct bkey uk, n;
+       struct bkey_s_c k;
+       struct printbuf buf = PRINTBUF;
 
        if (!i->u64s)
                return;
 
-       for (_k = i->start, k = bkey_unpack_key(b, _k);
+       for (_k = i->start;
             _k < vstruct_last(i);
-            _k = _n, k = n) {
-               _n = bkey_next(_k);
+            _k = _n) {
+               _n = bkey_p_next(_k);
 
-               bch2_bkey_to_text(buf, sizeof(buf), &k);
-               printk(KERN_ERR "block %u key %zi/%u: %s\n", set,
-                      _k->_data - i->_data, i->u64s, buf);
+               k = bkey_disassemble(b, _k, &uk);
+
+               printbuf_reset(&buf);
+               if (c)
+                       bch2_bkey_val_to_text(&buf, c, k);
+               else
+                       bch2_bkey_to_text(&buf, k.k);
+               printk(KERN_ERR "block %u key %5zu: %s\n", set,
+                      _k->_data - i->_data, buf.buf);
 
                if (_n == vstruct_last(i))
                        continue;
 
                n = bkey_unpack_key(b, _n);
 
-               if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
+               if (bpos_lt(n.p, k.k->p)) {
                        printk(KERN_ERR "Key skipped backwards\n");
                        continue;
                }
 
-               /*
-                * Weird check for duplicate non extent keys: extents are
-                * deleted iff they have 0 size, so if it has zero size and it's
-                * not deleted these aren't extents:
-                */
-               if (((!k.size && !bkey_deleted(&k)) ||
-                    (!n.size && !bkey_deleted(&n))) &&
-                   !bkey_deleted(&k) &&
-                   !bkey_cmp(n.p, k.p))
+               if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
                        printk(KERN_ERR "Duplicate keys\n");
        }
+
+       printbuf_exit(&buf);
 }
 
-void bch2_dump_btree_node(struct btree *b)
+void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
 {
        struct bset_tree *t;
 
        console_lock();
        for_each_bset(b, t)
-               bch2_dump_bset(b, bset(b, t), t - b->set);
+               bch2_dump_bset(c, b, bset(b, t), t - b->set);
        console_unlock();
 }
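
The dump helpers above move from fixed-size char buffers to bcachefs's struct printbuf: one heap-backed buffer is reused across keys via printbuf_reset() and torn down once with printbuf_exit(). A minimal userspace sketch of that lifecycle, with a toy printbuf of my own (pr_str and the struct layout here are stand-ins, not the real bcachefs API):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct printbuf {
        char    *buf;
        size_t  size;
        size_t  pos;
};

#define PRINTBUF ((struct printbuf) { NULL, 0, 0 })

static void printbuf_reset(struct printbuf *out)
{
        out->pos = 0;                   /* keep the allocation, rewind */
        if (out->buf)
                out->buf[0] = '\0';
}

static void printbuf_exit(struct printbuf *out)
{
        free(out->buf);                 /* freed once, after the loop */
        *out = PRINTBUF;
}

static void pr_str(struct printbuf *out, const char *s)
{
        size_t len = strlen(s);

        if (out->pos + len + 1 > out->size) {
                out->size = (out->pos + len + 1) * 2;
                out->buf  = realloc(out->buf, out->size); /* sketch: no OOM check */
        }
        memcpy(out->buf + out->pos, s, len + 1);
        out->pos += len;
}

int main(void)
{
        const char *keys[] = { "u64s 5 type btree_ptr", "u64s 8 type extent" };
        struct printbuf buf = PRINTBUF;

        for (unsigned i = 0; i < 2; i++) {
                printbuf_reset(&buf);   /* one buffer, reused per key */
                pr_str(&buf, keys[i]);
                printf("key %u: %s\n", i, buf.buf);
        }
        printbuf_exit(&buf);
        return 0;
}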
 
@@ -103,37 +109,27 @@ void bch2_dump_btree_node_iter(struct btree *b,
                              struct btree_node_iter *iter)
 {
        struct btree_node_iter_set *set;
+       struct printbuf buf = PRINTBUF;
 
-       printk(KERN_ERR "btree node iter with %u sets:\n", b->nsets);
+       printk(KERN_ERR "btree node iter with %u/%u sets:\n",
+              __btree_node_iter_used(iter), b->nsets);
 
        btree_node_iter_for_each(iter, set) {
                struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
                struct bset_tree *t = bch2_bkey_to_bset(b, k);
                struct bkey uk = bkey_unpack_key(b, k);
-               char buf[100];
 
-               bch2_bkey_to_text(buf, sizeof(buf), &uk);
-               printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set,
-                      k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf);
+               printbuf_reset(&buf);
+               bch2_bkey_to_text(&buf, &uk);
+               printk(KERN_ERR "set %zu key %u: %s\n",
+                      t - b->set, set->k, buf.buf);
        }
+
+       printbuf_exit(&buf);
 }
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
-static bool keys_out_of_order(struct btree *b,
-                             const struct bkey_packed *prev,
-                             const struct bkey_packed *next,
-                             bool is_extents)
-{
-       struct bkey nextu = bkey_unpack_key(b, next);
-
-       return bkey_cmp_left_packed_byval(b, prev, bkey_start_pos(&nextu)) > 0 ||
-               ((is_extents
-                 ? !bkey_deleted(next)
-                 : !bkey_deleted(prev)) &&
-                !bkey_cmp_packed(b, prev, next));
-}
-
 void __bch2_verify_btree_nr_keys(struct btree *b)
 {
        struct bset_tree *t;
@@ -141,169 +137,171 @@ void __bch2_verify_btree_nr_keys(struct btree *b)
        struct btree_nr_keys nr = { 0 };
 
        for_each_bset(b, t)
-               for (k = btree_bkey_first(b, t);
-                    k != btree_bkey_last(b, t);
-                    k = bkey_next(k))
-                       if (!bkey_whiteout(k))
+               bset_tree_for_each_key(b, t, k)
+                       if (!bkey_deleted(k))
                                btree_keys_account_key_add(&nr, t - b->set, k);
 
        BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
 }
 
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
-                                          struct btree *b,
-                                          struct bkey_packed *k)
+static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
+                                           struct btree *b)
 {
-       const struct bkey_packed *n = bch2_btree_node_iter_peek_all(iter, b);
+       struct btree_node_iter iter = *_iter;
+       const struct bkey_packed *k, *n;
+
+       k = bch2_btree_node_iter_peek_all(&iter, b);
+       __bch2_btree_node_iter_advance(&iter, b);
+       n = bch2_btree_node_iter_peek_all(&iter, b);
 
        bkey_unpack_key(b, k);
 
        if (n &&
-           keys_out_of_order(b, k, n, iter->is_extents)) {
+           bkey_iter_cmp(b, k, n) > 0) {
+               struct btree_node_iter_set *set;
                struct bkey ku = bkey_unpack_key(b, k);
                struct bkey nu = bkey_unpack_key(b, n);
-               char buf1[80], buf2[80];
-
-               bch2_dump_btree_node(b);
-               bch2_bkey_to_text(buf1, sizeof(buf1), &ku);
-               bch2_bkey_to_text(buf2, sizeof(buf2), &nu);
-               panic("out of order/overlapping:\n%s\n%s\n", buf1, buf2);
+               struct printbuf buf1 = PRINTBUF;
+               struct printbuf buf2 = PRINTBUF;
+
+               bch2_dump_btree_node(NULL, b);
+               bch2_bkey_to_text(&buf1, &ku);
+               bch2_bkey_to_text(&buf2, &nu);
+               printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
+                      buf1.buf, buf2.buf);
+               printk(KERN_ERR "iter was:");
+
+               btree_node_iter_for_each(_iter, set) {
+                       struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
+                       struct bset_tree *t = bch2_bkey_to_bset(b, k2);
+                       printk(" [%zi %zi]", t - b->set,
+                              k2->_data - bset(b, t)->_data);
+               }
+               panic("\n");
        }
 }
 
 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
-                               struct btree *b)
+                                struct btree *b)
 {
-       struct btree_node_iter_set *set;
+       struct btree_node_iter_set *set, *s2;
+       struct bkey_packed *k, *p;
        struct bset_tree *t;
-       struct bkey_packed *k, *first;
-
-       BUG_ON(iter->used > MAX_BSETS);
 
-       if (!iter->used)
+       if (bch2_btree_node_iter_end(iter))
                return;
 
+       /* Verify no duplicates: */
        btree_node_iter_for_each(iter, set) {
-               k = __btree_node_offset_to_key(b, set->k);
-               t = bch2_bkey_to_bset(b, k);
-
-               BUG_ON(__btree_node_offset_to_key(b, set->end) !=
-                      btree_bkey_last(b, t));
-
-               BUG_ON(set + 1 < iter->data + iter->used &&
-                      btree_node_iter_cmp(iter, b, set[0], set[1]) > 0);
+               BUG_ON(set->k > set->end);
+               btree_node_iter_for_each(iter, s2)
+                       BUG_ON(set != s2 && set->end == s2->end);
        }
 
-       first = __btree_node_offset_to_key(b, iter->data[0].k);
-
-       for_each_bset(b, t)
-               if (bch2_btree_node_iter_bset_pos(iter, b, t) ==
-                   btree_bkey_last(b, t) &&
-                   (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))))
-                       BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
-                                                    k, first) > 0);
-}
-
-void bch2_verify_key_order(struct btree *b,
-                         struct btree_node_iter *iter,
-                         struct bkey_packed *where)
-{
-       struct bset_tree *t = bch2_bkey_to_bset(b, where);
-       struct bkey_packed *k, *prev;
-       struct bkey uk, uw = bkey_unpack_key(b, where);
-
-       k = bch2_bkey_prev_all(b, t, where);
-       if (k &&
-           keys_out_of_order(b, k, where, iter->is_extents)) {
-               char buf1[100], buf2[100];
-
-               bch2_dump_btree_node(b);
-               uk = bkey_unpack_key(b, k);
-               bch2_bkey_to_text(buf1, sizeof(buf1), &uk);
-               bch2_bkey_to_text(buf2, sizeof(buf2), &uw);
-               panic("out of order with prev:\n%s\n%s\n",
-                     buf1, buf2);
+       /* Verify that set->end is correct: */
+       btree_node_iter_for_each(iter, set) {
+               for_each_bset(b, t)
+                       if (set->end == t->end_offset)
+                               goto found;
+               BUG();
+found:
+               BUG_ON(set->k < btree_bkey_first_offset(t) ||
+                      set->k >= t->end_offset);
        }
 
-       k = bkey_next(where);
-       BUG_ON(k != btree_bkey_last(b, t) &&
-              keys_out_of_order(b, where, k, iter->is_extents));
+       /* Verify iterator is sorted: */
+       btree_node_iter_for_each(iter, set)
+               BUG_ON(set != iter->data &&
+                      btree_node_iter_cmp(b, set[-1], set[0]) > 0);
+
+       k = bch2_btree_node_iter_peek_all(iter, b);
 
        for_each_bset(b, t) {
-               if (where >= btree_bkey_first(b, t) ||
-                   where < btree_bkey_last(b, t))
+               if (iter->data[0].end == t->end_offset)
                        continue;
 
-               k = bch2_btree_node_iter_bset_pos(iter, b, t);
-
-               if (k == btree_bkey_last(b, t))
-                       k = bch2_bkey_prev_all(b, t, k);
-
-               while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 &&
-                      (prev = bch2_bkey_prev_all(b, t, k)))
-                       k = prev;
-
-               for (;
-                    k != btree_bkey_last(b, t);
-                    k = bkey_next(k)) {
-                       uk = bkey_unpack_key(b, k);
+               p = bch2_bkey_prev_all(b, t,
+                       bch2_btree_node_iter_bset_pos(iter, b, t));
 
-                       if (iter->is_extents) {
-                               BUG_ON(!(bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0 ||
-                                        bkey_cmp(uk.p, bkey_start_pos(&uw)) <= 0));
-                       } else {
-                               BUG_ON(!bkey_cmp(uw.p, uk.p) &&
-                                      !bkey_deleted(&uk));
-                       }
+               BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
+       }
+}
 
-                       if (bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0)
-                               break;
-               }
+void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
+                           struct bkey_packed *insert, unsigned clobber_u64s)
+{
+       struct bset_tree *t = bch2_bkey_to_bset(b, where);
+       struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
+       struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
+       struct printbuf buf1 = PRINTBUF;
+       struct printbuf buf2 = PRINTBUF;
+#if 0
+       BUG_ON(prev &&
+              bkey_iter_cmp(b, prev, insert) > 0);
+#else
+       if (prev &&
+           bkey_iter_cmp(b, prev, insert) > 0) {
+               struct bkey k1 = bkey_unpack_key(b, prev);
+               struct bkey k2 = bkey_unpack_key(b, insert);
+
+               bch2_dump_btree_node(NULL, b);
+               bch2_bkey_to_text(&buf1, &k1);
+               bch2_bkey_to_text(&buf2, &k2);
+
+               panic("prev > insert:\n"
+                     "prev    key %s\n"
+                     "insert  key %s\n",
+                     buf1.buf, buf2.buf);
+       }
+#endif
+#if 0
+       BUG_ON(next != btree_bkey_last(b, t) &&
+              bkey_iter_cmp(b, insert, next) > 0);
+#else
+       if (next != btree_bkey_last(b, t) &&
+           bkey_iter_cmp(b, insert, next) > 0) {
+               struct bkey k1 = bkey_unpack_key(b, insert);
+               struct bkey k2 = bkey_unpack_key(b, next);
+
+               bch2_dump_btree_node(NULL, b);
+               bch2_bkey_to_text(&buf1, &k1);
+               bch2_bkey_to_text(&buf2, &k2);
+
+               panic("insert > next:\n"
+                     "insert  key %s\n"
+                     "next    key %s\n",
+                     buf1.buf, buf2.buf);
        }
+#endif
 }
 
 #else
 
-static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
-                                          struct btree *b,
-                                          struct bkey_packed *k) {}
+static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
+                                                  struct btree *b) {}
 
 #endif
 
 /* Auxiliary search trees */
 
-#define BFLOAT_FAILED_UNPACKED (U8_MAX - 0)
-#define BFLOAT_FAILED_PREV     (U8_MAX - 1)
-#define BFLOAT_FAILED_OVERFLOW (U8_MAX - 2)
-#define BFLOAT_FAILED          (U8_MAX - 2)
-
-#define KEY_WORDS              BITS_TO_LONGS(1 << BKEY_EXPONENT_BITS)
+#define BFLOAT_FAILED_UNPACKED U8_MAX
+#define BFLOAT_FAILED          U8_MAX
 
 struct bkey_float {
        u8              exponent;
        u8              key_offset;
-       union {
-               u32     mantissa32;
-       struct {
-               u16     mantissa16;
-               u16     _pad;
-       };
-       };
-} __packed;
-
-#define BFLOAT_32BIT_NR                32U
+       u16             mantissa;
+};
+#define BKEY_MANTISSA_BITS     16
 
 static unsigned bkey_float_byte_offset(unsigned idx)
 {
-       int d = (idx - BFLOAT_32BIT_NR) << 1;
-
-       d &= ~(d >> 31);
-
-       return idx * 6 - d;
+       return idx * sizeof(struct bkey_float);
 }
 
 struct ro_aux_tree {
-       struct bkey_float       _d[0];
+       u8                      nothing[0];
+       struct bkey_float       f[];
 };
 
 struct rw_aux_tree {
@@ -311,44 +309,6 @@ struct rw_aux_tree {
        struct bpos     k;
 };
 
-/*
- * BSET_CACHELINE was originally intended to match the hardware cacheline size -
- * it used to be 64, but I realized the lookup code would touch slightly less
- * memory if it was 128.
- *
- * It defines the number of bytes (in struct bset) per struct bkey_float in
- * the auxiliary search tree - when we're done searching the bset_float tree we
- * have this many bytes left that we do a linear search over.
- *
- * Since (after level 5) every level of the bset_tree is on a new cacheline,
- * we're touching one fewer cacheline in the bset tree in exchange for one more
- * cacheline in the linear search - but the linear search might stop before it
- * gets to the second cacheline.
- */
-
-#define BSET_CACHELINE         128
-
-/* Space required for the btree node keys */
-static inline size_t btree_keys_bytes(struct btree *b)
-{
-       return PAGE_SIZE << b->page_order;
-}
-
-static inline size_t btree_keys_cachelines(struct btree *b)
-{
-       return btree_keys_bytes(b) / BSET_CACHELINE;
-}
-
-static inline size_t btree_aux_data_bytes(struct btree *b)
-{
-       return btree_keys_cachelines(b) * 8;
-}
-
-static inline size_t btree_aux_data_u64s(struct btree *b)
-{
-       return btree_aux_data_bytes(b) / sizeof(u64);
-}
-
 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
 {
        BUG_ON(t->aux_data_offset == U16_MAX);
@@ -358,8 +318,8 @@ static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
                return t->aux_data_offset;
        case BSET_RO_AUX_TREE:
                return t->aux_data_offset +
-                       DIV_ROUND_UP(bkey_float_byte_offset(t->size) +
-                                    sizeof(u8) * t->size, 8);
+                       DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
+                                    t->size * sizeof(u8), 8);
        case BSET_RW_AUX_TREE:
                return t->aux_data_offset +
                        DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
@@ -398,23 +358,17 @@ static u8 *ro_aux_tree_prev(const struct btree *b,
        return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
 }
 
-static struct bkey_float *bkey_float_get(struct ro_aux_tree *b,
-                                        unsigned idx)
-{
-       return (void *) b + bkey_float_byte_offset(idx);
-}
-
 static struct bkey_float *bkey_float(const struct btree *b,
                                     const struct bset_tree *t,
                                     unsigned idx)
 {
-       return bkey_float_get(ro_aux_tree_base(b, t), idx);
+       return ro_aux_tree_base(b, t)->f + idx;
 }
 
-static void bset_aux_tree_verify(struct btree *b)
+static void bset_aux_tree_verify(const struct btree *b)
 {
 #ifdef CONFIG_BCACHEFS_DEBUG
-       struct bset_tree *t;
+       const struct bset_tree *t;
 
        for_each_bset(b, t) {
                if (t->aux_data_offset == U16_MAX)
@@ -430,34 +384,13 @@ static void bset_aux_tree_verify(struct btree *b)
 #endif
 }
 
-/* Memory allocation */
-
-void bch2_btree_keys_free(struct btree *b)
-{
-       vfree(b->aux_data);
-       b->aux_data = NULL;
-}
-
-int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
-{
-       b->page_order   = page_order;
-       b->aux_data     = __vmalloc(btree_aux_data_bytes(b), gfp,
-                                   PAGE_KERNEL_EXEC);
-       if (!b->aux_data)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
+void bch2_btree_keys_init(struct btree *b)
 {
        unsigned i;
 
        b->nsets                = 0;
        memset(&b->nr, 0, sizeof(b->nr));
-#ifdef CONFIG_BCACHEFS_DEBUG
-       b->expensive_debug_checks = expensive_debug_checks;
-#endif
+
        for (i = 0; i < MAX_BSETS; i++)
                b->set[i].data_offset = U16_MAX;
 
@@ -534,7 +467,7 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
                                               unsigned j)
 {
        return cacheline_to_bkey(b, t,
-                       __eytzinger1_to_inorder(j, t->size, t->extra),
+                       __eytzinger1_to_inorder(j, t->size - 1, t->extra),
                        bkey_float(b, t, j)->key_offset);
 }
 
@@ -544,7 +477,7 @@ static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
 {
        unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
 
-       return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
+       return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
 }
 
 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
@@ -569,7 +502,7 @@ static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
                            unsigned j, struct bkey_packed *k)
 {
-       BUG_ON(k >= btree_bkey_last(b, t));
+       EBUG_ON(k >= btree_bkey_last(b, t));
 
        rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
                .offset = __btree_node_key_to_offset(b, k),
@@ -583,7 +516,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
        struct bkey_packed *k = btree_bkey_first(b, t);
        unsigned j = 0;
 
-       if (!btree_keys_expensive_checks(b))
+       if (!bch2_expensive_debug_checks)
                return;
 
        BUG_ON(bset_has_ro_aux_tree(t));
@@ -597,7 +530,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
        goto start;
        while (1) {
                if (rw_aux_to_bkey(b, t, j) == k) {
-                       BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
+                       BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
                                        bkey_unpack_pos(b, k)));
 start:
                        if (++j == t->size)
@@ -607,7 +540,7 @@ start:
                               rw_aux_tree(b, t)[j - 1].offset);
                }
 
-               k = bkey_next(k);
+               k = bkey_p_next(k);
                BUG_ON(k >= btree_bkey_last(b, t));
        }
 }
@@ -617,43 +550,30 @@ static unsigned rw_aux_tree_bsearch(struct btree *b,
                                    struct bset_tree *t,
                                    unsigned offset)
 {
-       unsigned l = 0, r = t->size;
-
-       BUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
-
-       while (l < r) {
-               unsigned m = (l + r) >> 1;
-
-               if (rw_aux_tree(b, t)[m].offset < offset)
-                       l = m + 1;
-               else
-                       r = m;
-       }
+       unsigned bset_offs = offset - btree_bkey_first_offset(t);
+       unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
+       unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
 
-       BUG_ON(l < t->size &&
-              rw_aux_tree(b, t)[l].offset < offset);
-       BUG_ON(l &&
-              rw_aux_tree(b, t)[l - 1].offset >= offset);
+       EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
+       EBUG_ON(!t->size);
+       EBUG_ON(idx > t->size);
 
-       BUG_ON(l > r);
-       BUG_ON(l > t->size);
+       while (idx < t->size &&
+              rw_aux_tree(b, t)[idx].offset < offset)
+               idx++;
 
-       return l;
-}
+       while (idx &&
+              rw_aux_tree(b, t)[idx - 1].offset >= offset)
+               idx--;
 
-static inline unsigned bfloat_mantissa(const struct bkey_float *f,
-                                      unsigned idx)
-{
-       return idx < BFLOAT_32BIT_NR ? f->mantissa32 : f->mantissa16;
-}
+       EBUG_ON(idx < t->size &&
+               rw_aux_tree(b, t)[idx].offset < offset);
+       EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
+       EBUG_ON(idx + 1 < t->size &&
+               rw_aux_tree(b, t)[idx].offset ==
+               rw_aux_tree(b, t)[idx + 1].offset);
 
-static inline void bfloat_mantissa_set(struct bkey_float *f,
-                                      unsigned idx, unsigned mantissa)
-{
-       if (idx < BFLOAT_32BIT_NR)
-               f->mantissa32 = mantissa;
-       else
-               f->mantissa16 = mantissa;
+       return idx;
 }
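
The rewritten bsearch above trades the plain binary search for an interpolated starting guess (the offset's fraction of the bset, scaled by table size) plus two short fixup scans; since rw aux tree entries are roughly evenly spaced, the scans usually take only a step or two. A standalone sketch of the same idea over a sorted offset array (interp_search is my name; it assumes first <= offset <= last, and clamps the guess where the kernel code asserts):

#include <assert.h>
#include <stdio.h>

static unsigned interp_search(const unsigned *offsets, unsigned size,
                              unsigned first, unsigned last, unsigned offset)
{
        unsigned span = last - first;
        unsigned idx  = span ? (unsigned) ((unsigned long long) (offset - first)
                                           * size / span) : 0;

        if (idx > size)
                idx = size;

        while (idx < size && offsets[idx] < offset)     /* walk up... */
                idx++;
        while (idx && offsets[idx - 1] >= offset)       /* ...or back down */
                idx--;

        return idx;                     /* first entry >= offset */
}

int main(void)
{
        unsigned offsets[] = { 0, 6, 11, 19, 24, 30 };

        assert(interp_search(offsets, 6, 0, 32, 19) == 3);
        assert(interp_search(offsets, 6, 0, 32, 20) == 4);
        printf("ok\n");
        return 0;
}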
 
 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
@@ -672,72 +592,36 @@ static inline unsigned bkey_mantissa(const struct bkey_packed *k,
         * (and then the bits we want are at the high end, so we shift them
         * back down):
         */
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        v >>= f->exponent & 7;
 #else
-       v >>= 64 - (f->exponent & 7) - (idx < BFLOAT_32BIT_NR ? 32 : 16);
+       v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
 #endif
-       return idx < BFLOAT_32BIT_NR ? (u32) v : (u16) v;
+       return (u16) v;
 }
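
bkey_mantissa pulls 16 bits starting at an arbitrary bit offset with one unaligned 64-bit load at the containing byte, then a shift by the offset's low three bits. A standalone sketch of the little-endian path (toy byte array instead of a packed bkey; on big endian the shift comes down from the top instead, as in the #else branch above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t mantissa16(const uint8_t *key, unsigned bit_offset)
{
        uint64_t v;

        /* unaligned load at the byte containing bit_offset */
        memcpy(&v, key + (bit_offset >> 3), sizeof(v));

        /* discard the sub-byte part of the offset */
        return (uint16_t) (v >> (bit_offset & 7));
}

int main(void)
{
        uint8_t key[16] = { 0 };

        key[1] = 0xab;
        key[2] = 0xcd;

        /* 16 bits starting at bit 12: top nibble of 0xab, then all of 0xcd */
        assert(mantissa16(key, 12) == 0x0cda);
        printf("ok\n");
        return 0;
}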
 
-static void make_bfloat(struct btree *b, struct bset_tree *t,
-                       unsigned j,
-                       struct bkey_packed *min_key,
-                       struct bkey_packed *max_key)
+static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
+                                       unsigned j,
+                                       struct bkey_packed *min_key,
+                                       struct bkey_packed *max_key)
 {
        struct bkey_float *f = bkey_float(b, t, j);
        struct bkey_packed *m = tree_to_bkey(b, t, j);
-       struct bkey_packed *p = tree_to_prev_bkey(b, t, j);
-       struct bkey_packed *l, *r;
-       unsigned bits = j < BFLOAT_32BIT_NR ? 32 : 16;
+       struct bkey_packed *l = is_power_of_2(j)
+               ? min_key
+               : tree_to_prev_bkey(b, t, j >> ffs(j));
+       struct bkey_packed *r = is_power_of_2(j + 1)
+               ? max_key
+               : tree_to_bkey(b, t, j >> (ffz(j) + 1));
        unsigned mantissa;
        int shift, exponent, high_bit;
 
-       EBUG_ON(bkey_next(p) != m);
-
-       if (is_power_of_2(j)) {
-               l = min_key;
-
-               if (!l->u64s) {
-                       if (!bkey_pack_pos(l, b->data->min_key, b)) {
-                               struct bkey_i tmp;
-
-                               bkey_init(&tmp.k);
-                               tmp.k.p = b->data->min_key;
-                               bkey_copy(l, &tmp);
-                       }
-               }
-       } else {
-               l = tree_to_prev_bkey(b, t, j >> ffs(j));
-
-               EBUG_ON(m < l);
-       }
-
-       if (is_power_of_2(j + 1)) {
-               r = max_key;
-
-               if (!r->u64s) {
-                       if (!bkey_pack_pos(r, t->max_key, b)) {
-                               struct bkey_i tmp;
-
-                               bkey_init(&tmp.k);
-                               tmp.k.p = t->max_key;
-                               bkey_copy(r, &tmp);
-                       }
-               }
-       } else {
-               r = tree_to_bkey(b, t, j >> (ffz(j) + 1));
-
-               EBUG_ON(m > r);
-       }
-
        /*
         * for failed bfloats, the lookup code falls back to comparing against
         * the original key.
         */
 
-       if (!bkey_packed(l) || !bkey_packed(r) ||
-           !bkey_packed(p) || !bkey_packed(m) ||
+       if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
            !b->nr_key_bits) {
                f->exponent = BFLOAT_FAILED_UNPACKED;
                return;
@@ -754,26 +638,26 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
         * of the key: we handle this later:
         */
        high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
-                      min_t(unsigned, bits, b->nr_key_bits) - 1);
-       exponent = high_bit - (bits - 1);
+                      min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
+       exponent = high_bit - (BKEY_MANTISSA_BITS - 1);
 
        /*
         * Then we calculate the actual shift value, from the start of the key
         * (k->_data), to get the key bits starting at exponent:
         */
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
 
-       BUG_ON(shift + bits > b->format.key_u64s * 64);
+       EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
 #else
        shift = high_bit_offset +
                b->nr_key_bits -
                exponent -
-               bits;
+               BKEY_MANTISSA_BITS;
 
-       BUG_ON(shift < KEY_PACKED_BITS_START);
+       EBUG_ON(shift < KEY_PACKED_BITS_START);
 #endif
-       BUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
+       EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
 
        f->exponent = shift;
        mantissa = bkey_mantissa(m, f, j);
@@ -785,65 +669,29 @@ static void make_bfloat(struct btree *b, struct bset_tree *t,
        if (exponent < 0)
                mantissa |= ~(~0U << -exponent);
 
-       bfloat_mantissa_set(f, j, mantissa);
-
-       /*
-        * The bfloat must be able to tell its key apart from the previous key -
-        * if its key and the previous key don't differ in the required bits,
-        * flag as failed - unless the keys are actually equal, in which case
-        * we aren't required to return a specific one:
-        */
-       if (exponent > 0 &&
-           bfloat_mantissa(f, j) == bkey_mantissa(p, f, j) &&
-           bkey_cmp_packed(b, p, m)) {
-               f->exponent = BFLOAT_FAILED_PREV;
-               return;
-       }
-
-       /*
-        * f->mantissa must compare >= the original key - for transitivity with
-        * the comparison in bset_search_tree. If we're dropping set bits,
-        * increment it:
-        */
-       if (exponent > (int) bch2_bkey_ffs(b, m)) {
-               if (j < BFLOAT_32BIT_NR
-                   ? f->mantissa32 == U32_MAX
-                   : f->mantissa16 == U16_MAX)
-                       f->exponent = BFLOAT_FAILED_OVERFLOW;
-
-               if (j < BFLOAT_32BIT_NR)
-                       f->mantissa32++;
-               else
-                       f->mantissa16++;
-       }
+       f->mantissa = mantissa;
 }
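
One subtlety above: when exponent goes negative, the 16-bit window overhangs the low end of the key, and `mantissa |= ~(~0U << -exponent)` fills the overhang with ones so the stored bfloat never compares below its own key. A toy check of that invariant (values are arbitrary, chosen so the whole key fits inside the window):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned key      = 0x123;              /* highest set bit = bit 8 */
        int      exponent = 8 - 15;             /* = -7: window overhangs bit 0 */
        unsigned mantissa = key << -exponent;   /* 0x9180: key at top of window */

        mantissa |= ~(~0U << -exponent);        /* 0x91ff: pad overhang with 1s */

        assert(mantissa >> -exponent == key);   /* still decodes to the key... */
        assert(mantissa >= key << -exponent);   /* ...and never compares below it */
        printf("mantissa 0x%04x\n", mantissa);
        return 0;
}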
 
 /* bytes remaining - only valid for last bset: */
-static unsigned __bset_tree_capacity(struct btree *b, struct bset_tree *t)
+static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
 {
        bset_aux_tree_verify(b);
 
        return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
 }
 
-static unsigned bset_ro_tree_capacity(struct btree *b, struct bset_tree *t)
+static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
 {
-       unsigned bytes = __bset_tree_capacity(b, t);
-
-       if (bytes < 7 * BFLOAT_32BIT_NR)
-               return bytes / 7;
-
-       bytes -= 7 * BFLOAT_32BIT_NR;
-
-       return BFLOAT_32BIT_NR + bytes / 5;
+       return __bset_tree_capacity(b, t) /
+               (sizeof(struct bkey_float) + sizeof(u8));
 }
 
-static unsigned bset_rw_tree_capacity(struct btree *b, struct bset_tree *t)
+static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
 {
        return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
 }
 
-static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
+static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
 {
        struct bkey_packed *k;
 
@@ -852,9 +700,7 @@ static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
        rw_aux_tree(b, t)[0].offset =
                __btree_node_key_to_offset(b, btree_bkey_first(b, t));
 
-       for (k = btree_bkey_first(b, t);
-            k != btree_bkey_last(b, t);
-            k = bkey_next(k)) {
+       bset_tree_for_each_key(b, t, k) {
                if (t->size == bset_rw_tree_capacity(b, t))
                        break;
 
@@ -864,15 +710,12 @@ static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
        }
 }
 
-static void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
+static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 {
        struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
-       struct bkey_packed min_key, max_key;
+       struct bkey_i min_key, max_key;
        unsigned j, cacheline = 1;
 
-       /* signal to make_bfloat() that they're uninitialized: */
-       min_key.u64s = max_key.u64s = 0;
-
        t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
                      bset_ro_tree_capacity(b, t));
 retry:
@@ -885,11 +728,12 @@ retry:
        t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
 
        /* First we figure out where the first key in each cacheline is */
-       eytzinger1_for_each(j, t->size) {
+       eytzinger1_for_each(j, t->size - 1) {
                while (bkey_to_cacheline(b, t, k) < cacheline)
-                       prev = k, k = bkey_next(k);
+                       prev = k, k = bkey_p_next(k);
 
                if (k >= btree_bkey_last(b, t)) {
+                       /* XXX: this path sucks */
                        t->size--;
                        goto retry;
                }
@@ -898,18 +742,28 @@ retry:
                bkey_float(b, t, j)->key_offset =
                        bkey_to_cacheline_offset(b, t, cacheline++, k);
 
-               BUG_ON(tree_to_prev_bkey(b, t, j) != prev);
-               BUG_ON(tree_to_bkey(b, t, j) != k);
+               EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
+               EBUG_ON(tree_to_bkey(b, t, j) != k);
        }
 
-       while (bkey_next(k) != btree_bkey_last(b, t))
-               k = bkey_next(k);
+       while (k != btree_bkey_last(b, t))
+               prev = k, k = bkey_p_next(k);
 
-       t->max_key = bkey_unpack_pos(b, k);
+       if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
+               bkey_init(&min_key.k);
+               min_key.k.p = b->data->min_key;
+       }
+
+       if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
+               bkey_init(&max_key.k);
+               max_key.k.p = b->data->max_key;
+       }
 
        /* Then we build the tree */
-       eytzinger1_for_each(j, t->size)
-               make_bfloat(b, t, j, &min_key, &max_key);
+       eytzinger1_for_each(j, t->size - 1)
+               make_bfloat(b, t, j,
+                           bkey_to_packed(&min_key),
+                           bkey_to_packed(&max_key));
 }
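
The ro aux tree uses an eytzinger (BFS, 1-based) layout: node j's children sit at 2j and 2j+1, so an in-order walk over heap indices visits keys in sorted order, and that is the order in which __build_ro_aux_tree assigns cachelines above. A standalone sketch of filling such a layout from a sorted array (just the indexing idea; fill is mine, not the eytzinger1 helpers):

#include <stdio.h>

/* in-order walk of heap indices: fill node j after its left subtree */
static unsigned fill(const int *sorted, int *tree, unsigned size,
                     unsigned i, unsigned j)
{
        if (j <= size) {
                i = fill(sorted, tree, size, i, 2 * j);
                tree[j] = sorted[i++];
                i = fill(sorted, tree, size, i, 2 * j + 1);
        }
        return i;
}

int main(void)
{
        int sorted[] = { 10, 20, 30, 40, 50, 60, 70 };
        int tree[8];                    /* slot 0 unused: 1-based tree */
        unsigned j;

        fill(sorted, tree, 7, 0, 1);

        for (j = 1; j <= 7; j++)
                printf("%d ", tree[j]); /* 40 20 60 10 30 50 70 */
        printf("\n");
        return 0;
}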
 
 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
@@ -963,10 +817,14 @@ void bch2_bset_init_first(struct btree *b, struct bset *i)
        set_btree_bset(b, t, i);
 }
 
-void bch2_bset_init_next(struct btree *b, struct bset *i)
+void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
+                        struct btree_node_entry *bne)
 {
+       struct bset *i = &bne->keys;
        struct bset_tree *t;
 
+       BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
+       BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
        BUG_ON(b->nsets >= MAX_BSETS);
 
        memset(i, 0, sizeof(*i));
@@ -977,6 +835,10 @@ void bch2_bset_init_next(struct btree *b, struct bset *i)
        set_btree_bset(b, t, i);
 }
 
+/*
+ * find _some_ key in the same bset as @k that precedes @k - not necessarily the
+ * immediate predecessor:
+ */
 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
                                       struct bkey_packed *k)
 {
@@ -1000,7 +862,7 @@ static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
                do {
                        p = j ? tree_to_bkey(b, t,
                                        __inorder_to_eytzinger1(j--,
-                                                       t->size, t->extra))
+                                                       t->size - 1, t->extra))
                              : btree_bkey_first(b, t);
                } while (p >= k);
                break;
@@ -1015,129 +877,36 @@ static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
        return p;
 }
 
-struct bkey_packed *bch2_bkey_prev_all(struct btree *b, struct bset_tree *t,
-                                      struct bkey_packed *k)
-{
-       struct bkey_packed *p;
-
-       p = __bkey_prev(b, t, k);
-       if (!p)
-               return NULL;
-
-       while (bkey_next(p) != k)
-               p = bkey_next(p);
-
-       return p;
-}
-
-struct bkey_packed *bch2_bkey_prev(struct btree *b, struct bset_tree *t,
-                                  struct bkey_packed *k)
+struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
+                                         struct bset_tree *t,
+                                         struct bkey_packed *k,
+                                         unsigned min_key_type)
 {
-       while (1) {
-               struct bkey_packed *p, *i, *ret = NULL;
+       struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
-               p = __bkey_prev(b, t, k);
-               if (!p)
-                       return NULL;
-
-               for (i = p; i != k; i = bkey_next(i))
-                       if (!bkey_deleted(i))
+       while ((p = __bkey_prev(b, t, k)) && !ret) {
+               for (i = p; i != k; i = bkey_p_next(i))
+                       if (i->type >= min_key_type)
                                ret = i;
 
-               if (ret)
-                       return ret;
-
                k = p;
        }
-}
-
-/* Insert */
-
-static void rw_aux_tree_fix_invalidated_key(struct btree *b,
-                                           struct bset_tree *t,
-                                           struct bkey_packed *k)
-{
-       unsigned offset = __btree_node_key_to_offset(b, k);
-       unsigned j = rw_aux_tree_bsearch(b, t, offset);
-
-       if (j < t->size &&
-           rw_aux_tree(b, t)[j].offset == offset)
-               rw_aux_tree_set(b, t, j, k);
-
-       bch2_bset_verify_rw_aux_tree(b, t);
-}
-
-static void ro_aux_tree_fix_invalidated_key(struct btree *b,
-                                           struct bset_tree *t,
-                                           struct bkey_packed *k)
-{
-       struct bkey_packed min_key, max_key;
-       unsigned inorder, j;
-
-       BUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
-       /* signal to make_bfloat() that they're uninitialized: */
-       min_key.u64s = max_key.u64s = 0;
-
-       if (bkey_next(k) == btree_bkey_last(b, t)) {
-               t->max_key = bkey_unpack_pos(b, k);
-
-               for (j = 1; j < t->size; j = j * 2 + 1)
-                       make_bfloat(b, t, j, &min_key, &max_key);
-       }
-
-       inorder = bkey_to_cacheline(b, t, k);
 
-       if (inorder &&
-           inorder < t->size) {
-               j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
+       if (bch2_expensive_debug_checks) {
+               BUG_ON(ret >= orig_k);
 
-               if (k == tree_to_bkey(b, t, j)) {
-                       /* Fix the node this key corresponds to */
-                       make_bfloat(b, t, j, &min_key, &max_key);
-
-                       /* Children for which this key is the right boundary */
-                       for (j = eytzinger1_left_child(j);
-                            j < t->size;
-                            j = eytzinger1_right_child(j))
-                               make_bfloat(b, t, j, &min_key, &max_key);
-               }
+               for (i = ret
+                       ? bkey_p_next(ret)
+                       : btree_bkey_first(b, t);
+                    i != orig_k;
+                    i = bkey_p_next(i))
+                       BUG_ON(i->type >= min_key_type);
        }
 
-       if (inorder + 1 < t->size) {
-               j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);
-
-               if (k == tree_to_prev_bkey(b, t, j)) {
-                       make_bfloat(b, t, j, &min_key, &max_key);
-
-                       /* Children for which this key is the left boundary */
-                       for (j = eytzinger1_right_child(j);
-                            j < t->size;
-                            j = eytzinger1_left_child(j))
-                               make_bfloat(b, t, j, &min_key, &max_key);
-               }
-       }
+       return ret;
 }
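
Packed keys only link forward, so as the comment above __bkey_prev says, the lookup tables can only get us to _some_ earlier key; bch2_bkey_prev_filter then scans forward, remembering the last key whose type clears min_key_type. A toy model of that forward-scan-with-filter (array indices stand in for key pointers; names are mine):

#include <assert.h>
#include <stdio.h>

struct key { unsigned type; };

static int prev_filter(const struct key *keys, int start, int k,
                       unsigned min_key_type)
{
        int i, ret = -1;                /* -1: no acceptable predecessor */

        for (i = start; i != k; i++)    /* scan forward, keep the last match */
                if (keys[i].type >= min_key_type)
                        ret = i;
        return ret;
}

int main(void)
{
        struct key keys[] = { { 1 }, { 0 }, { 5 }, { 0 } };

        assert(prev_filter(keys, 0, 3, 1) ==  2);  /* skips the type-0 key */
        assert(prev_filter(keys, 2, 3, 6) == -1);  /* nothing >= type 6 */
        printf("ok\n");
        return 0;
}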
 
-/**
- * bch2_bset_fix_invalidated_key() - given an existing  key @k that has been
- * modified, fix any auxiliary search tree by remaking all the nodes in the
- * auxiliary search tree that @k corresponds to
- */
-void bch2_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
-                                  struct bkey_packed *k)
-{
-       switch (bset_aux_tree_type(t)) {
-       case BSET_NO_AUX_TREE:
-               break;
-       case BSET_RO_AUX_TREE:
-               ro_aux_tree_fix_invalidated_key(b, t, k);
-               break;
-       case BSET_RW_AUX_TREE:
-               rw_aux_tree_fix_invalidated_key(b, t, k);
-               break;
-       }
-}
+/* Insert */
 
 static void bch2_bset_fix_lookup_table(struct btree *b,
                                       struct bset_tree *t,
@@ -1148,18 +917,14 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
        int shift = new_u64s - clobber_u64s;
        unsigned l, j, where = __btree_node_key_to_offset(b, _where);
 
-       BUG_ON(bset_has_ro_aux_tree(t));
+       EBUG_ON(bset_has_ro_aux_tree(t));
 
        if (!bset_has_rw_aux_tree(t))
                return;
 
+       /* returns first entry >= where */
        l = rw_aux_tree_bsearch(b, t, where);
 
-       /* l is first >= than @where */
-
-       BUG_ON(l < t->size && rw_aux_tree(b, t)[l].offset < where);
-       BUG_ON(l && rw_aux_tree(b, t)[l - 1].offset >= where);
-
        if (!l) /* never delete first entry */
                l++;
        else if (l < t->size &&
@@ -1187,11 +952,11 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
        t->size -= j - l;
 
        for (j = l; j < t->size; j++)
-              rw_aux_tree(b, t)[j].offset += shift;
+               rw_aux_tree(b, t)[j].offset += shift;
 
-       BUG_ON(l < t->size &&
-              rw_aux_tree(b, t)[l].offset ==
-              rw_aux_tree(b, t)[l - 1].offset);
+       EBUG_ON(l < t->size &&
+               rw_aux_tree(b, t)[l].offset ==
+               rw_aux_tree(b, t)[l - 1].offset);
 
        if (t->size < bset_rw_tree_capacity(b, t) &&
            (l < t->size
@@ -1206,7 +971,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
                struct bkey_packed *k = start;
 
                while (1) {
-                       k = bkey_next(k);
+                       k = bkey_p_next(k);
                        if (k == end)
                                break;
 
@@ -1237,33 +1002,34 @@ void bch2_bset_insert(struct btree *b,
        struct bkey_packed packed, *src = bkey_to_packed(insert);
 
        bch2_bset_verify_rw_aux_tree(b, t);
+       bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
 
        if (bch2_bkey_pack_key(&packed, &insert->k, f))
                src = &packed;
 
-       if (!bkey_whiteout(&insert->k))
+       if (!bkey_deleted(&insert->k))
                btree_keys_account_key_add(&b->nr, t - b->set, src);
 
        if (src->u64s != clobber_u64s) {
-               u64 *src_p = where->_data + clobber_u64s;
-               u64 *dst_p = where->_data + src->u64s;
+               u64 *src_p = (u64 *) where->_data + clobber_u64s;
+               u64 *dst_p = (u64 *) where->_data + src->u64s;
 
-               BUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
-                      (int) clobber_u64s - src->u64s);
+               EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
+                       (int) clobber_u64s - src->u64s);
 
                memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
                le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
                set_btree_bset_end(b, t);
        }
 
-       memcpy_u64s(where, src,
+       memcpy_u64s_small(where, src,
                    bkeyp_key_u64s(f, src));
        memcpy_u64s(bkeyp_val(f, where), &insert->v,
                    bkeyp_val_u64s(f, src));
 
-       bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
+       if (src->u64s != clobber_u64s)
+               bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
 
-       bch2_verify_key_order(b, iter, where);
        bch2_verify_btree_nr_keys(b);
 }
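
When the new key's size differs from the clobbered range, bch2_bset_insert opens or closes a gap at `where` with a single memmove before copying the key in. A sketch of that shift over a bare u64 array (plain memmove/memcpy in place of memmove_u64s and friends; insert_u64s is my name):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static unsigned insert_u64s(uint64_t *set, unsigned u64s, unsigned where,
                            unsigned clobber, const uint64_t *src, unsigned n)
{
        if (n != clobber)                       /* resize the hole at `where` */
                memmove(set + where + n,
                        set + where + clobber,
                        (u64s - (where + clobber)) * sizeof(*set));

        memcpy(set + where, src, n * sizeof(*set));
        return u64s - clobber + n;              /* new bset u64 count */
}

int main(void)
{
        uint64_t set[8] = { 1, 2, 3, 4, 5 };
        uint64_t key[2] = { 8, 9 };
        unsigned i, u64s = insert_u64s(set, 5, 2, 1, key, 2);

        for (i = 0; i < u64s; i++)
                printf("%" PRIu64 " ", set[i]); /* 1 2 8 9 4 5 */
        printf("\n");
        return 0;
}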
 
@@ -1272,12 +1038,12 @@ void bch2_bset_delete(struct btree *b,
                      unsigned clobber_u64s)
 {
        struct bset_tree *t = bset_tree_last(b);
-       u64 *src_p = where->_data + clobber_u64s;
+       u64 *src_p = (u64 *) where->_data + clobber_u64s;
        u64 *dst_p = where->_data;
 
        bch2_bset_verify_rw_aux_tree(b, t);
 
-       BUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
+       EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
 
        memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
        le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
@@ -1291,15 +1057,14 @@ void bch2_bset_delete(struct btree *b,
 __flatten
 static struct bkey_packed *bset_search_write_set(const struct btree *b,
                                struct bset_tree *t,
-                               struct bpos search,
-                               const struct bkey_packed *packed_search)
+                               struct bpos *search)
 {
        unsigned l = 0, r = t->size;
 
        while (l + 1 != r) {
                unsigned m = (l + r) >> 1;
 
-               if (bkey_cmp(rw_aux_tree(b, t)[m].k, search) < 0)
+               if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
                        l = m;
                else
                        r = m;
@@ -1308,93 +1073,98 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
        return rw_aux_to_bkey(b, t, l);
 }
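
The loop above keeps a two-pointer invariant: rw_aux_tree[l].k stays strictly below the search position and rw_aux_tree[r].k at or above it, so l ends up as the last table slot before the search key — a safe starting point for the linear pass that follows. The same loop over plain ints (search_floor is my name):

#include <assert.h>
#include <stdio.h>

static unsigned search_floor(const int *tab, unsigned size, int search)
{
        unsigned l = 0, r = size;

        while (l + 1 != r) {
                unsigned m = (l + r) >> 1;

                if (tab[m] < search)
                        l = m;          /* tab[l] stays < search */
                else
                        r = m;          /* tab[r] stays >= search */
        }
        return l;                       /* linear scan starts here */
}

int main(void)
{
        int tab[] = { 5, 10, 15, 20 };

        assert(search_floor(tab, 4, 17) == 2);  /* 15 < 17 <= 20 */
        assert(search_floor(tab, 4, 3)  == 0);  /* never before slot 0 */
        printf("ok\n");
        return 0;
}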
 
-noinline
-static int bset_search_tree_slowpath(const struct btree *b,
-                               struct bset_tree *t, struct bpos *search,
-                               const struct bkey_packed *packed_search,
-                               unsigned n)
+static inline void prefetch_four_cachelines(void *p)
 {
-       return bkey_cmp_p_or_unp(b, tree_to_bkey(b, t, n),
-                                packed_search, search) < 0;
+#ifdef CONFIG_X86_64
+       asm("prefetcht0 (-127 + 64 * 0)(%0);"
+           "prefetcht0 (-127 + 64 * 1)(%0);"
+           "prefetcht0 (-127 + 64 * 2)(%0);"
+           "prefetcht0 (-127 + 64 * 3)(%0);"
+           :
+           : "r" (p + 127));
+#else
+       prefetch(p + L1_CACHE_BYTES * 0);
+       prefetch(p + L1_CACHE_BYTES * 1);
+       prefetch(p + L1_CACHE_BYTES * 2);
+       prefetch(p + L1_CACHE_BYTES * 3);
+#endif
+}
+
+static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
+                                             const struct bkey_float *f,
+                                             unsigned idx)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+       unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
+
+       return f->exponent > key_bits_start;
+#else
+       unsigned key_bits_end = high_bit_offset + b->nr_key_bits;
+
+       return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
+#endif
 }
 
 __flatten
 static struct bkey_packed *bset_search_tree(const struct btree *b,
-                               struct bset_tree *t,
-                               struct bpos search,
+                               const struct bset_tree *t,
+                               const struct bpos *search,
                                const struct bkey_packed *packed_search)
 {
        struct ro_aux_tree *base = ro_aux_tree_base(b, t);
-       struct bkey_float *f = bkey_float_get(base, 1);
-       void *p;
-       unsigned inorder, n = 1;
+       struct bkey_float *f;
+       struct bkey_packed *k;
+       unsigned inorder, n = 1, l, r;
+       int cmp;
 
-       while (1) {
-               if (likely(n << 4 < t->size)) {
-                       p = bkey_float_get(base, n << 4);
-                       prefetch(p);
-               } else if (n << 3 < t->size) {
-                       inorder = __eytzinger1_to_inorder(n, t->size, t->extra);
-                       p = bset_cacheline(b, t, inorder);
-#ifdef CONFIG_X86_64
-                       asm(".intel_syntax noprefix;"
-                           "prefetcht0 [%0 - 127 + 64 * 0];"
-                           "prefetcht0 [%0 - 127 + 64 * 1];"
-                           "prefetcht0 [%0 - 127 + 64 * 2];"
-                           "prefetcht0 [%0 - 127 + 64 * 3];"
-                           ".att_syntax prefix;"
-                           :
-                           : "r" (p + 127));
-#else
-                       prefetch(p + L1_CACHE_BYTES * 0);
-                       prefetch(p + L1_CACHE_BYTES * 1);
-                       prefetch(p + L1_CACHE_BYTES * 2);
-                       prefetch(p + L1_CACHE_BYTES * 3);
-#endif
-               } else if (n >= t->size)
-                       break;
+       do {
+               if (likely(n << 4 < t->size))
+                       prefetch(&base->f[n << 4]);
 
-               f = bkey_float_get(base, n);
+               f = &base->f[n];
+               if (unlikely(f->exponent >= BFLOAT_FAILED))
+                       goto slowpath;
 
-               if (packed_search &&
-                   likely(f->exponent < BFLOAT_FAILED))
-                       n = n * 2 + (bfloat_mantissa(f, n) <
-                                    bkey_mantissa(packed_search, f, n));
-               else
-                       n = n * 2 + bset_search_tree_slowpath(b, t,
-                                               &search, packed_search, n);
+               l = f->mantissa;
+               r = bkey_mantissa(packed_search, f, n);
+
+               if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
+                       goto slowpath;
+
+               n = n * 2 + (l < r);
+               continue;
+slowpath:
+               k = tree_to_bkey(b, t, n);
+               cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
+               if (!cmp)
+                       return k;
+
+               n = n * 2 + (cmp < 0);
        } while (n < t->size);
 
-       inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
+       inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);
 
        /*
         * n would have been the node we recursed to - the low bit tells us if
         * we recursed left or recursed right.
         */
-       if (n & 1) {
-               return cacheline_to_bkey(b, t, inorder, f->key_offset);
-       } else {
-               if (--inorder) {
-                       n = eytzinger1_prev(n >> 1, t->size);
-                       f = bkey_float_get(base, n);
-                       return cacheline_to_bkey(b, t, inorder, f->key_offset);
-               } else
+       if (likely(!(n & 1))) {
+               --inorder;
+               if (unlikely(!inorder))
                        return btree_bkey_first(b, t);
+
+               f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
        }
+
+       return cacheline_to_bkey(b, t, inorder, f->key_offset);
 }
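
The descent above is the standard branchless eytzinger search: each comparison result becomes the low bit of n, and the node 16 positions ahead (n << 4, i.e. 4 levels down) is prefetched while the current compare is in flight. A standalone lower bound over plain ints with the same shape, plus the ffs(~n) trick for turning the recorded path back into an index (this models the layout only, not the bfloat compare or the slowpath):

#include <assert.h>
#include <stdio.h>

static unsigned eytz_lower_bound(const int *tree, unsigned size, int x)
{
        unsigned n = 1;

        while (n < size) {
                if (n << 4 < size)
                        __builtin_prefetch(&tree[n << 4]);
                n = n * 2 + (tree[n] < x);      /* low bit records direction */
        }

        /* strip trailing right-turns; the last left-turn is the answer */
        n >>= __builtin_ffs(~n);
        return n;                               /* 0: every key < x */
}

int main(void)
{
        /* eytzinger layout of { 10, 20, 30, 40, 50, 60, 70 }, slot 0 unused */
        int tree[8] = { 0, 40, 20, 60, 10, 30, 50, 70 };

        assert(tree[eytz_lower_bound(tree, 8, 25)] == 30);
        assert(tree[eytz_lower_bound(tree, 8, 40)] == 40);
        assert(eytz_lower_bound(tree, 8, 99) == 0);
        printf("ok\n");
        return 0;
}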
 
-/*
- * Returns the first key greater than or equal to @search
- */
-__always_inline __flatten
-static struct bkey_packed *bch2_bset_search(struct btree *b,
+static __always_inline __flatten
+struct bkey_packed *__bch2_bset_search(struct btree *b,
                                struct bset_tree *t,
-                               struct bpos search,
-                               struct bkey_packed *packed_search,
-                               const struct bkey_packed *lossy_packed_search,
-                               bool strictly_greater)
+                               struct bpos *search,
+                               const struct bkey_packed *lossy_packed_search)
 {
-       struct bkey_packed *m;
 
        /*
         * First, we search for a cacheline, then lastly we do a linear search
@@ -1413,43 +1183,41 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 
        switch (bset_aux_tree_type(t)) {
        case BSET_NO_AUX_TREE:
-               m = btree_bkey_first(b, t);
-               break;
+               return btree_bkey_first(b, t);
        case BSET_RW_AUX_TREE:
-               m = bset_search_write_set(b, t, search, lossy_packed_search);
-               break;
+               return bset_search_write_set(b, t, search);
        case BSET_RO_AUX_TREE:
-               /*
-                * Each node in the auxiliary search tree covers a certain range
-                * of bits, and keys above and below the set it covers might
-                * differ outside those bits - so we have to special case the
-                * start and end - handle that here:
-                */
-
-               if (bkey_cmp(search, t->max_key) > 0)
-                       return btree_bkey_last(b, t);
-
-               m = bset_search_tree(b, t, search, lossy_packed_search);
-               break;
+               return bset_search_tree(b, t, search, lossy_packed_search);
+       default:
+               BUG();
        }
+}
 
+static __always_inline __flatten
+struct bkey_packed *bch2_bset_search_linear(struct btree *b,
+                               struct bset_tree *t,
+                               struct bpos *search,
+                               struct bkey_packed *packed_search,
+                               const struct bkey_packed *lossy_packed_search,
+                               struct bkey_packed *m)
+{
        if (lossy_packed_search)
                while (m != btree_bkey_last(b, t) &&
-                      !btree_iter_pos_cmp_p_or_unp(b, search, lossy_packed_search,
-                                                   m, strictly_greater))
-                       m = bkey_next(m);
+                      bkey_iter_cmp_p_or_unp(b, m,
+                                       lossy_packed_search, search) < 0)
+                       m = bkey_p_next(m);
 
        if (!packed_search)
                while (m != btree_bkey_last(b, t) &&
-                      !btree_iter_pos_cmp_packed(b, &search, m, strictly_greater))
-                       m = bkey_next(m);
+                      bkey_iter_pos_cmp(b, m, search) < 0)
+                       m = bkey_p_next(m);
 
-       if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
+       if (bch2_expensive_debug_checks) {
                struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
 
                BUG_ON(prev &&
-                      btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
-                                                  prev, strictly_greater));
+                      bkey_iter_cmp_p_or_unp(b, prev,
+                                       packed_search, search) >= 0);
        }
 
        return m;
@@ -1457,51 +1225,57 @@ static struct bkey_packed *bch2_bset_search(struct btree *b,
 
 /* Btree node iterator */
 
-void bch2_btree_node_iter_push(struct btree_node_iter *iter,
-                              struct btree *b,
-                              const struct bkey_packed *k,
-                              const struct bkey_packed *end)
+static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
+                             struct btree *b,
+                             const struct bkey_packed *k,
+                             const struct bkey_packed *end)
 {
        if (k != end) {
-               struct btree_node_iter_set *pos, n =
-                       ((struct btree_node_iter_set) {
-                                __btree_node_key_to_offset(b, k),
-                                __btree_node_key_to_offset(b, end)
-                        });
+               struct btree_node_iter_set *pos;
 
                btree_node_iter_for_each(iter, pos)
-                       if (btree_node_iter_cmp(iter, b, n, *pos) <= 0)
-                               break;
+                       ;
 
-               memmove(pos + 1, pos,
-                       (void *) (iter->data + iter->used) - (void *) pos);
-               iter->used++;
-               *pos = n;
+               BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
+               *pos = (struct btree_node_iter_set) {
+                       __btree_node_key_to_offset(b, k),
+                       __btree_node_key_to_offset(b, end)
+               };
        }
 }
 
-noinline __flatten __attribute__((cold))
+void bch2_btree_node_iter_push(struct btree_node_iter *iter,
+                              struct btree *b,
+                              const struct bkey_packed *k,
+                              const struct bkey_packed *end)
+{
+       __bch2_btree_node_iter_push(iter, b, k, end);
+       bch2_btree_node_iter_sort(iter, b);
+}
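
The split into an unsorted __bch2_btree_node_iter_push() plus an explicit bch2_btree_node_iter_sort() lets callers that push several bsets sort once at the end, which is exactly how bch2_btree_node_iter_init_from_start() below uses it. The append itself scans for the first free slot rather than tracking a count; a standalone sketch of that idiom, assuming the zeroed-slot convention visible in bch2_btree_node_iter_set_drop() further down:

/*
 * Sketch: append into a fixed-size, zero-terminated array.
 * An entry with end == 0 marks an unused slot, so "find the end"
 * is a scan for the first such entry instead of an iter->used field.
 */
struct set { unsigned k, end; };

static void push(struct set data[3], struct set n)
{
	struct set *pos = data;

	while (pos < data + 3 && pos->end)
		pos++;

	/* caller guarantees room, as the BUG_ON() above asserts */
	*pos = n;
}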
+
+noinline __flatten __cold
 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
-                             struct btree *b, struct bpos search,
-                             bool strictly_greater, bool is_extents)
+                             struct btree *b, struct bpos *search)
 {
-       struct bset_tree *t;
+       struct bkey_packed *k;
 
        trace_bkey_pack_pos_fail(search);
 
-       for_each_bset(b, t)
-               __bch2_btree_node_iter_push(iter, b,
-                       bch2_bset_search(b, t, search, NULL, NULL,
-                                       strictly_greater),
-                       btree_bkey_last(b, t));
+       bch2_btree_node_iter_init_from_start(iter, b);
 
-       bch2_btree_node_iter_sort(iter, b);
+       while ((k = bch2_btree_node_iter_peek(iter, b)) &&
+              bkey_iter_pos_cmp(b, k, search) < 0)
+               bch2_btree_node_iter_advance(iter, b);
 }
 
 /**
- * bch_btree_node_iter_init - initialize a btree node iterator, starting from a
+ * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
  * given position
  *
+ * @iter:      iterator to initialize
+ * @b:         btree node to search
+ * @search:    search key
+ *
  * Main entry point to the lookup code for individual btree nodes:
  *
  * NOTE:
@@ -1529,7 +1303,7 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
  *    to the search key is going to have 0 sectors after the search key.
  *
  *    But this does mean that we can't just search for
- *    bkey_successor(start_of_range) to get the first extent that overlaps with
+ *    bpos_successor(start_of_range) to get the first extent that overlaps with
  *    the range we want - if we're unlucky and there's an extent that ends
  *    exactly where we searched, then there could be a deleted key at the same
  *    position and we'd get that when we search instead of the preceding extent
@@ -1538,22 +1312,22 @@ static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
  *    So we've got to search for start_of_range, then after the lookup iterate
  *    past any extents that compare equal to the position we searched for.
  */
+__flatten
 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
-                              struct btree *b, struct bpos search,
-                              bool strictly_greater, bool is_extents)
+                              struct btree *b, struct bpos *search)
 {
-       struct bset_tree *t;
        struct bkey_packed p, *packed_search = NULL;
+       struct btree_node_iter_set *pos = iter->data;
+       struct bkey_packed *k[MAX_BSETS];
+       unsigned i;
 
-       EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
+       EBUG_ON(bpos_lt(*search, b->data->min_key));
+       EBUG_ON(bpos_gt(*search, b->data->max_key));
        bset_aux_tree_verify(b);
 
-       __bch2_btree_node_iter_init(iter, is_extents);
-
-       //if (bkey_cmp(search, b->curr_max_key) > 0)
-       //      return;
+       memset(iter, 0, sizeof(*iter));
 
-       switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
+       switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
        case BKEY_PACK_POS_EXACT:
                packed_search = &p;
                break;
@@ -1561,28 +1335,37 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
                packed_search = NULL;
                break;
        case BKEY_PACK_POS_FAIL:
-               btree_node_iter_init_pack_failed(iter, b, search,
-                                       strictly_greater, is_extents);
+               btree_node_iter_init_pack_failed(iter, b, search);
                return;
        }
 
-       for_each_bset(b, t)
-               __bch2_btree_node_iter_push(iter, b,
-                                          bch2_bset_search(b, t, search,
-                                                          packed_search, &p,
-                                                          strictly_greater),
-                                          btree_bkey_last(b, t));
+       for (i = 0; i < b->nsets; i++) {
+               k[i] = __bch2_bset_search(b, b->set + i, search, &p);
+               prefetch_four_cachelines(k[i]);
+       }
+
+       for (i = 0; i < b->nsets; i++) {
+               struct bset_tree *t = b->set + i;
+               struct bkey_packed *end = btree_bkey_last(b, t);
+
+               k[i] = bch2_bset_search_linear(b, t, search,
+                                              packed_search, &p, k[i]);
+               if (k[i] != end)
+                       *pos++ = (struct btree_node_iter_set) {
+                               __btree_node_key_to_offset(b, k[i]),
+                               __btree_node_key_to_offset(b, end)
+                       };
+       }
 
        bch2_btree_node_iter_sort(iter, b);
 }
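
Taken together: pack the search key once, run the tree search on every bset while issuing prefetches so the linear phases miss less, then record a (pos, end) pair for each non-exhausted bset and sort the heads. A freestanding sketch of that "lower-bound each source, keep the live cursors, smallest head first" shape over plain sorted runs - hypothetical names, with the same cap of three sources as MAX_BSETS:

#include <stddef.h>

struct src { const int *k, *end; };	/* mirrors btree_node_iter_set */

/* Sketch: returns how many live cursors were written to out[]. */
static unsigned iter_init(struct src out[3],
			  const int *const *runs, const size_t *lens,
			  unsigned nr, int search)
{
	unsigned used = 0, i, j;

	for (i = 0; i < nr && i < 3; i++) {
		const int *k = runs[i], *end = runs[i] + lens[i];

		while (k != end && *k < search)		/* lower bound */
			k++;
		if (k != end)
			out[used++] = (struct src) { k, end };
	}

	/* tiny insertion sort by head key, like the final sort above */
	for (i = 1; i < used; i++)
		for (j = i; j && *out[j - 1].k > *out[j].k; j--) {
			struct src tmp = out[j - 1];

			out[j - 1] = out[j];
			out[j] = tmp;
		}

	return used;
}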
 
 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
-                                         struct btree *b,
-                                         bool is_extents)
+                                         struct btree *b)
 {
        struct bset_tree *t;
 
-       __bch2_btree_node_iter_init(iter, is_extents);
+       memset(iter, 0, sizeof(*iter));
 
        for_each_bset(b, t)
                __bch2_btree_node_iter_push(iter, b,
@@ -1597,8 +1380,6 @@ struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
 {
        struct btree_node_iter_set *set;
 
-       BUG_ON(iter->used > MAX_BSETS);
-
        btree_node_iter_for_each(iter, set)
                if (set->end == t->end_offset)
                        return __btree_node_offset_to_key(b, set->k);
@@ -1606,70 +1387,79 @@ struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
        return btree_bkey_last(b, t);
 }
 
-static inline void btree_node_iter_sift(struct btree_node_iter *iter,
-                                       struct btree *b,
-                                       unsigned start)
-{
-       unsigned i;
-
-       EBUG_ON(iter->used > MAX_BSETS);
-
-       for (i = start;
-            i + 1 < iter->used &&
-            btree_node_iter_cmp(iter, b, iter->data[i], iter->data[i + 1]) > 0;
-            i++)
-               swap(iter->data[i], iter->data[i + 1]);
-}
-
-static inline void btree_node_iter_sort_two(struct btree_node_iter *iter,
+static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
                                            struct btree *b,
                                            unsigned first)
 {
-       if (btree_node_iter_cmp(iter, b,
-                               iter->data[first],
-                               iter->data[first + 1]) > 0)
+       bool ret;
+
+       if ((ret = (btree_node_iter_cmp(b,
+                                       iter->data[first],
+                                       iter->data[first + 1]) > 0)))
                swap(iter->data[first], iter->data[first + 1]);
+       return ret;
 }
 
 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
                               struct btree *b)
 {
-       EBUG_ON(iter->used > 3);
-
        /* unrolled bubble sort: */
 
-       if (iter->used > 2) {
+       if (!__btree_node_iter_set_end(iter, 2)) {
                btree_node_iter_sort_two(iter, b, 0);
                btree_node_iter_sort_two(iter, b, 1);
        }
 
-       if (iter->used > 1)
+       if (!__btree_node_iter_set_end(iter, 1))
                btree_node_iter_sort_two(iter, b, 0);
 }
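
With at most three live sets, a full sort is a fixed three-compare network, which is why the unrolled form beats calling a general-purpose sort. The same network on plain ints, as a standalone sketch:

/*
 * Sketch: sort three elements with the (0,1), (1,2), (0,1)
 * compare-and-swap network used above - three compares, at most
 * three swaps, no loop control.
 */
static void sort_two(int *a, unsigned first)
{
	if (a[first] > a[first + 1]) {
		int tmp = a[first];

		a[first] = a[first + 1];
		a[first + 1] = tmp;
	}
}

static void sort3(int *a)
{
	sort_two(a, 0);	/* larger of a[0], a[1] moves right */
	sort_two(a, 1);	/* maximum bubbles into a[2] */
	sort_two(a, 0);	/* order the remaining pair */
}

Note how the __btree_node_iter_set_end() guards above skip compares entirely when fewer than two or three sets are live, since exhausted slots are already the zeroed tail.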
 
-/**
- * bch_btree_node_iter_advance - advance @iter by one key
- *
- * Doesn't do debugchecks - for cases where (insert_fixup_extent()) a bset might
- * momentarily have out of order extents.
- */
-void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
-                                 struct btree *b)
+void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
+                                  struct btree_node_iter_set *set)
 {
-       struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b);
+       struct btree_node_iter_set *last =
+               iter->data + ARRAY_SIZE(iter->data) - 1;
+
+       memmove(&set[0], &set[1], (void *) last - (void *) set);
+       *last = (struct btree_node_iter_set) { 0, 0 };
+}
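
Dropping a set slides the tail down one slot and re-zeroes the last entry, preserving the zero-terminator convention instead of maintaining a separate count. The idiom in isolation, as a sketch:

#include <string.h>

struct set { unsigned k, end; };	/* { 0, 0 } marks an unused slot */

/*
 * Sketch: remove *victim from a fixed three-slot array by shifting
 * everything after it down and re-zeroing the freed last slot.
 */
static void set_drop(struct set data[3], struct set *victim)
{
	struct set *last = data + 3 - 1;

	memmove(victim, victim + 1, (char *) last - (char *) victim);
	*last = (struct set) { 0, 0 };
}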
 
+static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+                                                 struct btree *b)
+{
        iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
 
-       BUG_ON(iter->data->k > iter->data->end);
+       EBUG_ON(iter->data->k > iter->data->end);
 
-       if (iter->data->k == iter->data->end) {
-               BUG_ON(iter->used == 0);
-               iter->data[0] = iter->data[--iter->used];
+       if (unlikely(__btree_node_iter_set_end(iter, 0))) {
+               /* avoid an expensive memmove call: */
+               iter->data[0] = iter->data[1];
+               iter->data[1] = iter->data[2];
+               iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
+               return;
        }
 
-       btree_node_iter_sift(iter, b, 0);
+       if (__btree_node_iter_set_end(iter, 1))
+               return;
+
+       if (!btree_node_iter_sort_two(iter, b, 0))
+               return;
+
+       if (__btree_node_iter_set_end(iter, 2))
+               return;
+
+       btree_node_iter_sort_two(iter, b, 1);
+}
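
The advance fast path relies on an invariant: only data[0] moved, so the array is sorted except possibly at the front, and at most two compare-and-swaps restore it - with early exits, because if a swap at one position isn't needed, nothing further down can be out of place. The same restore step on ints, assuming slot exhaustion is handled separately as above:

/*
 * Sketch: restore sortedness of an array where only a[0] may have
 * grown. Mirrors the btree_node_iter_sort_two() cascade above:
 * stop as soon as a pair is already in order.
 */
static void resort_head(int *a, unsigned used)
{
	int tmp;

	if (used < 2 || a[0] <= a[1])
		return;			/* still sorted */

	tmp = a[0], a[0] = a[1], a[1] = tmp;

	if (used < 3 || a[1] <= a[2])
		return;

	tmp = a[1], a[1] = a[2], a[2] = tmp;
}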
+
+void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
+                                 struct btree *b)
+{
+       if (bch2_expensive_debug_checks) {
+               bch2_btree_node_iter_verify(iter, b);
+               bch2_btree_node_iter_next_check(iter, b);
+       }
 
-       bch2_btree_node_iter_next_check(iter, b, k);
+       __bch2_btree_node_iter_advance(iter, b);
 }
 
 /*
@@ -1681,19 +1471,18 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
        struct bkey_packed *k, *prev = NULL;
        struct btree_node_iter_set *set;
        struct bset_tree *t;
-       struct bset_tree *prev_t;
-       unsigned end;
+       unsigned end = 0;
 
-       bch2_btree_node_iter_verify(iter, b);
+       if (bch2_expensive_debug_checks)
+               bch2_btree_node_iter_verify(iter, b);
 
        for_each_bset(b, t) {
                k = bch2_bkey_prev_all(b, t,
                        bch2_btree_node_iter_bset_pos(iter, b, t));
                if (k &&
-                   (!prev || __btree_node_iter_cmp(iter->is_extents, b,
-                                                   k, prev) > 0)) {
+                   (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
                        prev = k;
-                       prev_t = t;
+                       end = t->end_offset;
                }
        }
 
@@ -1705,35 +1494,36 @@ struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
         * prev we picked ends up in slot 0 - sort won't necessarily put it
         * there because of duplicate deleted keys:
         */
-       end = __btree_node_key_to_offset(b, btree_bkey_last(b, prev_t));
        btree_node_iter_for_each(iter, set)
-               if (set->end == end) {
-                       memmove(&iter->data[1],
-                               &iter->data[0],
-                               (void *) set - (void *) &iter->data[0]);
-                       goto out;
-               }
+               if (set->end == end)
+                       goto found;
+
+       BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
+found:
+       BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
 
        memmove(&iter->data[1],
                &iter->data[0],
-               (void *) &iter->data[iter->used] - (void *) &iter->data[0]);
-       iter->used++;
-out:
+               (void *) set - (void *) &iter->data[0]);
+
        iter->data[0].k = __btree_node_key_to_offset(b, prev);
        iter->data[0].end = end;
+
+       if (bch2_expensive_debug_checks)
+               bch2_btree_node_iter_verify(iter, b);
        return prev;
 }
 
 struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
                                              struct btree *b)
 {
-       struct bkey_packed *k;
+       struct bkey_packed *prev;
 
        do {
-               k = bch2_btree_node_iter_prev_all(iter, b);
-       } while (k && bkey_deleted(k));
+               prev = bch2_btree_node_iter_prev_all(iter, b);
+       } while (prev && bkey_deleted(prev));
 
-       return k;
+       return prev;
 }
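
bch2_btree_node_iter_prev_all() returns whiteouts too; this wrapper is the filtered variant, stepping back until it finds a key that isn't deleted or runs out of keys. The skip-deleted pattern in miniature, over a hypothetical key type:

#include <stdbool.h>
#include <stddef.h>

struct key { int k; bool deleted; };

/*
 * Sketch: move *cursor back one entry at a time, skipping deleted
 * entries - the same do/while shape as bch2_btree_node_iter_prev().
 * Returns NULL once the front of the run is passed.
 */
static const struct key *prev_skip_deleted(const struct key *first,
					   const struct key **cursor)
{
	const struct key *prev;

	do {
		prev = *cursor > first ? --*cursor : NULL;
	} while (prev && prev->deleted);

	return prev;
}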
 
 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
@@ -1747,9 +1537,9 @@ struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
 
 /* Mergesort */
 
-void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
+void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
 {
-       struct bset_tree *t;
+       const struct bset_tree *t;
 
        for_each_bset(b, t) {
                enum bset_aux_tree_type type = bset_aux_tree_type(t);
@@ -1763,84 +1553,40 @@ void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
                        stats->floats += t->size - 1;
 
                        for (j = 1; j < t->size; j++)
-                               switch (bkey_float(b, t, j)->exponent) {
-                               case BFLOAT_FAILED_UNPACKED:
-                                       stats->failed_unpacked++;
-                                       break;
-                               case BFLOAT_FAILED_PREV:
-                                       stats->failed_prev++;
-                                       break;
-                               case BFLOAT_FAILED_OVERFLOW:
-                                       stats->failed_overflow++;
-                                       break;
-                               }
+                               stats->failed +=
+                                       bkey_float(b, t, j)->exponent ==
+                                       BFLOAT_FAILED;
                }
        }
 }
 
-int bch2_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
-                          char *buf, size_t size)
+void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
+                        struct bkey_packed *k)
 {
        struct bset_tree *t = bch2_bkey_to_bset(b, k);
-       struct bkey_packed *l, *r, *p;
-       struct bkey uk, up;
-       char buf1[200], buf2[200];
-       unsigned j;
-
-       if (!size)
-               return 0;
+       struct bkey uk;
+       unsigned j, inorder;
 
        if (!bset_has_ro_aux_tree(t))
-               goto out;
-
-       j = __inorder_to_eytzinger1(bkey_to_cacheline(b, t, k), t->size, t->extra);
-       if (j &&
-           j < t->size &&
-           k == tree_to_bkey(b, t, j))
-               switch (bkey_float(b, t, j)->exponent) {
-               case BFLOAT_FAILED_UNPACKED:
-                       uk = bkey_unpack_key(b, k);
-                       return scnprintf(buf, size,
-                                        "    failed unpacked at depth %u\n"
-                                        "\t%llu:%llu\n",
-                                        ilog2(j),
-                                        uk.p.inode, uk.p.offset);
-               case BFLOAT_FAILED_PREV:
-                       p = tree_to_prev_bkey(b, t, j);
-                       l = is_power_of_2(j)
-                               ? btree_bkey_first(b, t)
-                               : tree_to_prev_bkey(b, t, j >> ffs(j));
-                       r = is_power_of_2(j + 1)
-                               ? bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))
-                               : tree_to_bkey(b, t, j >> (ffz(j) + 1));
-
-                       up = bkey_unpack_key(b, p);
-                       uk = bkey_unpack_key(b, k);
-                       bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
-                       bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
-
-                       return scnprintf(buf, size,
-                                        "    failed prev at depth %u\n"
-                                        "\tkey starts at bit %u but first differing bit at %u\n"
-                                        "\t%llu:%llu\n"
-                                        "\t%llu:%llu\n"
-                                        "\t%s\n"
-                                        "\t%s\n",
-                                        ilog2(j),
-                                        bch2_bkey_greatest_differing_bit(b, l, r),
-                                        bch2_bkey_greatest_differing_bit(b, p, k),
-                                        uk.p.inode, uk.p.offset,
-                                        up.p.inode, up.p.offset,
-                                        buf1, buf2);
-               case BFLOAT_FAILED_OVERFLOW:
-                       uk = bkey_unpack_key(b, k);
-                       return scnprintf(buf, size,
-                                        "    failed overflow at depth %u\n"
-                                        "\t%llu:%llu\n",
-                                        ilog2(j),
-                                        uk.p.inode, uk.p.offset);
-               }
-out:
-       *buf = '\0';
-       return 0;
+               return;
+
+       inorder = bkey_to_cacheline(b, t, k);
+       if (!inorder || inorder >= t->size)
+               return;
+
+       j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
+       if (k != tree_to_bkey(b, t, j))
+               return;
+
+       switch (bkey_float(b, t, j)->exponent) {
+       case BFLOAT_FAILED:
+               uk = bkey_unpack_key(b, k);
+               prt_printf(out,
+                      "    failed unpacked at depth %u\n"
+                      "\t",
+                      ilog2(j));
+               bch2_bpos_to_text(out, uk.p);
+               prt_printf(out, "\n");
+               break;
+       }
 }