1 // SPDX-License-Identifier: GPL-2.0
* Code for working with individual keys, and sorted sets of keys within a
* btree node
6 * Copyright 2012 Google, Inc.
10 #include "btree_cache.h"
12 #include "eytzinger.h"
15 #include <asm/unaligned.h>
16 #include <linux/console.h>
17 #include <linux/random.h>
18 #include <linux/prefetch.h>
21 #include "alloc_types.h"
22 #include <trace/events/bcachefs.h>
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
struct btree *);
27 static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
29 unsigned n = ARRAY_SIZE(iter->data);
31 while (n && __btree_node_iter_set_end(iter, n - 1))
37 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
39 unsigned offset = __btree_node_key_to_offset(b, k);
43 if (offset <= t->end_offset) {
44 EBUG_ON(offset < btree_bkey_first_offset(t));
52 * There are never duplicate live keys in the btree - but including keys that
* have been flagged as deleted (and will be cleaned up later) we _will_ see
* duplicates.
56 * Thus the sort order is: usual key comparison first, but for keys that compare
* equal the deleted key(s) come first, and the (at most one) live version
* comes last.
60 * The main reason for this is insertion: to handle overwrites, we first iterate
61 * over keys that compare equal to our insert key, and then insert immediately
62 * prior to the first key greater than the key we're inserting - our insert
63 * position will be after all keys that compare equal to our insert key, which
64 * by the time we actually do the insert will all be deleted.
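/*
 * For example (positions here are illustrative inode:offset pairs), a bset
 * might contain, in order:
 *
 * 5:10 deleted, 5:10 deleted, 5:10 live, 5:20 live
 *
 * An insert at 5:10 iterates past all three 5:10 keys and lands just before
 * 5:20; by the time the insert actually happens, the old live 5:10 will have
 * been marked deleted as well, preserving the invariant.
 */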
67 void bch2_dump_bset(struct bch_fs *c, struct btree *b,
68 struct bset *i, unsigned set)
70 struct bkey_packed *_k, *_n;
81 _n = bkey_next_skip_noops(_k, vstruct_last(i));
83 k = bkey_disassemble(b, _k, &uk);
85 bch2_bkey_val_to_text(&PBUF(buf), c, k);
87 bch2_bkey_to_text(&PBUF(buf), k.k);
88 printk(KERN_ERR "block %u key %5zu: %s\n", set,
89 _k->_data - i->_data, buf);
91 if (_n == vstruct_last(i))
94 n = bkey_unpack_key(b, _n);
96 if (bkey_cmp(bkey_start_pos(&n), k.k->p) < 0) {
97 printk(KERN_ERR "Key skipped backwards\n");
101 if (!bkey_deleted(k.k) &&
102 !bkey_cmp(n.p, k.k->p))
103 printk(KERN_ERR "Duplicate keys\n");
107 void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
113 bch2_dump_bset(c, b, bset(b, t), t - b->set);
117 void bch2_dump_btree_node_iter(struct btree *b,
118 struct btree_node_iter *iter)
120 struct btree_node_iter_set *set;
122 printk(KERN_ERR "btree node iter with %u/%u sets:\n",
123 __btree_node_iter_used(iter), b->nsets);
125 btree_node_iter_for_each(iter, set) {
126 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
127 struct bset_tree *t = bch2_bkey_to_bset(b, k);
128 struct bkey uk = bkey_unpack_key(b, k);
131 bch2_bkey_to_text(&PBUF(buf), &uk);
132 printk(KERN_ERR "set %zu key %u: %s\n",
133 t - b->set, set->k, buf);
137 #ifdef CONFIG_BCACHEFS_DEBUG
139 void __bch2_verify_btree_nr_keys(struct btree *b)
142 struct bkey_packed *k;
143 struct btree_nr_keys nr = { 0 };
146 bset_tree_for_each_key(b, t, k)
147 if (!bkey_whiteout(k))
148 btree_keys_account_key_add(&nr, t - b->set, k);
150 BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
153 static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
156 struct btree_node_iter iter = *_iter;
157 const struct bkey_packed *k, *n;
159 k = bch2_btree_node_iter_peek_all(&iter, b);
160 __bch2_btree_node_iter_advance(&iter, b);
161 n = bch2_btree_node_iter_peek_all(&iter, b);
163 bkey_unpack_key(b, k);
166 bkey_iter_cmp(b, k, n) > 0) {
167 struct btree_node_iter_set *set;
168 struct bkey ku = bkey_unpack_key(b, k);
169 struct bkey nu = bkey_unpack_key(b, n);
170 char buf1[80], buf2[80];
172 bch2_dump_btree_node(NULL, b);
173 bch2_bkey_to_text(&PBUF(buf1), &ku);
174 bch2_bkey_to_text(&PBUF(buf2), &nu);
175 printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
177 printk(KERN_ERR "iter was:");
179 btree_node_iter_for_each(_iter, set) {
180 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
181 struct bset_tree *t = bch2_bkey_to_bset(b, k);
182 printk(" [%zi %zi]", t - b->set,
183 k->_data - bset(b, t)->_data);
189 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
192 struct btree_node_iter_set *set, *s2;
193 struct bkey_packed *k, *p;
196 if (bch2_btree_node_iter_end(iter))
199 /* Verify no duplicates: */
200 btree_node_iter_for_each(iter, set)
201 btree_node_iter_for_each(iter, s2)
202 BUG_ON(set != s2 && set->end == s2->end);
204 /* Verify that set->end is correct: */
205 btree_node_iter_for_each(iter, set) {
207 if (set->end == t->end_offset)
211 BUG_ON(set->k < btree_bkey_first_offset(t) ||
212 set->k >= t->end_offset);
215 /* Verify iterator is sorted: */
216 btree_node_iter_for_each(iter, set)
217 BUG_ON(set != iter->data &&
218 btree_node_iter_cmp(b, set[-1], set[0]) > 0);
220 k = bch2_btree_node_iter_peek_all(iter, b);
222 for_each_bset(b, t) {
223 if (iter->data[0].end == t->end_offset)
226 p = bch2_bkey_prev_all(b, t,
227 bch2_btree_node_iter_bset_pos(iter, b, t));
229 BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
233 void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
234 struct bkey_packed *insert, unsigned clobber_u64s)
236 struct bset_tree *t = bch2_bkey_to_bset(b, where);
237 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
238 struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
241 bkey_iter_cmp(b, prev, insert) > 0);
244 bkey_iter_cmp(b, prev, insert) > 0) {
245 struct bkey k1 = bkey_unpack_key(b, prev);
246 struct bkey k2 = bkey_unpack_key(b, insert);
250 bch2_dump_btree_node(NULL, b);
251 bch2_bkey_to_text(&PBUF(buf1), &k1);
252 bch2_bkey_to_text(&PBUF(buf2), &k2);
254 panic("prev > insert:\n"
261 BUG_ON(next != btree_bkey_last(b, t) &&
262 bkey_iter_cmp(b, insert, next) > 0);
264 if (next != btree_bkey_last(b, t) &&
265 bkey_iter_cmp(b, insert, next) > 0) {
266 struct bkey k1 = bkey_unpack_key(b, insert);
267 struct bkey k2 = bkey_unpack_key(b, next);
271 bch2_dump_btree_node(NULL, b);
272 bch2_bkey_to_text(&PBUF(buf1), &k1);
273 bch2_bkey_to_text(&PBUF(buf2), &k2);
275 panic("insert > next:\n"
285 static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
290 /* Auxiliary search trees */
292 #define BFLOAT_FAILED_UNPACKED U8_MAX
293 #define BFLOAT_FAILED U8_MAX
300 #define BKEY_MANTISSA_BITS 16
302 static unsigned bkey_float_byte_offset(unsigned idx)
304 return idx * sizeof(struct bkey_float);
308 struct bkey_float f[0];
316 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
318 BUG_ON(t->aux_data_offset == U16_MAX);
320 switch (bset_aux_tree_type(t)) {
321 case BSET_NO_AUX_TREE:
322 return t->aux_data_offset;
323 case BSET_RO_AUX_TREE:
324 return t->aux_data_offset +
325 DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
326 t->size * sizeof(u8), 8);
327 case BSET_RW_AUX_TREE:
328 return t->aux_data_offset +
329 DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
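/*
 * Example (assuming a 4 byte struct bkey_float - u8 exponent, u8 key_offset,
 * u16 mantissa - as in this version): an RO aux tree with t->size = 64 needs
 * DIV_ROUND_UP(64 * 4 + 64 * 1, 8) = 40 u64s of aux data; the extra byte per
 * node is the prev-key size table returned by ro_aux_tree_prev().
 */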
335 static unsigned bset_aux_tree_buf_start(const struct btree *b,
336 const struct bset_tree *t)
339 ? DIV_ROUND_UP(b->unpack_fn_len, 8)
340 : bset_aux_tree_buf_end(t - 1);
343 static void *__aux_tree_base(const struct btree *b,
344 const struct bset_tree *t)
346 return b->aux_data + t->aux_data_offset * 8;
349 static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
350 const struct bset_tree *t)
352 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
354 return __aux_tree_base(b, t);
357 static u8 *ro_aux_tree_prev(const struct btree *b,
358 const struct bset_tree *t)
360 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
362 return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
365 static struct bkey_float *bkey_float(const struct btree *b,
366 const struct bset_tree *t,
369 return ro_aux_tree_base(b, t)->f + idx;
372 static void bset_aux_tree_verify(const struct btree *b)
374 #ifdef CONFIG_BCACHEFS_DEBUG
375 const struct bset_tree *t;
377 for_each_bset(b, t) {
378 if (t->aux_data_offset == U16_MAX)
381 BUG_ON(t != b->set &&
382 t[-1].aux_data_offset == U16_MAX);
384 BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
385 BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
386 BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
391 void bch2_btree_keys_init(struct btree *b)
396 memset(&b->nr, 0, sizeof(b->nr));
398 for (i = 0; i < MAX_BSETS; i++)
399 b->set[i].data_offset = U16_MAX;
401 bch2_bset_set_no_aux_tree(b, b->set);
404 /* Binary tree stuff for auxiliary search trees */
407 * Cacheline/offset <-> bkey pointer arithmetic:
* The ro aux tree is a binary search tree in an array (eytzinger layout); each
* node corresponds to a key in one cacheline of the bset (BSET_CACHELINE bytes).
* This means we don't have to store the full index of the key that a node in
* the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
* and then bkey_float->key_offset gives us the offset within that cacheline,
* in units of 8 bytes.
417 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
420 * To construct the bfloat for an arbitrary key we need to know what the key
421 * immediately preceding it is: we have to check if the two keys differ in the
* bits we're going to store in bkey_float->mantissa. The ro_aux_tree_prev()
* table stores the size of the previous key so we can walk backwards to it
* from the key tree_to_bkey() points at.
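/*
 * Worked example (sizes illustrative): if BSET_CACHELINE is 128 and
 * eytzinger1_to_inorder() maps node j to cacheline 3, then node j's key lives
 * in the fourth 128 byte chunk of the bset; with key_offset = 5 the key
 * starts 5 * 8 = 40 bytes into that chunk - exactly what
 * cacheline_to_bkey(b, t, 3, 5) computes.
 */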
426 static inline void *bset_cacheline(const struct btree *b,
427 const struct bset_tree *t,
430 return (void *) round_down((unsigned long) btree_bkey_first(b, t),
432 cacheline * BSET_CACHELINE;
435 static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
436 const struct bset_tree *t,
440 return bset_cacheline(b, t, cacheline) + offset * 8;
443 static unsigned bkey_to_cacheline(const struct btree *b,
444 const struct bset_tree *t,
445 const struct bkey_packed *k)
447 return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
450 static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
451 const struct bset_tree *t,
453 const struct bkey_packed *k)
455 return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
458 static unsigned bkey_to_cacheline_offset(const struct btree *b,
459 const struct bset_tree *t,
461 const struct bkey_packed *k)
463 size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
469 static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
470 const struct bset_tree *t,
473 return cacheline_to_bkey(b, t,
474 __eytzinger1_to_inorder(j, t->size, t->extra),
475 bkey_float(b, t, j)->key_offset);
478 static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
479 const struct bset_tree *t,
482 unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
484 return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
487 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
488 const struct bset_tree *t)
490 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
492 return __aux_tree_base(b, t);
496 * For the write set - the one we're currently inserting keys into - we don't
* maintain a full search tree, we just keep a simple lookup table of
* (offset, unpacked pos) pairs - see rw_aux_tree().
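/*
 * A sketch of the table's shape: roughly one entry per cacheline of keys,
 * each recording a key's offset within the node (in u64s) plus its unpacked
 * position, so lookups can binary search on .k and then jump straight to
 * .offset - see rw_aux_tree_set() and bset_search_write_set().
 */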
499 static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
503 return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
506 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
507 unsigned j, struct bkey_packed *k)
509 EBUG_ON(k >= btree_bkey_last(b, t));
511 rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
512 .offset = __btree_node_key_to_offset(b, k),
513 .k = bkey_unpack_pos(b, k),
517 static void bch2_bset_verify_rw_aux_tree(struct btree *b,
520 struct bkey_packed *k = btree_bkey_first(b, t);
523 if (!bch2_expensive_debug_checks)
526 BUG_ON(bset_has_ro_aux_tree(t));
528 if (!bset_has_rw_aux_tree(t))
532 BUG_ON(rw_aux_to_bkey(b, t, j) != k);
536 if (rw_aux_to_bkey(b, t, j) == k) {
537 BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
538 bkey_unpack_pos(b, k)));
543 BUG_ON(rw_aux_tree(b, t)[j].offset <=
544 rw_aux_tree(b, t)[j - 1].offset);
547 k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
548 BUG_ON(k >= btree_bkey_last(b, t));
552 /* returns idx of first entry >= offset: */
553 static unsigned rw_aux_tree_bsearch(struct btree *b,
557 unsigned bset_offs = offset - btree_bkey_first_offset(t);
558 unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
559 unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
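/*
 * E.g. (illustrative numbers): with bset_offs = 100, bset_u64s = 400 and
 * t->size = 16, the interpolated first guess is idx = 100 * 16 / 400 = 4;
 * the scans below then nudge idx to the first entry whose offset is >= the
 * search offset.
 */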
561 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
563 EBUG_ON(idx > t->size);
565 while (idx < t->size &&
566 rw_aux_tree(b, t)[idx].offset < offset)
570 rw_aux_tree(b, t)[idx - 1].offset >= offset)
573 EBUG_ON(idx < t->size &&
574 rw_aux_tree(b, t)[idx].offset < offset);
575 EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
576 EBUG_ON(idx + 1 < t->size &&
577 rw_aux_tree(b, t)[idx].offset ==
578 rw_aux_tree(b, t)[idx + 1].offset);
583 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
584 const struct bkey_float *f,
589 EBUG_ON(!bkey_packed(k));
591 v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
594 * In little endian, we're shifting off low bits (and then the bits we
595 * want are at the low end), in big endian we're shifting off high bits
596 * (and then the bits we want are at the high end, so we shift them
599 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
600 v >>= f->exponent & 7;
602 v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
608 static inline void __make_bfloat(struct btree *b, struct bset_tree *t,
610 struct bkey_packed *min_key,
611 struct bkey_packed *max_key)
613 struct bkey_float *f = bkey_float(b, t, j);
614 struct bkey_packed *m = tree_to_bkey(b, t, j);
615 struct bkey_packed *l = is_power_of_2(j)
617 : tree_to_prev_bkey(b, t, j >> ffs(j));
618 struct bkey_packed *r = is_power_of_2(j + 1)
620 : tree_to_bkey(b, t, j >> (ffz(j) + 1));
622 int shift, exponent, high_bit;
625 * for failed bfloats, the lookup code falls back to comparing against
629 if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
631 f->exponent = BFLOAT_FAILED_UNPACKED;
636 * The greatest differing bit of l and r is the first bit we must
637 * include in the bfloat mantissa we're creating in order to do
638 * comparisons - that bit always becomes the high bit of
639 * bfloat->mantissa, and thus the exponent we're calculating here is
640 * the position of what will become the low bit in bfloat->mantissa:
642 * Note that this may be negative - we may be running off the low end
* of the key; we handle this later:
645 high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
646 min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
647 exponent = high_bit - (BKEY_MANTISSA_BITS - 1);
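/*
 * E.g. if the greatest differing bit of l and r is bit 40 (and dominates the
 * clamp above), then with BKEY_MANTISSA_BITS = 16 we get
 * exponent = 40 - 15 = 25: the mantissa will hold key bits 40..25.
 */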
650 * Then we calculate the actual shift value, from the start of the key
651 * (k->_data), to get the key bits starting at exponent:
653 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
654 shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
656 EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
658 shift = high_bit_offset +
663 EBUG_ON(shift < KEY_PACKED_BITS_START);
665 EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
668 mantissa = bkey_mantissa(m, f, j);
671 * If we've got garbage bits, set them to all 1s - it's legal for the
672 * bfloat to compare larger than the original key, but not smaller:
675 mantissa |= ~(~0U << -exponent);
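/*
 * E.g. if exponent == -3, the low 3 mantissa bits don't correspond to real
 * key bits: ~(~0U << 3) == 0b111 forces them all to 1s, so the bfloat only
 * ever rounds up.
 */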
677 f->mantissa = mantissa;
680 static void make_bfloat(struct btree *b, struct bset_tree *t,
682 struct bkey_packed *min_key,
683 struct bkey_packed *max_key)
687 if (is_power_of_2(j) &&
689 k = (void *) min_key;
691 k->k.p = b->data->min_key;
694 if (is_power_of_2(j + 1) &&
696 k = (void *) max_key;
701 __make_bfloat(b, t, j, min_key, max_key);
704 /* bytes remaining - only valid for last bset: */
705 static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
707 bset_aux_tree_verify(b);
709 return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
712 static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
714 return __bset_tree_capacity(b, t) /
715 (sizeof(struct bkey_float) + sizeof(u8));
718 static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
720 return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
723 static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
725 struct bkey_packed *k;
728 t->extra = BSET_RW_AUX_TREE_VAL;
729 rw_aux_tree(b, t)[0].offset =
730 __btree_node_key_to_offset(b, btree_bkey_first(b, t));
732 bset_tree_for_each_key(b, t, k) {
733 if (t->size == bset_rw_tree_capacity(b, t))
if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
L1_CACHE_BYTES)
738 rw_aux_tree_set(b, t, t->size++, k);
742 static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
744 struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
745 struct bkey_i min_key, max_key;
746 unsigned j, cacheline = 1;
748 t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
749 bset_ro_tree_capacity(b, t));
753 t->extra = BSET_NO_AUX_TREE_VAL;
757 t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
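/*
 * E.g. with t->size == 7: rounddown_pow_of_two(6) == 4, so t->extra ==
 * (7 - 4) << 1 == 6 - the eytzinger1 helpers use this to map between tree
 * order and inorder traversal order for a tree that isn't a power of 2 in
 * size.
 */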
759 /* First we figure out where the first key in each cacheline is */
760 eytzinger1_for_each(j, t->size) {
761 while (bkey_to_cacheline(b, t, k) < cacheline)
762 prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
764 if (k >= btree_bkey_last(b, t)) {
765 /* XXX: this path sucks */
770 ro_aux_tree_prev(b, t)[j] = prev->u64s;
771 bkey_float(b, t, j)->key_offset =
772 bkey_to_cacheline_offset(b, t, cacheline++, k);
774 EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
775 EBUG_ON(tree_to_bkey(b, t, j) != k);
778 while (k != btree_bkey_last(b, t))
779 prev = k, k = bkey_next_skip_noops(k, btree_bkey_last(b, t));
781 t->max_key = bkey_unpack_pos(b, prev);
783 bkey_init(&min_key.k);
784 min_key.k.p = b->data->min_key;
785 bkey_init(&max_key.k);
786 max_key.k.p = t->max_key;
788 /* Then we build the tree */
789 eytzinger1_for_each(j, t->size)
790 __make_bfloat(b, t, j,
791 bkey_to_packed(&min_key),
792 bkey_to_packed(&max_key));
795 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
799 for (i = b->set; i != t; i++)
800 BUG_ON(bset_has_rw_aux_tree(i));
802 bch2_bset_set_no_aux_tree(b, t);
804 /* round up to next cacheline: */
805 t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
806 SMP_CACHE_BYTES / sizeof(u64));
808 bset_aux_tree_verify(b);
811 void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
815 ? bset_has_rw_aux_tree(t)
816 : bset_has_ro_aux_tree(t))
819 bset_alloc_tree(b, t);
821 if (!__bset_tree_capacity(b, t))
825 __build_rw_aux_tree(b, t);
827 __build_ro_aux_tree(b, t);
829 bset_aux_tree_verify(b);
832 void bch2_bset_init_first(struct btree *b, struct bset *i)
838 memset(i, 0, sizeof(*i));
839 get_random_bytes(&i->seq, sizeof(i->seq));
840 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
842 t = &b->set[b->nsets++];
843 set_btree_bset(b, t, i);
846 void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
847 struct btree_node_entry *bne)
849 struct bset *i = &bne->keys;
852 BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
853 BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
854 BUG_ON(b->nsets >= MAX_BSETS);
856 memset(i, 0, sizeof(*i));
857 i->seq = btree_bset_first(b)->seq;
858 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
860 t = &b->set[b->nsets++];
861 set_btree_bset(b, t, i);
865 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
866 * immediate predecessor:
868 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
869 struct bkey_packed *k)
871 struct bkey_packed *p;
875 EBUG_ON(k < btree_bkey_first(b, t) ||
876 k > btree_bkey_last(b, t));
878 if (k == btree_bkey_first(b, t))
881 switch (bset_aux_tree_type(t)) {
882 case BSET_NO_AUX_TREE:
883 p = btree_bkey_first(b, t);
885 case BSET_RO_AUX_TREE:
886 j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
889 p = j ? tree_to_bkey(b, t,
890 __inorder_to_eytzinger1(j--,
892 : btree_bkey_first(b, t);
895 case BSET_RW_AUX_TREE:
896 offset = __btree_node_key_to_offset(b, k);
897 j = rw_aux_tree_bsearch(b, t, offset);
898 p = j ? rw_aux_to_bkey(b, t, j - 1)
899 : btree_bkey_first(b, t);
906 struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
908 struct bkey_packed *k,
909 unsigned min_key_type)
911 struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
913 while ((p = __bkey_prev(b, t, k)) && !ret) {
914 for (i = p; i != k; i = bkey_next_skip_noops(i, k))
915 if (i->type >= min_key_type)
921 if (bch2_expensive_debug_checks) {
922 BUG_ON(ret >= orig_k);
925 ? bkey_next_skip_noops(ret, orig_k)
926 : btree_bkey_first(b, t);
928 i = bkey_next_skip_noops(i, orig_k))
929 BUG_ON(i->type >= min_key_type);
937 static void rw_aux_tree_fix_invalidated_key(struct btree *b,
939 struct bkey_packed *k)
941 unsigned offset = __btree_node_key_to_offset(b, k);
942 unsigned j = rw_aux_tree_bsearch(b, t, offset);
945 rw_aux_tree(b, t)[j].offset == offset)
946 rw_aux_tree_set(b, t, j, k);
948 bch2_bset_verify_rw_aux_tree(b, t);
951 static void ro_aux_tree_fix_invalidated_key(struct btree *b,
953 struct bkey_packed *k)
955 struct bkey_packed min_key, max_key;
958 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
960 /* signal to make_bfloat() that they're uninitialized: */
961 min_key.u64s = max_key.u64s = 0;
963 if (bkey_next_skip_noops(k, btree_bkey_last(b, t)) == btree_bkey_last(b, t)) {
964 t->max_key = bkey_unpack_pos(b, k);
966 for (j = 1; j < t->size; j = j * 2 + 1)
967 make_bfloat(b, t, j, &min_key, &max_key);
970 inorder = bkey_to_cacheline(b, t, k);
974 j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
976 if (k == tree_to_bkey(b, t, j)) {
977 /* Fix the node this key corresponds to */
978 make_bfloat(b, t, j, &min_key, &max_key);
980 /* Children for which this key is the right boundary */
981 for (j = eytzinger1_left_child(j);
983 j = eytzinger1_right_child(j))
984 make_bfloat(b, t, j, &min_key, &max_key);
988 if (inorder + 1 < t->size) {
989 j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);
991 if (k == tree_to_prev_bkey(b, t, j)) {
992 make_bfloat(b, t, j, &min_key, &max_key);
994 /* Children for which this key is the left boundary */
995 for (j = eytzinger1_right_child(j);
997 j = eytzinger1_left_child(j))
998 make_bfloat(b, t, j, &min_key, &max_key);
1004 * bch2_bset_fix_invalidated_key() - given an existing key @k that has been
* modified, fix the bset's auxiliary search tree by remaking all the
* nodes that refer to @k
1008 void bch2_bset_fix_invalidated_key(struct btree *b, struct bkey_packed *k)
1010 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1012 switch (bset_aux_tree_type(t)) {
1013 case BSET_NO_AUX_TREE:
1015 case BSET_RO_AUX_TREE:
1016 ro_aux_tree_fix_invalidated_key(b, t, k);
1018 case BSET_RW_AUX_TREE:
1019 rw_aux_tree_fix_invalidated_key(b, t, k);
1024 static void bch2_bset_fix_lookup_table(struct btree *b,
1025 struct bset_tree *t,
1026 struct bkey_packed *_where,
1027 unsigned clobber_u64s,
1030 int shift = new_u64s - clobber_u64s;
1031 unsigned l, j, where = __btree_node_key_to_offset(b, _where);
1033 EBUG_ON(bset_has_ro_aux_tree(t));
1035 if (!bset_has_rw_aux_tree(t))
1038 /* returns first entry >= where */
1039 l = rw_aux_tree_bsearch(b, t, where);
1041 if (!l) /* never delete first entry */
1043 else if (l < t->size &&
1044 where < t->end_offset &&
1045 rw_aux_tree(b, t)[l].offset == where)
1046 rw_aux_tree_set(b, t, l++, _where);
1052 rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
1057 rw_aux_tree(b, t)[j].offset + shift ==
1058 rw_aux_tree(b, t)[l - 1].offset)
1061 memmove(&rw_aux_tree(b, t)[l],
1062 &rw_aux_tree(b, t)[j],
1063 (void *) &rw_aux_tree(b, t)[t->size] -
1064 (void *) &rw_aux_tree(b, t)[j]);
1067 for (j = l; j < t->size; j++)
1068 rw_aux_tree(b, t)[j].offset += shift;
1070 EBUG_ON(l < t->size &&
1071 rw_aux_tree(b, t)[l].offset ==
1072 rw_aux_tree(b, t)[l - 1].offset);
1074 if (t->size < bset_rw_tree_capacity(b, t) &&
1076 ? rw_aux_tree(b, t)[l].offset
1078 rw_aux_tree(b, t)[l - 1].offset >
1079 L1_CACHE_BYTES / sizeof(u64)) {
1080 struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
1081 struct bkey_packed *end = l < t->size
1082 ? rw_aux_to_bkey(b, t, l)
1083 : btree_bkey_last(b, t);
1084 struct bkey_packed *k = start;
1087 k = bkey_next_skip_noops(k, end);
1091 if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
1092 memmove(&rw_aux_tree(b, t)[l + 1],
1093 &rw_aux_tree(b, t)[l],
1094 (void *) &rw_aux_tree(b, t)[t->size] -
1095 (void *) &rw_aux_tree(b, t)[l]);
1097 rw_aux_tree_set(b, t, l, k);
1103 bch2_bset_verify_rw_aux_tree(b, t);
1104 bset_aux_tree_verify(b);
1107 void bch2_bset_insert(struct btree *b,
1108 struct btree_node_iter *iter,
1109 struct bkey_packed *where,
1110 struct bkey_i *insert,
1111 unsigned clobber_u64s)
1113 struct bkey_format *f = &b->format;
1114 struct bset_tree *t = bset_tree_last(b);
1115 struct bkey_packed packed, *src = bkey_to_packed(insert);
1117 bch2_bset_verify_rw_aux_tree(b, t);
1118 bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
1120 if (bch2_bkey_pack_key(&packed, &insert->k, f))
1123 if (!bkey_whiteout(&insert->k))
1124 btree_keys_account_key_add(&b->nr, t - b->set, src);
1126 if (src->u64s != clobber_u64s) {
1127 u64 *src_p = where->_data + clobber_u64s;
1128 u64 *dst_p = where->_data + src->u64s;
1130 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
1131 (int) clobber_u64s - src->u64s);
1133 memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1134 le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
1135 set_btree_bset_end(b, t);
1138 memcpy_u64s(where, src,
1139 bkeyp_key_u64s(f, src));
1140 memcpy_u64s(bkeyp_val(f, where), &insert->v,
1141 bkeyp_val_u64s(f, src));
1143 if (src->u64s != clobber_u64s)
1144 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
1146 bch2_verify_btree_nr_keys(b);
1149 void bch2_bset_delete(struct btree *b,
1150 struct bkey_packed *where,
1151 unsigned clobber_u64s)
1153 struct bset_tree *t = bset_tree_last(b);
1154 u64 *src_p = where->_data + clobber_u64s;
1155 u64 *dst_p = where->_data;
1157 bch2_bset_verify_rw_aux_tree(b, t);
1159 EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
1161 memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1162 le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
1163 set_btree_bset_end(b, t);
1165 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
1171 static struct bkey_packed *bset_search_write_set(const struct btree *b,
1172 struct bset_tree *t,
1173 struct bpos *search,
1174 const struct bkey_packed *packed_search)
1176 unsigned l = 0, r = t->size;
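/*
 * Invariant sketch: entry l always compares < *search (entry 0 is treated as
 * if it did, as a sentinel) and entries >= r compare >= *search; when
 * l + 1 == r, entry l is the last table entry before the search position and
 * the caller's linear search resumes from its cacheline.
 */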
1178 while (l + 1 != r) {
1179 unsigned m = (l + r) >> 1;
1181 if (bkey_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
1187 return rw_aux_to_bkey(b, t, l);
1190 static inline void prefetch_four_cachelines(void *p)
1192 #ifdef CONFIG_X86_64
1193 asm(".intel_syntax noprefix;"
1194 "prefetcht0 [%0 - 127 + 64 * 0];"
1195 "prefetcht0 [%0 - 127 + 64 * 1];"
1196 "prefetcht0 [%0 - 127 + 64 * 2];"
1197 "prefetcht0 [%0 - 127 + 64 * 3];"
1198 ".att_syntax prefix;"
1202 prefetch(p + L1_CACHE_BYTES * 0);
1203 prefetch(p + L1_CACHE_BYTES * 1);
1204 prefetch(p + L1_CACHE_BYTES * 2);
1205 prefetch(p + L1_CACHE_BYTES * 3);
1209 static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
1210 const struct bkey_float *f,
1213 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1214 unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
1216 return f->exponent > key_bits_start;
1218 unsigned key_bits_end = high_bit_offset + b->nr_key_bits;
1220 return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
1225 static struct bkey_packed *bset_search_tree(const struct btree *b,
1226 const struct bset_tree *t,
1227 const struct bpos *search,
1228 const struct bkey_packed *packed_search)
1230 struct ro_aux_tree *base = ro_aux_tree_base(b, t);
1231 struct bkey_float *f;
1232 struct bkey_packed *k;
1233 unsigned inorder, n = 1, l, r;
1237 if (likely(n << 4 < t->size))
1238 prefetch(&base->f[n << 4]);
if (unlikely(!packed_search))
1244 if (unlikely(f->exponent >= BFLOAT_FAILED))
1248 r = bkey_mantissa(packed_search, f, n);
1250 if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
1253 n = n * 2 + (l < r);
1256 k = tree_to_bkey(b, t, n);
1257 cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
1261 n = n * 2 + (cmp < 0);
1262 } while (n < t->size);
1264 inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
1267 * n would have been the node we recursed to - the low bit tells us if
1268 * we recursed left or recursed right.
1270 if (likely(!(n & 1))) {
1272 if (unlikely(!inorder))
1273 return btree_bkey_first(b, t);
1275 f = &base->f[eytzinger1_prev(n >> 1, t->size)];
1278 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1281 static __always_inline __flatten
1282 struct bkey_packed *__bch2_bset_search(struct btree *b,
1283 struct bset_tree *t,
1284 struct bpos *search,
1285 const struct bkey_packed *lossy_packed_search)
* First we search for the right cacheline, then we do a linear search within
* that cacheline.
* To search for the cacheline, there are three different possibilities:
1293 * * The set is too small to have a search tree, so we just do a linear
1294 * search over the whole set.
1295 * * The set is the one we're currently inserting into; keeping a full
1296 * auxiliary search tree up to date would be too expensive, so we
1297 * use a much simpler lookup table to do a binary search -
1298 * bset_search_write_set().
1299 * * Or we use the auxiliary search tree we constructed earlier -
1300 * bset_search_tree()
1303 switch (bset_aux_tree_type(t)) {
1304 case BSET_NO_AUX_TREE:
1305 return btree_bkey_first(b, t);
1306 case BSET_RW_AUX_TREE:
1307 return bset_search_write_set(b, t, search, lossy_packed_search);
1308 case BSET_RO_AUX_TREE:
1310 * Each node in the auxiliary search tree covers a certain range
1311 * of bits, and keys above and below the set it covers might
* differ outside those bits, so we have to special-case the
* start and end of the set - handled here:
1316 if (bkey_cmp(*search, t->max_key) > 0)
1317 return btree_bkey_last(b, t);
1319 return bset_search_tree(b, t, search, lossy_packed_search);
1325 static __always_inline __flatten
1326 struct bkey_packed *bch2_bset_search_linear(struct btree *b,
1327 struct bset_tree *t,
1328 struct bpos *search,
1329 struct bkey_packed *packed_search,
1330 const struct bkey_packed *lossy_packed_search,
1331 struct bkey_packed *m)
1333 if (lossy_packed_search)
1334 while (m != btree_bkey_last(b, t) &&
1335 bkey_iter_cmp_p_or_unp(b, m,
1336 lossy_packed_search, search) < 0)
1337 m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
1340 while (m != btree_bkey_last(b, t) &&
1341 bkey_iter_pos_cmp(b, m, search) < 0)
1342 m = bkey_next_skip_noops(m, btree_bkey_last(b, t));
1344 if (bch2_expensive_debug_checks) {
1345 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
1348 bkey_iter_cmp_p_or_unp(b, prev,
1349 packed_search, search) >= 0);
1356 * Returns the first key greater than or equal to @search
1358 static __always_inline __flatten
1359 struct bkey_packed *bch2_bset_search(struct btree *b,
1360 struct bset_tree *t,
1361 struct bpos *search,
1362 struct bkey_packed *packed_search,
1363 const struct bkey_packed *lossy_packed_search)
1365 struct bkey_packed *m = __bch2_bset_search(b, t, search,
1366 lossy_packed_search);
1368 return bch2_bset_search_linear(b, t, search,
1369 packed_search, lossy_packed_search, m);
1372 /* Btree node iterator */
1374 static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
1376 const struct bkey_packed *k,
1377 const struct bkey_packed *end)
1380 struct btree_node_iter_set *pos;
1382 btree_node_iter_for_each(iter, pos)
1385 BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
1386 *pos = (struct btree_node_iter_set) {
1387 __btree_node_key_to_offset(b, k),
1388 __btree_node_key_to_offset(b, end)
1393 void bch2_btree_node_iter_push(struct btree_node_iter *iter,
1395 const struct bkey_packed *k,
1396 const struct bkey_packed *end)
1398 __bch2_btree_node_iter_push(iter, b, k, end);
1399 bch2_btree_node_iter_sort(iter, b);
1402 noinline __flatten __attribute__((cold))
1403 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
1404 struct btree *b, struct bpos *search)
1406 struct bset_tree *t;
1408 trace_bkey_pack_pos_fail(search);
1411 __bch2_btree_node_iter_push(iter, b,
1412 bch2_bset_search(b, t, search, NULL, NULL),
1413 btree_bkey_last(b, t));
1415 bch2_btree_node_iter_sort(iter, b);
* bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
* given position
1422 * Main entry point to the lookup code for individual btree nodes:
1426 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
1427 * keys. This doesn't matter for most code, but it does matter for lookups.
* Consider some adjacent keys containing a string of equal keys:
*
* i j k k k k l m
1432 * If you search for k, the lookup code isn't guaranteed to return you any
* specific k. The lookup code is conceptually doing a binary search, and
* iterating backwards is very expensive, so if the pivot happens to land at
* the last k, that's what you'll get.
1437 * This works out ok, but it's something to be aware of:
1439 * - For non extents, we guarantee that the live key comes last - see
1440 * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
1441 * see will only be deleted keys you don't care about.
1443 * - For extents, deleted keys sort last (see the comment at the top of this
1444 * file). But when you're searching for extents, you actually want the first
1445 * key strictly greater than your search key - an extent that compares equal
1446 * to the search key is going to have 0 sectors after the search key.
1448 * But this does mean that we can't just search for
1449 * bkey_successor(start_of_range) to get the first extent that overlaps with
1450 * the range we want - if we're unlucky and there's an extent that ends
1451 * exactly where we searched, then there could be a deleted key at the same
1452 * position and we'd get that when we search instead of the preceding extent
1455 * So we've got to search for start_of_range, then after the lookup iterate
1456 * past any extents that compare equal to the position we searched for.
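/*
 * Concretely (illustrative): searching a node containing
 *
 * a b k k k k z
 *
 * for k may land the iterator on any of the four k's; callers that need a
 * specific one - e.g. the live key, or the first extent strictly past a
 * position - have to iterate from there as described above.
 */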
1459 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
1460 struct btree *b, struct bpos *search)
1462 struct bkey_packed p, *packed_search = NULL;
1463 struct btree_node_iter_set *pos = iter->data;
1464 struct bkey_packed *k[MAX_BSETS];
1467 EBUG_ON(bkey_cmp(*search, b->data->min_key) < 0);
1468 bset_aux_tree_verify(b);
1470 memset(iter, 0, sizeof(*iter));
1472 switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
1473 case BKEY_PACK_POS_EXACT:
1476 case BKEY_PACK_POS_SMALLER:
1477 packed_search = NULL;
1479 case BKEY_PACK_POS_FAIL:
1480 btree_node_iter_init_pack_failed(iter, b, search);
1484 for (i = 0; i < b->nsets; i++) {
1485 k[i] = __bch2_bset_search(b, b->set + i, search, &p);
1486 prefetch_four_cachelines(k[i]);
1489 for (i = 0; i < b->nsets; i++) {
1490 struct bset_tree *t = b->set + i;
1491 struct bkey_packed *end = btree_bkey_last(b, t);
1493 k[i] = bch2_bset_search_linear(b, t, search,
1494 packed_search, &p, k[i]);
1496 *pos++ = (struct btree_node_iter_set) {
1497 __btree_node_key_to_offset(b, k[i]),
1498 __btree_node_key_to_offset(b, end)
1502 bch2_btree_node_iter_sort(iter, b);
1505 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
1508 struct bset_tree *t;
1510 memset(iter, 0, sizeof(*iter));
1513 __bch2_btree_node_iter_push(iter, b,
1514 btree_bkey_first(b, t),
1515 btree_bkey_last(b, t));
1516 bch2_btree_node_iter_sort(iter, b);
1519 struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
1521 struct bset_tree *t)
1523 struct btree_node_iter_set *set;
1525 btree_node_iter_for_each(iter, set)
1526 if (set->end == t->end_offset)
1527 return __btree_node_offset_to_key(b, set->k);
1529 return btree_bkey_last(b, t);
1532 static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
1538 if ((ret = (btree_node_iter_cmp(b,
1540 iter->data[first + 1]) > 0)))
1541 swap(iter->data[first], iter->data[first + 1]);
1545 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
1548 /* unrolled bubble sort: */
1550 if (!__btree_node_iter_set_end(iter, 2)) {
1551 btree_node_iter_sort_two(iter, b, 0);
1552 btree_node_iter_sort_two(iter, b, 1);
1555 if (!__btree_node_iter_set_end(iter, 1))
1556 btree_node_iter_sort_two(iter, b, 0);
1559 void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
1560 struct btree_node_iter_set *set)
1562 struct btree_node_iter_set *last =
1563 iter->data + ARRAY_SIZE(iter->data) - 1;
1565 memmove(&set[0], &set[1], (void *) last - (void *) set);
1566 *last = (struct btree_node_iter_set) { 0, 0 };
1569 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1572 iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
1574 EBUG_ON(iter->data->k > iter->data->end);
1576 while (!__btree_node_iter_set_end(iter, 0) &&
1577 !__bch2_btree_node_iter_peek_all(iter, b)->u64s)
1580 if (unlikely(__btree_node_iter_set_end(iter, 0))) {
1581 bch2_btree_node_iter_set_drop(iter, iter->data);
1585 if (__btree_node_iter_set_end(iter, 1))
1588 if (!btree_node_iter_sort_two(iter, b, 0))
1591 if (__btree_node_iter_set_end(iter, 2))
1594 btree_node_iter_sort_two(iter, b, 1);
1597 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1600 if (bch2_expensive_debug_checks) {
1601 bch2_btree_node_iter_verify(iter, b);
1602 bch2_btree_node_iter_next_check(iter, b);
1605 __bch2_btree_node_iter_advance(iter, b);
1611 struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
1614 struct bkey_packed *k, *prev = NULL;
1615 struct btree_node_iter_set *set;
1616 struct bset_tree *t;
1619 if (bch2_expensive_debug_checks)
1620 bch2_btree_node_iter_verify(iter, b);
1622 for_each_bset(b, t) {
1623 k = bch2_bkey_prev_all(b, t,
1624 bch2_btree_node_iter_bset_pos(iter, b, t));
1626 (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
1628 end = t->end_offset;
1636 * We're manually memmoving instead of just calling sort() to ensure the
1637 * prev we picked ends up in slot 0 - sort won't necessarily put it
1638 * there because of duplicate deleted keys:
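/*
 * E.g. if prev's set was in slot 2, slots 0-1 shift to 1-2 and prev's set is
 * reinstalled in slot 0, pointing at prev - so the next peek returns prev.
 */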
1640 btree_node_iter_for_each(iter, set)
1641 if (set->end == end)
1644 BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
1646 BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
1648 memmove(&iter->data[1],
1650 (void *) set - (void *) &iter->data[0]);
1652 iter->data[0].k = __btree_node_key_to_offset(b, prev);
1653 iter->data[0].end = end;
1655 if (bch2_expensive_debug_checks)
1656 bch2_btree_node_iter_verify(iter, b);
1660 struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
1662 unsigned min_key_type)
1664 struct bkey_packed *prev;
1667 prev = bch2_btree_node_iter_prev_all(iter, b);
1668 } while (prev && prev->type < min_key_type);
1673 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
1677 struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
1679 return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
1684 void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
1686 struct bset_tree *t;
1688 for_each_bset(b, t) {
1689 enum bset_aux_tree_type type = bset_aux_tree_type(t);
1692 stats->sets[type].nr++;
1693 stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
1696 if (bset_has_ro_aux_tree(t)) {
1697 stats->floats += t->size - 1;
1699 for (j = 1; j < t->size; j++)
1701 bkey_float(b, t, j)->exponent ==
1707 void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
1708 struct bkey_packed *k)
1710 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1712 unsigned j, inorder;
1714 if (out->pos != out->end)
1717 if (!bset_has_ro_aux_tree(t))
1720 inorder = bkey_to_cacheline(b, t, k);
1721 if (!inorder || inorder >= t->size)
1724 j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
1725 if (k != tree_to_bkey(b, t, j))
1728 switch (bkey_float(b, t, j)->exponent) {
1730 uk = bkey_unpack_key(b, k);
1732 " failed unpacked at depth %u\n"
1735 uk.p.inode, uk.p.offset);