2 * Code for working with individual keys, and sorted sets of keys within a
5 * Copyright 2012 Google, Inc.
9 #include "btree_cache.h"
11 #include "eytzinger.h"
14 #include <asm/unaligned.h>
15 #include <linux/dynamic_fault.h>
16 #include <linux/console.h>
17 #include <linux/random.h>
18 #include <linux/prefetch.h>
21 #include "alloc_types.h"
22 #include <trace/events/bcachefs.h>
24 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
27 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
29 unsigned offset = __btree_node_key_to_offset(b, k);
33 if (offset <= t->end_offset) {
34 EBUG_ON(offset < btree_bkey_first_offset(t));
42 * There are never duplicate live keys in the btree - but including keys that
43 * have been flagged as deleted (and will be cleaned up later) we _will_ see
46 * Thus the sort order is: usual key comparison first, but for keys that compare
47 * equal the deleted key(s) come first, and the (at most one) live version comes
50 * The main reason for this is insertion: to handle overwrites, we first iterate
51 * over keys that compare equal to our insert key, and then insert immediately
52 * prior to the first key greater than the key we're inserting - our insert
53 * position will be after all keys that compare equal to our insert key, which
54 * by the time we actually do the insert will all be deleted.
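/*
 * For example, suppose a bset holds
 *
 *	... J  K(deleted)  K(deleted)  K(live)  L ...
 *
 * and we overwrite K: the insert path first iterates over the keys that
 * compare equal to K (handling the overwrite of the live K), then inserts the
 * new K immediately before L - i.e. after every key that compares equal to
 * it, all of which are deleted by the time the insert actually happens.
 */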
57 void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
59 struct bkey_packed *_k, *_n;
66 for (_k = i->start, k = bkey_unpack_key(b, _k);
71 bch2_bkey_to_text(buf, sizeof(buf), &k);
72 printk(KERN_ERR "block %u key %5u: %s\n", set,
73 __btree_node_key_to_offset(b, _k), buf);
75 if (_n == vstruct_last(i))
78 n = bkey_unpack_key(b, _n);
80 if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
81 printk(KERN_ERR "Key skipped backwards\n");
86 * Weird check for duplicate non extent keys: extents are
87 * deleted iff they have 0 size, so if either key has zero size and
88 * isn't deleted, these aren't extents:
90 if (((!k.size && !bkey_deleted(&k)) ||
91 (!n.size && !bkey_deleted(&n))) &&
94 printk(KERN_ERR "Duplicate keys\n");
98 void bch2_dump_btree_node(struct btree *b)
104 bch2_dump_bset(b, bset(b, t), t - b->set);
108 void bch2_dump_btree_node_iter(struct btree *b,
109 struct btree_node_iter *iter)
111 struct btree_node_iter_set *set;
113 printk(KERN_ERR "btree node iter with %u sets:\n", b->nsets);
115 btree_node_iter_for_each(iter, set) {
116 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
117 struct bset_tree *t = bch2_bkey_to_bset(b, k);
118 struct bkey uk = bkey_unpack_key(b, k);
121 bch2_bkey_to_text(buf, sizeof(buf), &uk);
122 printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set,
123 k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf);
127 #ifdef CONFIG_BCACHEFS_DEBUG
129 void __bch2_verify_btree_nr_keys(struct btree *b)
132 struct bkey_packed *k;
133 struct btree_nr_keys nr = { 0 };
136 for (k = btree_bkey_first(b, t);
137 k != btree_bkey_last(b, t);
139 if (!bkey_whiteout(k))
140 btree_keys_account_key_add(&nr, t - b->set, k);
142 BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
145 static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
148 struct btree_node_iter iter = *_iter;
149 const struct bkey_packed *k, *n;
151 k = bch2_btree_node_iter_peek_all(&iter, b);
152 __bch2_btree_node_iter_advance(&iter, b);
153 n = bch2_btree_node_iter_peek_all(&iter, b);
155 bkey_unpack_key(b, k);
158 __btree_node_iter_cmp(b, k, n) > 0) {
159 struct btree_node_iter_set *set;
160 struct bkey ku = bkey_unpack_key(b, k);
161 struct bkey nu = bkey_unpack_key(b, n);
162 char buf1[80], buf2[80];
164 bch2_dump_btree_node(b);
165 bch2_bkey_to_text(buf1, sizeof(buf1), &ku);
166 bch2_bkey_to_text(buf2, sizeof(buf2), &nu);
167 printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
169 printk(KERN_ERR "iter was:");
171 btree_node_iter_for_each(_iter, set) {
172 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
173 struct bset_tree *t = bch2_bkey_to_bset(b, k);
174 printk(" [%zi %zi]", t - b->set,
175 k->_data - bset(b, t)->_data);
181 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
184 struct btree_node_iter_set *set, *s2;
187 /* Verify no duplicates: */
188 btree_node_iter_for_each(iter, set)
189 btree_node_iter_for_each(iter, s2)
190 BUG_ON(set != s2 && set->end == s2->end);
192 /* Verify that set->end is correct: */
193 btree_node_iter_for_each(iter, set) {
195 if (set->end == t->end_offset)
199 BUG_ON(set->k < btree_bkey_first_offset(t) ||
200 set->k >= t->end_offset);
203 /* Verify iterator is sorted: */
204 btree_node_iter_for_each(iter, set)
205 BUG_ON(set != iter->data &&
206 btree_node_iter_cmp(b, set[-1], set[0]) > 0);
209 void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
210 struct bkey_packed *insert, unsigned clobber_u64s)
212 struct bset_tree *t = bch2_bkey_to_bset(b, where);
213 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
214 struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
217 __btree_node_iter_cmp(b, prev, insert) > 0);
220 __btree_node_iter_cmp(b, prev, insert) > 0) {
221 struct bkey k1 = bkey_unpack_key(b, prev);
222 struct bkey k2 = bkey_unpack_key(b, insert);
226 bch2_dump_btree_node(b);
227 bch2_bkey_to_text(buf1, sizeof(buf1), &k1);
228 bch2_bkey_to_text(buf2, sizeof(buf2), &k2);
230 panic("prev > insert:\n"
232 "insert key %5u %s\n",
233 __btree_node_key_to_offset(b, prev), buf1,
234 __btree_node_key_to_offset(b, insert), buf2);
238 BUG_ON(next != btree_bkey_last(b, t) &&
239 __btree_node_iter_cmp(b, insert, next) > 0);
241 if (next != btree_bkey_last(b, t) &&
242 __btree_node_iter_cmp(b, insert, next) > 0) {
243 struct bkey k1 = bkey_unpack_key(b, insert);
244 struct bkey k2 = bkey_unpack_key(b, next);
248 bch2_dump_btree_node(b);
249 bch2_bkey_to_text(buf1, sizeof(buf1), &k1);
250 bch2_bkey_to_text(buf2, sizeof(buf2), &k2);
252 panic("insert > next:\n"
253 "insert key %5u %s\n"
255 __btree_node_key_to_offset(b, insert), buf1,
256 __btree_node_key_to_offset(b, next), buf2);
263 static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
268 /* Auxiliary search trees */
270 #define BFLOAT_FAILED_UNPACKED (U8_MAX - 0)
271 #define BFLOAT_FAILED_PREV (U8_MAX - 1)
272 #define BFLOAT_FAILED_OVERFLOW (U8_MAX - 2)
273 #define BFLOAT_FAILED (U8_MAX - 2)
275 #define KEY_WORDS BITS_TO_LONGS(1 << BKEY_EXPONENT_BITS)
289 #define BFLOAT_32BIT_NR 32U
291 static unsigned bkey_float_byte_offset(unsigned idx)
293 int d = (idx - BFLOAT_32BIT_NR) << 1;
301 struct bkey_float _d[0];
310 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
311 * it used to be 64, but I realized the lookup code would touch slightly less
312 * memory if it was 128.
314 * It defines the number of bytes (in struct bset) per struct bkey_float in
315 * the auxiliary search tree - when we're done searching the bkey_float tree we
316 * have this many bytes left that we do a linear search over.
318 * Since (after level 5) every level of the bset_tree is on a new cacheline,
319 * we're touching one fewer cacheline in the bset tree in exchange for one more
320 * cacheline in the linear search - but the linear search might stop before it
321 * gets to the second cacheline.
324 #define BSET_CACHELINE 128
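/*
 * With BSET_CACHELINE at 128, each bkey_float covers 128 bytes (16 u64s) of
 * packed keys, so the linear search that follows the bfloat tree walk scans
 * at most 16 u64s' worth of keys - typically just a few keys.
 */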
326 /* Space required for the btree node keys */
327 static inline size_t btree_keys_bytes(struct btree *b)
329 return PAGE_SIZE << b->page_order;
332 static inline size_t btree_keys_cachelines(struct btree *b)
334 return btree_keys_bytes(b) / BSET_CACHELINE;
337 static inline size_t btree_aux_data_bytes(struct btree *b)
339 return btree_keys_cachelines(b) * 8;
342 static inline size_t btree_aux_data_u64s(struct btree *b)
344 return btree_aux_data_bytes(b) / sizeof(u64);
347 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
349 BUG_ON(t->aux_data_offset == U16_MAX);
351 switch (bset_aux_tree_type(t)) {
352 case BSET_NO_AUX_TREE:
353 return t->aux_data_offset;
354 case BSET_RO_AUX_TREE:
355 return t->aux_data_offset +
356 DIV_ROUND_UP(bkey_float_byte_offset(t->size) +
357 sizeof(u8) * t->size, 8);
358 case BSET_RW_AUX_TREE:
359 return t->aux_data_offset +
360 DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
366 static unsigned bset_aux_tree_buf_start(const struct btree *b,
367 const struct bset_tree *t)
370 ? DIV_ROUND_UP(b->unpack_fn_len, 8)
371 : bset_aux_tree_buf_end(t - 1);
374 static void *__aux_tree_base(const struct btree *b,
375 const struct bset_tree *t)
377 return b->aux_data + t->aux_data_offset * 8;
380 static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
381 const struct bset_tree *t)
383 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
385 return __aux_tree_base(b, t);
388 static u8 *ro_aux_tree_prev(const struct btree *b,
389 const struct bset_tree *t)
391 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
393 return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
396 static struct bkey_float *bkey_float_get(struct ro_aux_tree *b,
399 return (void *) b + bkey_float_byte_offset(idx);
402 static struct bkey_float *bkey_float(const struct btree *b,
403 const struct bset_tree *t,
406 return bkey_float_get(ro_aux_tree_base(b, t), idx);
409 static void bset_aux_tree_verify(struct btree *b)
411 #ifdef CONFIG_BCACHEFS_DEBUG
414 for_each_bset(b, t) {
415 if (t->aux_data_offset == U16_MAX)
418 BUG_ON(t != b->set &&
419 t[-1].aux_data_offset == U16_MAX);
421 BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
422 BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
423 BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
428 /* Memory allocation */
430 void bch2_btree_keys_free(struct btree *b)
436 #ifndef PAGE_KERNEL_EXEC
437 # define PAGE_KERNEL_EXEC PAGE_KERNEL
440 int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
442 b->page_order = page_order;
443 b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
451 void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
456 memset(&b->nr, 0, sizeof(b->nr));
457 #ifdef CONFIG_BCACHEFS_DEBUG
458 b->expensive_debug_checks = expensive_debug_checks;
460 for (i = 0; i < MAX_BSETS; i++)
461 b->set[i].data_offset = U16_MAX;
463 bch2_bset_set_no_aux_tree(b, b->set);
466 /* Binary tree stuff for auxiliary search trees */
469 * Cacheline/offset <-> bkey pointer arithmetic:
471 * t->tree is a binary search tree in an array; each node corresponds to a key
472 * in one cacheline in t->set (BSET_CACHELINE bytes).
474 * This means we don't have to store the full index of the key that a node in
475 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline, and
476 * then bkey_float->key_offset gives us the offset within that cacheline, in units of 8
479 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
482 * To construct the bfloat for an arbitrary key we need to know what the key
483 * immediately preceding it is: we have to check if the two keys differ in the
484 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
485 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
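/*
 * Concretely, with the helpers below: cacheline c of a bset starts at
 * round_down(address of first key, BSET_CACHELINE) + c * BSET_CACHELINE, and
 * an aux tree node with key_offset o points o * 8 bytes into that cacheline -
 * so (cacheline, key_offset) is enough to recover the struct bkey_packed *.
 */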
488 static inline void *bset_cacheline(const struct btree *b,
489 const struct bset_tree *t,
492 return (void *) round_down((unsigned long) btree_bkey_first(b, t),
494 cacheline * BSET_CACHELINE;
497 static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
498 const struct bset_tree *t,
502 return bset_cacheline(b, t, cacheline) + offset * 8;
505 static unsigned bkey_to_cacheline(const struct btree *b,
506 const struct bset_tree *t,
507 const struct bkey_packed *k)
509 return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
512 static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
513 const struct bset_tree *t,
515 const struct bkey_packed *k)
517 return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
520 static unsigned bkey_to_cacheline_offset(const struct btree *b,
521 const struct bset_tree *t,
523 const struct bkey_packed *k)
525 size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
531 static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
532 const struct bset_tree *t,
535 return cacheline_to_bkey(b, t,
536 __eytzinger1_to_inorder(j, t->size, t->extra),
537 bkey_float(b, t, j)->key_offset);
540 static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
541 const struct bset_tree *t,
544 unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
546 return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
549 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
550 const struct bset_tree *t)
552 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
554 return __aux_tree_base(b, t);
558 * For the write set - the one we're currently inserting keys into - we don't
559 * maintain a full search tree, we just keep a simple lookup table (the rw aux tree).
561 static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
565 return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
568 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
569 unsigned j, struct bkey_packed *k)
571 EBUG_ON(k >= btree_bkey_last(b, t));
573 rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
574 .offset = __btree_node_key_to_offset(b, k),
575 .k = bkey_unpack_pos(b, k),
579 static void bch2_bset_verify_rw_aux_tree(struct btree *b,
582 struct bkey_packed *k = btree_bkey_first(b, t);
585 if (!btree_keys_expensive_checks(b))
588 BUG_ON(bset_has_ro_aux_tree(t));
590 if (!bset_has_rw_aux_tree(t))
594 BUG_ON(rw_aux_to_bkey(b, t, j) != k);
598 if (rw_aux_to_bkey(b, t, j) == k) {
599 BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
600 bkey_unpack_pos(b, k)));
605 BUG_ON(rw_aux_tree(b, t)[j].offset <=
606 rw_aux_tree(b, t)[j - 1].offset);
610 BUG_ON(k >= btree_bkey_last(b, t));
614 /* returns idx of first entry >= offset: */
615 static unsigned rw_aux_tree_bsearch(struct btree *b,
619 unsigned bset_offs = offset - btree_bkey_first_offset(t);
620 unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
621 unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
623 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
625 EBUG_ON(idx > t->size);
627 while (idx < t->size &&
628 rw_aux_tree(b, t)[idx].offset < offset)
632 rw_aux_tree(b, t)[idx - 1].offset >= offset)
635 EBUG_ON(idx < t->size &&
636 rw_aux_tree(b, t)[idx].offset < offset);
637 EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
638 EBUG_ON(idx + 1 < t->size &&
639 rw_aux_tree(b, t)[idx].offset ==
640 rw_aux_tree(b, t)[idx + 1].offset);
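/*
 * e.g. if the search offset is 100 u64s into a 400 u64 bset and the lookup
 * table has 8 entries, the initial interpolated guess is 100 * 8 / 400 =
 * entry 2, which the loops above then nudge forward or back until it is the
 * first entry >= the search offset.
 */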
645 static inline unsigned bfloat_mantissa(const struct bkey_float *f,
648 return idx < BFLOAT_32BIT_NR ? f->mantissa32 : f->mantissa16;
651 static inline void bfloat_mantissa_set(struct bkey_float *f,
652 unsigned idx, unsigned mantissa)
654 if (idx < BFLOAT_32BIT_NR)
655 f->mantissa32 = mantissa;
657 f->mantissa16 = mantissa;
660 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
661 const struct bkey_float *f,
666 EBUG_ON(!bkey_packed(k));
668 v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
671 * In little endian, we're shifting off low bits (and then the bits we
672 * want are at the low end), in big endian we're shifting off high bits
673 * (and then the bits we want are at the high end, so we shift them
676 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
677 v >>= f->exponent & 7;
679 v >>= 64 - (f->exponent & 7) - (idx < BFLOAT_32BIT_NR ? 32 : 16);
681 return idx < BFLOAT_32BIT_NR ? (u32) v : (u16) v;
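/*
 * Put differently, for little endian: f->exponent >> 3 is the byte offset of
 * the unaligned word we load from the packed key, f->exponent & 7 is how far
 * the wanted bits sit above that byte boundary, and after shifting we keep
 * the low 32 bits (first BFLOAT_32BIT_NR nodes) or 16 bits as the mantissa.
 */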
684 static void make_bfloat(struct btree *b, struct bset_tree *t,
686 struct bkey_packed *min_key,
687 struct bkey_packed *max_key)
689 struct bkey_float *f = bkey_float(b, t, j);
690 struct bkey_packed *m = tree_to_bkey(b, t, j);
691 struct bkey_packed *p = tree_to_prev_bkey(b, t, j);
692 struct bkey_packed *l, *r;
693 unsigned bits = j < BFLOAT_32BIT_NR ? 32 : 16;
695 int shift, exponent, high_bit;
697 EBUG_ON(bkey_next(p) != m);
699 if (is_power_of_2(j)) {
703 if (!bkey_pack_pos(l, b->data->min_key, b)) {
707 tmp.k.p = b->data->min_key;
712 l = tree_to_prev_bkey(b, t, j >> ffs(j));
717 if (is_power_of_2(j + 1)) {
721 if (!bkey_pack_pos(r, t->max_key, b)) {
725 tmp.k.p = t->max_key;
730 r = tree_to_bkey(b, t, j >> (ffz(j) + 1));
736 * for failed bfloats, the lookup code falls back to comparing against
740 if (!bkey_packed(l) || !bkey_packed(r) ||
741 !bkey_packed(p) || !bkey_packed(m) ||
743 f->exponent = BFLOAT_FAILED_UNPACKED;
748 * The greatest differing bit of l and r is the first bit we must
749 * include in the bfloat mantissa we're creating in order to do
750 * comparisons - that bit always becomes the high bit of
751 * bfloat->mantissa, and thus the exponent we're calculating here is
752 * the position of what will become the low bit in bfloat->mantissa:
754 * Note that this may be negative - we may be running off the low end
755 * of the key: we handle this later:
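/*
 * For example, with a 16 bit mantissa: if the greatest differing bit of l and
 * r is bit 40 (and the key has at least that many significant bits), then
 * high_bit = 40 and exponent = 40 - 15 = 25, i.e. the mantissa will hold key
 * bits 40..25.
 */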
757 high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
758 min_t(unsigned, bits, b->nr_key_bits) - 1);
759 exponent = high_bit - (bits - 1);
762 * Then we calculate the actual shift value, from the start of the key
763 * (k->_data), to get the key bits starting at exponent:
765 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
766 shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
768 EBUG_ON(shift + bits > b->format.key_u64s * 64);
770 shift = high_bit_offset +
775 EBUG_ON(shift < KEY_PACKED_BITS_START);
777 EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
780 mantissa = bkey_mantissa(m, f, j);
783 * If we've got garbage bits, set them to all 1s - it's legal for the
784 * bfloat to compare larger than the original key, but not smaller:
787 mantissa |= ~(~0U << -exponent);
789 bfloat_mantissa_set(f, j, mantissa);
792 * The bfloat must be able to tell its key apart from the previous key -
793 * if its key and the previous key don't differ in the required bits,
794 * flag as failed - unless the keys are actually equal, in which case
795 * we aren't required to return a specific one:
798 bfloat_mantissa(f, j) == bkey_mantissa(p, f, j) &&
799 bkey_cmp_packed(b, p, m)) {
800 f->exponent = BFLOAT_FAILED_PREV;
805 * f->mantissa must compare >= the original key - for transitivity with
806 * the comparison in bset_search_tree. If we're dropping set bits,
809 if (exponent > (int) bch2_bkey_ffs(b, m)) {
810 if (j < BFLOAT_32BIT_NR
811 ? f->mantissa32 == U32_MAX
812 : f->mantissa16 == U16_MAX)
813 f->exponent = BFLOAT_FAILED_OVERFLOW;
815 if (j < BFLOAT_32BIT_NR)
822 /* bytes remaining - only valid for last bset: */
823 static unsigned __bset_tree_capacity(struct btree *b, struct bset_tree *t)
825 bset_aux_tree_verify(b);
827 return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
830 static unsigned bset_ro_tree_capacity(struct btree *b, struct bset_tree *t)
832 unsigned bytes = __bset_tree_capacity(b, t);
834 if (bytes < 7 * BFLOAT_32BIT_NR)
837 bytes -= 7 * BFLOAT_32BIT_NR;
839 return BFLOAT_32BIT_NR + bytes / 5;
842 static unsigned bset_rw_tree_capacity(struct btree *b, struct bset_tree *t)
844 return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
847 static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
849 struct bkey_packed *k;
852 t->extra = BSET_RW_AUX_TREE_VAL;
853 rw_aux_tree(b, t)[0].offset =
854 __btree_node_key_to_offset(b, btree_bkey_first(b, t));
856 for (k = btree_bkey_first(b, t);
857 k != btree_bkey_last(b, t);
859 if (t->size == bset_rw_tree_capacity(b, t))
862 if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
864 rw_aux_tree_set(b, t, t->size++, k);
868 static void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
870 struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
871 struct bkey_packed min_key, max_key;
872 unsigned j, cacheline = 1;
874 /* signal to make_bfloat() that they're uninitialized: */
875 min_key.u64s = max_key.u64s = 0;
877 t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
878 bset_ro_tree_capacity(b, t));
882 t->extra = BSET_NO_AUX_TREE_VAL;
886 t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
888 /* First we figure out where the first key in each cacheline is */
889 eytzinger1_for_each(j, t->size) {
890 while (bkey_to_cacheline(b, t, k) < cacheline)
891 prev = k, k = bkey_next(k);
893 if (k >= btree_bkey_last(b, t)) {
894 /* XXX: this path sucks */
899 ro_aux_tree_prev(b, t)[j] = prev->u64s;
900 bkey_float(b, t, j)->key_offset =
901 bkey_to_cacheline_offset(b, t, cacheline++, k);
903 EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
904 EBUG_ON(tree_to_bkey(b, t, j) != k);
907 while (bkey_next(k) != btree_bkey_last(b, t))
910 t->max_key = bkey_unpack_pos(b, k);
912 /* Then we build the tree */
913 eytzinger1_for_each(j, t->size)
914 make_bfloat(b, t, j, &min_key, &max_key);
917 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
921 for (i = b->set; i != t; i++)
922 BUG_ON(bset_has_rw_aux_tree(i));
924 bch2_bset_set_no_aux_tree(b, t);
926 /* round up to next cacheline: */
927 t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
928 SMP_CACHE_BYTES / sizeof(u64));
930 bset_aux_tree_verify(b);
933 void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
937 ? bset_has_rw_aux_tree(t)
938 : bset_has_ro_aux_tree(t))
941 bset_alloc_tree(b, t);
943 if (!__bset_tree_capacity(b, t))
947 __build_rw_aux_tree(b, t);
949 __build_ro_aux_tree(b, t);
951 bset_aux_tree_verify(b);
954 void bch2_bset_init_first(struct btree *b, struct bset *i)
960 memset(i, 0, sizeof(*i));
961 get_random_bytes(&i->seq, sizeof(i->seq));
962 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
964 t = &b->set[b->nsets++];
965 set_btree_bset(b, t, i);
968 void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
969 struct btree_node_entry *bne)
971 struct bset *i = &bne->keys;
974 BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
975 BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
976 BUG_ON(b->nsets >= MAX_BSETS);
978 memset(i, 0, sizeof(*i));
979 i->seq = btree_bset_first(b)->seq;
980 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
982 t = &b->set[b->nsets++];
983 set_btree_bset(b, t, i);
987 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
988 * immediate predecessor:
990 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
991 struct bkey_packed *k)
993 struct bkey_packed *p;
997 EBUG_ON(k < btree_bkey_first(b, t) ||
998 k > btree_bkey_last(b, t));
1000 if (k == btree_bkey_first(b, t))
1003 switch (bset_aux_tree_type(t)) {
1004 case BSET_NO_AUX_TREE:
1005 p = btree_bkey_first(b, t);
1007 case BSET_RO_AUX_TREE:
1008 j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
1011 p = j ? tree_to_bkey(b, t,
1012 __inorder_to_eytzinger1(j--,
1014 : btree_bkey_first(b, t);
1017 case BSET_RW_AUX_TREE:
1018 offset = __btree_node_key_to_offset(b, k);
1019 j = rw_aux_tree_bsearch(b, t, offset);
1020 p = j ? rw_aux_to_bkey(b, t, j - 1)
1021 : btree_bkey_first(b, t);
1028 struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
1029 struct bset_tree *t,
1030 struct bkey_packed *k,
1031 unsigned min_key_type)
1033 struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
1035 while ((p = __bkey_prev(b, t, k)) && !ret) {
1036 for (i = p; i != k; i = bkey_next(i))
1037 if (i->type >= min_key_type)
1043 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1044 BUG_ON(ret >= orig_k);
1046 for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
1049 BUG_ON(i->type >= min_key_type);
1057 static void rw_aux_tree_fix_invalidated_key(struct btree *b,
1058 struct bset_tree *t,
1059 struct bkey_packed *k)
1061 unsigned offset = __btree_node_key_to_offset(b, k);
1062 unsigned j = rw_aux_tree_bsearch(b, t, offset);
1065 rw_aux_tree(b, t)[j].offset == offset)
1066 rw_aux_tree_set(b, t, j, k);
1068 bch2_bset_verify_rw_aux_tree(b, t);
1071 static void ro_aux_tree_fix_invalidated_key(struct btree *b,
1072 struct bset_tree *t,
1073 struct bkey_packed *k)
1075 struct bkey_packed min_key, max_key;
1076 unsigned inorder, j;
1078 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
1080 /* signal to make_bfloat() that they're uninitialized: */
1081 min_key.u64s = max_key.u64s = 0;
1083 if (bkey_next(k) == btree_bkey_last(b, t)) {
1084 t->max_key = bkey_unpack_pos(b, k);
1086 for (j = 1; j < t->size; j = j * 2 + 1)
1087 make_bfloat(b, t, j, &min_key, &max_key);
1090 inorder = bkey_to_cacheline(b, t, k);
1093 inorder < t->size) {
1094 j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
1096 if (k == tree_to_bkey(b, t, j)) {
1097 /* Fix the node this key corresponds to */
1098 make_bfloat(b, t, j, &min_key, &max_key);
1100 /* Children for which this key is the right boundary */
1101 for (j = eytzinger1_left_child(j);
1103 j = eytzinger1_right_child(j))
1104 make_bfloat(b, t, j, &min_key, &max_key);
1108 if (inorder + 1 < t->size) {
1109 j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);
1111 if (k == tree_to_prev_bkey(b, t, j)) {
1112 make_bfloat(b, t, j, &min_key, &max_key);
1114 /* Children for which this key is the left boundary */
1115 for (j = eytzinger1_right_child(j);
1117 j = eytzinger1_left_child(j))
1118 make_bfloat(b, t, j, &min_key, &max_key);
1124 * bch2_bset_fix_invalidated_key() - given an existing key @k that has been
1125 * modified, fix any auxiliary search tree by remaking all the nodes in the
1126 * auxiliary search tree that @k corresponds to
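/*
 * A rough usage sketch: code that rewrites a packed key in place would follow
 * up with
 *
 *	bch2_bset_fix_invalidated_key(b, k);
 *
 * so that any aux tree nodes whose cached mantissa/offset referred to the old
 * bytes get recomputed.
 */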
1128 void bch2_bset_fix_invalidated_key(struct btree *b, struct bkey_packed *k)
1130 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1132 switch (bset_aux_tree_type(t)) {
1133 case BSET_NO_AUX_TREE:
1135 case BSET_RO_AUX_TREE:
1136 ro_aux_tree_fix_invalidated_key(b, t, k);
1138 case BSET_RW_AUX_TREE:
1139 rw_aux_tree_fix_invalidated_key(b, t, k);
1144 static void bch2_bset_fix_lookup_table(struct btree *b,
1145 struct bset_tree *t,
1146 struct bkey_packed *_where,
1147 unsigned clobber_u64s,
1150 int shift = new_u64s - clobber_u64s;
1151 unsigned l, j, where = __btree_node_key_to_offset(b, _where);
1153 EBUG_ON(bset_has_ro_aux_tree(t));
1155 if (!bset_has_rw_aux_tree(t))
1158 /* returns first entry >= where */
1159 l = rw_aux_tree_bsearch(b, t, where);
1161 if (!l) /* never delete first entry */
1163 else if (l < t->size &&
1164 where < t->end_offset &&
1165 rw_aux_tree(b, t)[l].offset == where)
1166 rw_aux_tree_set(b, t, l++, _where);
1172 rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
1177 rw_aux_tree(b, t)[j].offset + shift ==
1178 rw_aux_tree(b, t)[l - 1].offset)
1181 memmove(&rw_aux_tree(b, t)[l],
1182 &rw_aux_tree(b, t)[j],
1183 (void *) &rw_aux_tree(b, t)[t->size] -
1184 (void *) &rw_aux_tree(b, t)[j]);
1187 for (j = l; j < t->size; j++)
1188 rw_aux_tree(b, t)[j].offset += shift;
1190 EBUG_ON(l < t->size &&
1191 rw_aux_tree(b, t)[l].offset ==
1192 rw_aux_tree(b, t)[l - 1].offset);
1194 if (t->size < bset_rw_tree_capacity(b, t) &&
1196 ? rw_aux_tree(b, t)[l].offset
1198 rw_aux_tree(b, t)[l - 1].offset >
1199 L1_CACHE_BYTES / sizeof(u64)) {
1200 struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
1201 struct bkey_packed *end = l < t->size
1202 ? rw_aux_to_bkey(b, t, l)
1203 : btree_bkey_last(b, t);
1204 struct bkey_packed *k = start;
1211 if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
1212 memmove(&rw_aux_tree(b, t)[l + 1],
1213 &rw_aux_tree(b, t)[l],
1214 (void *) &rw_aux_tree(b, t)[t->size] -
1215 (void *) &rw_aux_tree(b, t)[l]);
1217 rw_aux_tree_set(b, t, l, k);
1223 bch2_bset_verify_rw_aux_tree(b, t);
1224 bset_aux_tree_verify(b);
1227 void bch2_bset_insert(struct btree *b,
1228 struct btree_node_iter *iter,
1229 struct bkey_packed *where,
1230 struct bkey_i *insert,
1231 unsigned clobber_u64s)
1233 struct bkey_format *f = &b->format;
1234 struct bset_tree *t = bset_tree_last(b);
1235 struct bkey_packed packed, *src = bkey_to_packed(insert);
1237 bch2_bset_verify_rw_aux_tree(b, t);
1238 bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
1240 if (bch2_bkey_pack_key(&packed, &insert->k, f))
1243 if (!bkey_whiteout(&insert->k))
1244 btree_keys_account_key_add(&b->nr, t - b->set, src);
1246 if (src->u64s != clobber_u64s) {
1247 u64 *src_p = where->_data + clobber_u64s;
1248 u64 *dst_p = where->_data + src->u64s;
1250 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
1251 (int) clobber_u64s - src->u64s);
1253 memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1254 le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
1255 set_btree_bset_end(b, t);
1258 memcpy_u64s(where, src,
1259 bkeyp_key_u64s(f, src));
1260 memcpy_u64s(bkeyp_val(f, where), &insert->v,
1261 bkeyp_val_u64s(f, src));
1263 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
1265 bch2_verify_btree_nr_keys(b);
1268 void bch2_bset_delete(struct btree *b,
1269 struct bkey_packed *where,
1270 unsigned clobber_u64s)
1272 struct bset_tree *t = bset_tree_last(b);
1273 u64 *src_p = where->_data + clobber_u64s;
1274 u64 *dst_p = where->_data;
1276 bch2_bset_verify_rw_aux_tree(b, t);
1278 EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
1280 memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1281 le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
1282 set_btree_bset_end(b, t);
1284 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
1290 static struct bkey_packed *bset_search_write_set(const struct btree *b,
1291 struct bset_tree *t,
1293 const struct bkey_packed *packed_search)
1295 unsigned l = 0, r = t->size;
1297 while (l + 1 != r) {
1298 unsigned m = (l + r) >> 1;
1300 if (bkey_cmp(rw_aux_tree(b, t)[m].k, search) < 0)
1306 return rw_aux_to_bkey(b, t, l);
1310 static int bset_search_tree_slowpath(const struct btree *b,
1311 struct bset_tree *t, struct bpos *search,
1312 const struct bkey_packed *packed_search,
1315 return bkey_cmp_p_or_unp(b, tree_to_bkey(b, t, n),
1316 packed_search, search) < 0;
1320 static struct bkey_packed *bset_search_tree(const struct btree *b,
1321 struct bset_tree *t,
1323 const struct bkey_packed *packed_search)
1325 struct ro_aux_tree *base = ro_aux_tree_base(b, t);
1326 struct bkey_float *f = bkey_float_get(base, 1);
1328 unsigned inorder, n = 1;
1331 if (likely(n << 4 < t->size)) {
1332 p = bkey_float_get(base, n << 4);
1334 } else if (n << 3 < t->size) {
1335 inorder = __eytzinger1_to_inorder(n, t->size, t->extra);
1336 p = bset_cacheline(b, t, inorder);
1337 #ifdef CONFIG_X86_64
1338 asm(".intel_syntax noprefix;"
1339 "prefetcht0 [%0 - 127 + 64 * 0];"
1340 "prefetcht0 [%0 - 127 + 64 * 1];"
1341 "prefetcht0 [%0 - 127 + 64 * 2];"
1342 "prefetcht0 [%0 - 127 + 64 * 3];"
1343 ".att_syntax prefix;"
1347 prefetch(p + L1_CACHE_BYTES * 0);
1348 prefetch(p + L1_CACHE_BYTES * 1);
1349 prefetch(p + L1_CACHE_BYTES * 2);
1350 prefetch(p + L1_CACHE_BYTES * 3);
1352 } else if (n >= t->size)
1355 f = bkey_float_get(base, n);
1357 if (packed_search &&
1358 likely(f->exponent < BFLOAT_FAILED))
1359 n = n * 2 + (bfloat_mantissa(f, n) <
1360 bkey_mantissa(packed_search, f, n));
1362 n = n * 2 + bset_search_tree_slowpath(b, t,
1363 &search, packed_search, n);
1364 } while (n < t->size);
1366 inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
1369 * n would have been the node we recursed to - the low bit tells us if
1370 * we recursed left or recursed right.
1373 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1376 n = eytzinger1_prev(n >> 1, t->size);
1377 f = bkey_float_get(base, n);
1378 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1380 return btree_bkey_first(b, t);
1385 * Returns the first key greater than or equal to @search
1387 __always_inline __flatten
1388 static struct bkey_packed *bch2_bset_search(struct btree *b,
1389 struct bset_tree *t,
1391 struct bkey_packed *packed_search,
1392 const struct bkey_packed *lossy_packed_search,
1393 bool strictly_greater)
1395 struct bkey_packed *m;
1398 * First, we search for a cacheline, then we do a linear search
1399 * within that cacheline.
1401 * To search for the cacheline, there are three different possibilities:
1402 * * The set is too small to have a search tree, so we just do a linear
1403 * search over the whole set.
1404 * * The set is the one we're currently inserting into; keeping a full
1405 * auxiliary search tree up to date would be too expensive, so we
1406 * use a much simpler lookup table to do a binary search -
1407 * bset_search_write_set().
1408 * * Or we use the auxiliary search tree we constructed earlier -
1409 * bset_search_tree()
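/*
 * As a concrete illustration: in a node with three bsets, the two older,
 * read-only bsets would normally be searched via their ro (eytzinger) aux
 * trees, the bset currently being appended to via the rw lookup table, and a
 * freshly started bset with no aux tree yet via the plain linear scan.
 */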
1412 switch (bset_aux_tree_type(t)) {
1413 case BSET_NO_AUX_TREE:
1414 m = btree_bkey_first(b, t);
1416 case BSET_RW_AUX_TREE:
1417 m = bset_search_write_set(b, t, search, lossy_packed_search);
1419 case BSET_RO_AUX_TREE:
1421 * Each node in the auxiliary search tree covers a certain range
1422 * of bits, and keys above and below the set it covers might
1423 * differ outside those bits - so we have to special case the
1424 * start and end - handle that here:
1427 if (bkey_cmp(search, t->max_key) > 0)
1428 return btree_bkey_last(b, t);
1430 m = bset_search_tree(b, t, search, lossy_packed_search);
1434 if (lossy_packed_search)
1435 while (m != btree_bkey_last(b, t) &&
1436 !btree_iter_pos_cmp_p_or_unp(b, search, lossy_packed_search,
1437 m, strictly_greater))
1441 while (m != btree_bkey_last(b, t) &&
1442 !btree_iter_pos_cmp_packed(b, &search, m, strictly_greater))
1445 if (btree_keys_expensive_checks(b)) {
1446 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
1449 btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
1450 prev, strictly_greater));
1456 /* Btree node iterator */
1458 void bch2_btree_node_iter_push(struct btree_node_iter *iter,
1460 const struct bkey_packed *k,
1461 const struct bkey_packed *end)
1463 __bch2_btree_node_iter_push(iter, b, k, end);
1464 bch2_btree_node_iter_sort(iter, b);
1467 noinline __flatten __attribute__((cold))
1468 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
1469 struct btree *b, struct bpos search,
1470 bool strictly_greater)
1472 struct bset_tree *t;
1474 trace_bkey_pack_pos_fail(&search);
1477 __bch2_btree_node_iter_push(iter, b,
1478 bch2_bset_search(b, t, search, NULL, NULL,
1480 btree_bkey_last(b, t));
1482 bch2_btree_node_iter_sort(iter, b);
1486 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
1489 * Main entry point to the lookup code for individual btree nodes:
1493 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
1494 * keys. This doesn't matter for most code, but it does matter for lookups.
1496 * Consider a run of adjacent keys containing several copies of some key k.
1499 * If you search for k, the lookup code isn't guaranteed to return you any
1500 * specific k. The lookup code is conceptually doing a binary search and
1501 * iterating backwards is very expensive so if the pivot happens to land at the
1502 * last k that's what you'll get.
1504 * This works out ok, but it's something to be aware of:
1506 * - For non extents, we guarantee that the live key comes last - see
1507 * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
1508 * see will only be deleted keys you don't care about.
1510 * - For extents, deleted keys sort last (see the comment at the top of this
1511 * file). But when you're searching for extents, you actually want the first
1512 * key strictly greater than your search key - an extent that compares equal
1513 * to the search key is going to have 0 sectors after the search key.
1515 * But this does mean that we can't just search for
1516 * bkey_successor(start_of_range) to get the first extent that overlaps with
1517 * the range we want - if we're unlucky and there's an extent that ends
1518 * exactly where we searched, then there could be a deleted key at the same
1519 * position and we'd get that when we search instead of the preceding extent
1522 * So we've got to search for start_of_range, then after the lookup iterate
1523 * past any extents that compare equal to the position we searched for.
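/*
 * A minimal usage sketch (assuming @b is a locked btree node and @pos the
 * position to search for):
 *
 *	struct btree_node_iter iter;
 *	struct bkey u;
 *	struct bkey_s_c k;
 *
 *	bch2_btree_node_iter_init(&iter, b, pos, false);
 *	k = bch2_btree_node_iter_peek_unpack(&iter, b, &u);
 *
 * k is now the first key >= pos (or the first key > pos, had strictly_greater
 * been true), and bch2_btree_node_iter_advance(&iter, b) steps to the next
 * key across all bsets in the node.
 */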
1525 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
1526 struct btree *b, struct bpos search,
1527 bool strictly_greater)
1529 struct bset_tree *t;
1530 struct bkey_packed p, *packed_search = NULL;
1532 EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
1533 bset_aux_tree_verify(b);
1535 memset(iter, 0, sizeof(*iter));
1537 switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
1538 case BKEY_PACK_POS_EXACT:
1541 case BKEY_PACK_POS_SMALLER:
1542 packed_search = NULL;
1544 case BKEY_PACK_POS_FAIL:
1545 btree_node_iter_init_pack_failed(iter, b, search,
1551 __bch2_btree_node_iter_push(iter, b,
1552 bch2_bset_search(b, t, search,
1555 btree_bkey_last(b, t));
1557 bch2_btree_node_iter_sort(iter, b);
1560 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
1563 struct bset_tree *t;
1565 memset(iter, 0, sizeof(*iter));
1568 __bch2_btree_node_iter_push(iter, b,
1569 btree_bkey_first(b, t),
1570 btree_bkey_last(b, t));
1571 bch2_btree_node_iter_sort(iter, b);
1574 struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
1576 struct bset_tree *t)
1578 struct btree_node_iter_set *set;
1580 btree_node_iter_for_each(iter, set)
1581 if (set->end == t->end_offset)
1582 return __btree_node_offset_to_key(b, set->k);
1584 return btree_bkey_last(b, t);
1587 static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
1593 if ((ret = (btree_node_iter_cmp(b,
1595 iter->data[first + 1]) > 0)))
1596 swap(iter->data[first], iter->data[first + 1]);
1600 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
1603 /* unrolled bubble sort: */
1605 if (!__btree_node_iter_set_end(iter, 2)) {
1606 btree_node_iter_sort_two(iter, b, 0);
1607 btree_node_iter_sort_two(iter, b, 1);
1610 if (!__btree_node_iter_set_end(iter, 1))
1611 btree_node_iter_sort_two(iter, b, 0);
1614 void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
1615 struct btree_node_iter_set *set)
1617 struct btree_node_iter_set *last =
1618 iter->data + ARRAY_SIZE(iter->data) - 1;
1620 memmove(&set[0], &set[1], (void *) last - (void *) set);
1621 *last = (struct btree_node_iter_set) { 0, 0 };
1624 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1627 iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
1629 EBUG_ON(iter->data->k > iter->data->end);
1631 if (unlikely(__btree_node_iter_set_end(iter, 0))) {
1632 bch2_btree_node_iter_set_drop(iter, iter->data);
1636 if (__btree_node_iter_set_end(iter, 1))
1639 if (!btree_node_iter_sort_two(iter, b, 0))
1642 if (__btree_node_iter_set_end(iter, 2))
1645 btree_node_iter_sort_two(iter, b, 1);
1648 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1651 #ifdef CONFIG_BCACHEFS_DEBUG
1652 bch2_btree_node_iter_verify(iter, b);
1653 bch2_btree_node_iter_next_check(iter, b);
1655 __bch2_btree_node_iter_advance(iter, b);
1658 static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
1660 unsigned n = ARRAY_SIZE(iter->data);
1662 while (n && __btree_node_iter_set_end(iter, n - 1))
1671 struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
1673 unsigned min_key_type)
1675 struct bkey_packed *k, *prev = NULL;
1676 struct bkey_packed *orig_pos = bch2_btree_node_iter_peek_all(iter, b);
1677 struct btree_node_iter_set *set;
1678 struct bset_tree *t;
1681 bch2_btree_node_iter_verify(iter, b);
1683 for_each_bset(b, t) {
1684 k = bch2_bkey_prev_filter(b, t,
1685 bch2_btree_node_iter_bset_pos(iter, b, t),
1688 (!prev || __btree_node_iter_cmp(b, k, prev) > 0)) {
1690 end = t->end_offset;
1698 * We're manually memmoving instead of just calling sort() to ensure the
1699 * prev we picked ends up in slot 0 - sort won't necessarily put it
1700 * there because of duplicate deleted keys:
1702 btree_node_iter_for_each(iter, set)
1703 if (set->end == end)
1706 BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
1708 BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
1710 memmove(&iter->data[1],
1712 (void *) set - (void *) &iter->data[0]);
1714 iter->data[0].k = __btree_node_key_to_offset(b, prev);
1715 iter->data[0].end = end;
1717 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1718 struct btree_node_iter iter2 = *iter;
1721 __bch2_btree_node_iter_advance(&iter2, b);
1723 while ((k = bch2_btree_node_iter_peek_all(&iter2, b)) != orig_pos) {
1724 BUG_ON(k->type >= min_key_type);
1725 __bch2_btree_node_iter_advance(&iter2, b);
1732 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
1736 struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
1738 return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
1743 void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
1745 struct bset_tree *t;
1747 for_each_bset(b, t) {
1748 enum bset_aux_tree_type type = bset_aux_tree_type(t);
1751 stats->sets[type].nr++;
1752 stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
1755 if (bset_has_ro_aux_tree(t)) {
1756 stats->floats += t->size - 1;
1758 for (j = 1; j < t->size; j++)
1759 switch (bkey_float(b, t, j)->exponent) {
1760 case BFLOAT_FAILED_UNPACKED:
1761 stats->failed_unpacked++;
1763 case BFLOAT_FAILED_PREV:
1764 stats->failed_prev++;
1766 case BFLOAT_FAILED_OVERFLOW:
1767 stats->failed_overflow++;
1774 int bch2_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
1775 char *buf, size_t size)
1777 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1778 struct bkey_packed *l, *r, *p;
1780 char buf1[200], buf2[200];
1786 if (!bset_has_ro_aux_tree(t))
1789 j = __inorder_to_eytzinger1(bkey_to_cacheline(b, t, k), t->size, t->extra);
1792 k == tree_to_bkey(b, t, j))
1793 switch (bkey_float(b, t, j)->exponent) {
1794 case BFLOAT_FAILED_UNPACKED:
1795 uk = bkey_unpack_key(b, k);
1796 return scnprintf(buf, size,
1797 " failed unpacked at depth %u\n"
1800 uk.p.inode, uk.p.offset);
1801 case BFLOAT_FAILED_PREV:
1802 p = tree_to_prev_bkey(b, t, j);
1803 l = is_power_of_2(j)
1804 ? btree_bkey_first(b, t)
1805 : tree_to_prev_bkey(b, t, j >> ffs(j));
1806 r = is_power_of_2(j + 1)
1807 ? bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))
1808 : tree_to_bkey(b, t, j >> (ffz(j) + 1));
1810 up = bkey_unpack_key(b, p);
1811 uk = bkey_unpack_key(b, k);
1812 bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
1813 bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
1815 return scnprintf(buf, size,
1816 " failed prev at depth %u\n"
1817 "\tkey starts at bit %u but first differing bit at %u\n"
1823 bch2_bkey_greatest_differing_bit(b, l, r),
1824 bch2_bkey_greatest_differing_bit(b, p, k),
1825 uk.p.inode, uk.p.offset,
1826 up.p.inode, up.p.offset,
1828 case BFLOAT_FAILED_OVERFLOW:
1829 uk = bkey_unpack_key(b, k);
1830 return scnprintf(buf, size,
1831 " failed overflow at depth %u\n"
1834 uk.p.inode, uk.p.offset);