2 * Code for working with individual keys, and sorted sets of keys within a
5 * Copyright 2012 Google, Inc.
9 #include "btree_cache.h"
11 #include "eytzinger.h"
14 #include <asm/unaligned.h>
15 #include <linux/dynamic_fault.h>
16 #include <linux/console.h>
17 #include <linux/random.h>
18 #include <linux/prefetch.h>
21 #include "alloc_types.h"
22 #include <trace/events/bcachefs.h>
24 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
29 if (k >= btree_bkey_first(b, t) &&
30 k < btree_bkey_last(b, t))
37 * There are never duplicate live keys in the btree - but including keys that
38 * have been flagged as deleted (and will be cleaned up later) we _will_ see
41 * Thus the sort order is: usual key comparison first, but for keys that compare
42 * equal the deleted key(s) come first, and the (at most one) live version comes
45 * The main reason for this is insertion: to handle overwrites, we first iterate
46 * over keys that compare equal to our insert key, and then insert immediately
47 * prior to the first key greater than the key we're inserting - our insert
48 * position will be after all keys that compare equal to our insert key, which
49 * by the time we actually do the insert will all be deleted.
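/*
 * Illustrative example (added, not part of the original comment): after two
 * overwrites of position P, a bset can momentarily contain
 *
 *	... P (deleted)  P (deleted)  P (live)  Q ...		(Q > P)
 *
 * and a new insert at P goes immediately before Q, i.e. after every key that
 * compares equal to P - which, by the time we do the insert, are all deleted.
 */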
52 void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
54 struct bkey_packed *_k, *_n;
61 for (_k = i->start, k = bkey_unpack_key(b, _k);
66 bch2_bkey_to_text(buf, sizeof(buf), &k);
67 printk(KERN_ERR "block %u key %zi/%u: %s\n", set,
68 _k->_data - i->_data, i->u64s, buf);
70 if (_n == vstruct_last(i))
73 n = bkey_unpack_key(b, _n);
75 if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
76 printk(KERN_ERR "Key skipped backwards\n");
81 * Weird check for duplicate non-extent keys: extents are
82 * deleted iff they have 0 size, so if it has zero size and it's
83 * not deleted these aren't extents:
85 if (((!k.size && !bkey_deleted(&k)) ||
86 (!n.size && !bkey_deleted(&n))) &&
89 printk(KERN_ERR "Duplicate keys\n");
93 void bch2_dump_btree_node(struct btree *b)
99 bch2_dump_bset(b, bset(b, t), t - b->set);
103 void bch2_dump_btree_node_iter(struct btree *b,
104 struct btree_node_iter *iter)
106 struct btree_node_iter_set *set;
108 printk(KERN_ERR "btree node iter with %u sets:\n", b->nsets);
110 btree_node_iter_for_each(iter, set) {
111 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
112 struct bset_tree *t = bch2_bkey_to_bset(b, k);
113 struct bkey uk = bkey_unpack_key(b, k);
116 bch2_bkey_to_text(buf, sizeof(buf), &uk);
117 printk(KERN_ERR "set %zu key %zi/%u: %s\n", t - b->set,
118 k->_data - bset(b, t)->_data, bset(b, t)->u64s, buf);
122 #ifdef CONFIG_BCACHEFS_DEBUG
124 static bool keys_out_of_order(struct btree *b,
125 const struct bkey_packed *prev,
126 const struct bkey_packed *next,
129 struct bkey nextu = bkey_unpack_key(b, next);
131 return bkey_cmp_left_packed_byval(b, prev, bkey_start_pos(&nextu)) > 0 ||
133 ? !bkey_deleted(next)
134 : !bkey_deleted(prev)) &&
135 !bkey_cmp_packed(b, prev, next));
138 void __bch2_verify_btree_nr_keys(struct btree *b)
141 struct bkey_packed *k;
142 struct btree_nr_keys nr = { 0 };
145 for (k = btree_bkey_first(b, t);
146 k != btree_bkey_last(b, t);
148 if (!bkey_whiteout(k))
149 btree_keys_account_key_add(&nr, t - b->set, k);
151 BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
154 static void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
156 struct bkey_packed *k)
158 const struct bkey_packed *n = bch2_btree_node_iter_peek_all(iter, b);
160 bkey_unpack_key(b, k);
163 keys_out_of_order(b, k, n, iter->is_extents)) {
164 struct bkey ku = bkey_unpack_key(b, k);
165 struct bkey nu = bkey_unpack_key(b, n);
166 char buf1[80], buf2[80];
168 bch2_dump_btree_node(b);
169 bch2_bkey_to_text(buf1, sizeof(buf1), &ku);
170 bch2_bkey_to_text(buf2, sizeof(buf2), &nu);
171 panic("out of order/overlapping:\n%s\n%s\n", buf1, buf2);
175 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
178 struct btree_node_iter_set *set, *prev = NULL;
180 struct bkey_packed *k, *first;
182 if (bch2_btree_node_iter_end(iter))
185 btree_node_iter_for_each(iter, set) {
186 k = __btree_node_offset_to_key(b, set->k);
187 t = bch2_bkey_to_bset(b, k);
189 BUG_ON(__btree_node_offset_to_key(b, set->end) !=
190 btree_bkey_last(b, t));
193 btree_node_iter_cmp(iter, b, *prev, *set) > 0);
198 first = __btree_node_offset_to_key(b, iter->data[0].k);
201 if (bch2_btree_node_iter_bset_pos(iter, b, t) ==
202 btree_bkey_last(b, t) &&
203 (k = bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))))
204 BUG_ON(__btree_node_iter_cmp(iter->is_extents, b,
208 void bch2_verify_key_order(struct btree *b,
209 struct btree_node_iter *iter,
210 struct bkey_packed *where)
212 struct bset_tree *t = bch2_bkey_to_bset(b, where);
213 struct bkey_packed *k, *prev;
214 struct bkey uk, uw = bkey_unpack_key(b, where);
216 k = bch2_bkey_prev_all(b, t, where);
218 keys_out_of_order(b, k, where, iter->is_extents)) {
219 char buf1[100], buf2[100];
221 bch2_dump_btree_node(b);
222 uk = bkey_unpack_key(b, k);
223 bch2_bkey_to_text(buf1, sizeof(buf1), &uk);
224 bch2_bkey_to_text(buf2, sizeof(buf2), &uw);
225 panic("out of order with prev:\n%s\n%s\n",
229 k = bkey_next(where);
230 BUG_ON(k != btree_bkey_last(b, t) &&
231 keys_out_of_order(b, where, k, iter->is_extents));
233 for_each_bset(b, t) {
234 if (where >= btree_bkey_first(b, t) &&
235 where < btree_bkey_last(b, t))
238 k = bch2_btree_node_iter_bset_pos(iter, b, t);
240 if (k == btree_bkey_last(b, t))
241 k = bch2_bkey_prev_all(b, t, k);
243 while (bkey_cmp_left_packed_byval(b, k, bkey_start_pos(&uw)) > 0 &&
244 (prev = bch2_bkey_prev_all(b, t, k)))
248 k != btree_bkey_last(b, t);
250 uk = bkey_unpack_key(b, k);
252 if (iter->is_extents) {
253 BUG_ON(!(bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0 ||
254 bkey_cmp(uk.p, bkey_start_pos(&uw)) <= 0));
256 BUG_ON(!bkey_cmp(uw.p, uk.p) &&
260 if (bkey_cmp(uw.p, bkey_start_pos(&uk)) <= 0)
268 static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
270 struct bkey_packed *k) {}
274 /* Auxiliary search trees */
276 #define BFLOAT_FAILED_UNPACKED (U8_MAX - 0)
277 #define BFLOAT_FAILED_PREV (U8_MAX - 1)
278 #define BFLOAT_FAILED_OVERFLOW (U8_MAX - 2)
279 #define BFLOAT_FAILED (U8_MAX - 2)
281 #define KEY_WORDS BITS_TO_LONGS(1 << BKEY_EXPONENT_BITS)
295 #define BFLOAT_32BIT_NR 32U
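/*
 * Note (added for clarity): auxiliary search tree nodes with index below
 * BFLOAT_32BIT_NR store a 32 bit mantissa, the rest store a 16 bit mantissa -
 * see bfloat_mantissa()/bfloat_mantissa_set() below.
 */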
297 static unsigned bkey_float_byte_offset(unsigned idx)
299 int d = (idx - BFLOAT_32BIT_NR) << 1;
307 struct bkey_float _d[0];
316 * BSET_CACHELINE was originally intended to match the hardware cacheline size -
317 * it used to be 64, but I realized the lookup code would touch slightly less
318 * memory if it was 128.
320 * It defines the number of bytes (in struct bset) per struct bkey_float in
321 * the auxiliary search tree - when we're done searching the bkey_float tree we
322 * have this many bytes left that we do a linear search over.
324 * Since (after level 5) every level of the bset_tree is on a new cacheline,
325 * we're touching one fewer cacheline in the bset tree in exchange for one more
326 * cacheline in the linear search - but the linear search might stop before it
327 * gets to the second cacheline.
330 #define BSET_CACHELINE 128
332 /* Space required for the btree node keys */
333 static inline size_t btree_keys_bytes(struct btree *b)
335 return PAGE_SIZE << b->page_order;
338 static inline size_t btree_keys_cachelines(struct btree *b)
340 return btree_keys_bytes(b) / BSET_CACHELINE;
343 static inline size_t btree_aux_data_bytes(struct btree *b)
345 return btree_keys_cachelines(b) * 8;
348 static inline size_t btree_aux_data_u64s(struct btree *b)
350 return btree_aux_data_bytes(b) / sizeof(u64);
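/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096 and page_order 0):
 * btree_keys_bytes() == 4096, btree_keys_cachelines() == 4096 / 128 == 32,
 * btree_aux_data_bytes() == 32 * 8 == 256, btree_aux_data_u64s() == 32.
 */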
353 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
355 BUG_ON(t->aux_data_offset == U16_MAX);
357 switch (bset_aux_tree_type(t)) {
358 case BSET_NO_AUX_TREE:
359 return t->aux_data_offset;
360 case BSET_RO_AUX_TREE:
361 return t->aux_data_offset +
362 DIV_ROUND_UP(bkey_float_byte_offset(t->size) +
363 sizeof(u8) * t->size, 8);
364 case BSET_RW_AUX_TREE:
365 return t->aux_data_offset +
366 DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
372 static unsigned bset_aux_tree_buf_start(const struct btree *b,
373 const struct bset_tree *t)
376 ? DIV_ROUND_UP(b->unpack_fn_len, 8)
377 : bset_aux_tree_buf_end(t - 1);
380 static void *__aux_tree_base(const struct btree *b,
381 const struct bset_tree *t)
383 return b->aux_data + t->aux_data_offset * 8;
386 static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
387 const struct bset_tree *t)
389 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
391 return __aux_tree_base(b, t);
394 static u8 *ro_aux_tree_prev(const struct btree *b,
395 const struct bset_tree *t)
397 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
399 return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
402 static struct bkey_float *bkey_float_get(struct ro_aux_tree *b,
405 return (void *) b + bkey_float_byte_offset(idx);
408 static struct bkey_float *bkey_float(const struct btree *b,
409 const struct bset_tree *t,
412 return bkey_float_get(ro_aux_tree_base(b, t), idx);
415 static void bset_aux_tree_verify(struct btree *b)
417 #ifdef CONFIG_BCACHEFS_DEBUG
420 for_each_bset(b, t) {
421 if (t->aux_data_offset == U16_MAX)
424 BUG_ON(t != b->set &&
425 t[-1].aux_data_offset == U16_MAX);
427 BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
428 BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
429 BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
434 /* Memory allocation */
436 void bch2_btree_keys_free(struct btree *b)
442 #ifndef PAGE_KERNEL_EXEC
443 # define PAGE_KERNEL_EXEC PAGE_KERNEL
446 int bch2_btree_keys_alloc(struct btree *b, unsigned page_order, gfp_t gfp)
448 b->page_order = page_order;
449 b->aux_data = __vmalloc(btree_aux_data_bytes(b), gfp,
457 void bch2_btree_keys_init(struct btree *b, bool *expensive_debug_checks)
462 memset(&b->nr, 0, sizeof(b->nr));
463 #ifdef CONFIG_BCACHEFS_DEBUG
464 b->expensive_debug_checks = expensive_debug_checks;
466 for (i = 0; i < MAX_BSETS; i++)
467 b->set[i].data_offset = U16_MAX;
469 bch2_bset_set_no_aux_tree(b, b->set);
472 /* Binary tree stuff for auxiliary search trees */
475 * Cacheline/offset <-> bkey pointer arithmetic:
477 * t->tree is a binary search tree in an array; each node corresponds to a key
478 * in one cacheline in t->set (BSET_CACHELINE bytes).
480 * This means we don't have to store the full index of the key that a node in
481 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline, and
482 * then bkey_float->key_offset gives us the offset within that cacheline, in units of 8
485 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
488 * To construct the bfloat for an arbitrary key we need to know what the key
489 * immediately preceding it is: we have to check if the two keys differ in the
490 * bits we're going to store in bkey_float->mantissa. The prev-size array stores
491 * the size of the previous key so we can walk backwards to it from tree_to_bkey(b, t, j)'s key.
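/*
 * Illustrative example (added): with the helpers below,
 * cacheline_to_bkey(b, t, 3, 5) points 3 * BSET_CACHELINE + 5 * 8 bytes past
 * the (rounded down) start of the bset; bkey_to_cacheline() and
 * __bkey_to_cacheline_offset() invert that mapping.
 */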
494 static inline void *bset_cacheline(const struct btree *b,
495 const struct bset_tree *t,
498 return (void *) round_down((unsigned long) btree_bkey_first(b, t),
500 cacheline * BSET_CACHELINE;
503 static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
504 const struct bset_tree *t,
508 return bset_cacheline(b, t, cacheline) + offset * 8;
511 static unsigned bkey_to_cacheline(const struct btree *b,
512 const struct bset_tree *t,
513 const struct bkey_packed *k)
515 return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
518 static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
519 const struct bset_tree *t,
521 const struct bkey_packed *k)
523 return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
526 static unsigned bkey_to_cacheline_offset(const struct btree *b,
527 const struct bset_tree *t,
529 const struct bkey_packed *k)
531 size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
537 static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
538 const struct bset_tree *t,
541 return cacheline_to_bkey(b, t,
542 __eytzinger1_to_inorder(j, t->size, t->extra),
543 bkey_float(b, t, j)->key_offset);
546 static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
547 const struct bset_tree *t,
550 unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
552 return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
555 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
556 const struct bset_tree *t)
558 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
560 return __aux_tree_base(b, t);
564 * For the write set - the one we're currently inserting keys into - we don't
565 * maintain a full search tree, we just keep a simple lookup table (the rw aux tree).
567 static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
571 return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
574 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
575 unsigned j, struct bkey_packed *k)
577 EBUG_ON(k >= btree_bkey_last(b, t));
579 rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
580 .offset = __btree_node_key_to_offset(b, k),
581 .k = bkey_unpack_pos(b, k),
585 static void bch2_bset_verify_rw_aux_tree(struct btree *b,
588 struct bkey_packed *k = btree_bkey_first(b, t);
591 if (!btree_keys_expensive_checks(b))
594 BUG_ON(bset_has_ro_aux_tree(t));
596 if (!bset_has_rw_aux_tree(t))
600 BUG_ON(rw_aux_to_bkey(b, t, j) != k);
604 if (rw_aux_to_bkey(b, t, j) == k) {
605 BUG_ON(bkey_cmp(rw_aux_tree(b, t)[j].k,
606 bkey_unpack_pos(b, k)));
611 BUG_ON(rw_aux_tree(b, t)[j].offset <=
612 rw_aux_tree(b, t)[j - 1].offset);
616 BUG_ON(k >= btree_bkey_last(b, t));
620 /* returns idx of first entry >= offset: */
621 static unsigned rw_aux_tree_bsearch(struct btree *b,
625 unsigned l = 0, r = t->size;
627 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
630 unsigned m = (l + r) >> 1;
632 if (rw_aux_tree(b, t)[m].offset < offset)
638 EBUG_ON(l < t->size &&
639 rw_aux_tree(b, t)[l].offset < offset);
641 rw_aux_tree(b, t)[l - 1].offset >= offset);
644 EBUG_ON(l > t->size);
649 static inline unsigned bfloat_mantissa(const struct bkey_float *f,
652 return idx < BFLOAT_32BIT_NR ? f->mantissa32 : f->mantissa16;
655 static inline void bfloat_mantissa_set(struct bkey_float *f,
656 unsigned idx, unsigned mantissa)
658 if (idx < BFLOAT_32BIT_NR)
659 f->mantissa32 = mantissa;
661 f->mantissa16 = mantissa;
664 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
665 const struct bkey_float *f,
670 EBUG_ON(!bkey_packed(k));
672 v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
675 * In little endian, we're shifting off low bits (and then the bits we
676 * want are at the low end), in big endian we're shifting off high bits
677 * (and then the bits we want are at the high end, so we shift them
680 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
681 v >>= f->exponent & 7;
683 v >>= 64 - (f->exponent & 7) - (idx < BFLOAT_32BIT_NR ? 32 : 16);
685 return idx < BFLOAT_32BIT_NR ? (u32) v : (u16) v;
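/*
 * Worked example (illustrative): with f->exponent == 13 on little endian we
 * load the u64 starting at byte 13 >> 3 == 1 of the key, shift right by
 * 13 & 7 == 5, and keep the low 32 (or 16) bits as the mantissa.
 */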
688 static void make_bfloat(struct btree *b, struct bset_tree *t,
690 struct bkey_packed *min_key,
691 struct bkey_packed *max_key)
693 struct bkey_float *f = bkey_float(b, t, j);
694 struct bkey_packed *m = tree_to_bkey(b, t, j);
695 struct bkey_packed *p = tree_to_prev_bkey(b, t, j);
696 struct bkey_packed *l, *r;
697 unsigned bits = j < BFLOAT_32BIT_NR ? 32 : 16;
699 int shift, exponent, high_bit;
701 EBUG_ON(bkey_next(p) != m);
703 if (is_power_of_2(j)) {
707 if (!bkey_pack_pos(l, b->data->min_key, b)) {
711 tmp.k.p = b->data->min_key;
716 l = tree_to_prev_bkey(b, t, j >> ffs(j));
721 if (is_power_of_2(j + 1)) {
725 if (!bkey_pack_pos(r, t->max_key, b)) {
729 tmp.k.p = t->max_key;
734 r = tree_to_bkey(b, t, j >> (ffz(j) + 1));
740 * for failed bfloats, the lookup code falls back to comparing against
744 if (!bkey_packed(l) || !bkey_packed(r) ||
745 !bkey_packed(p) || !bkey_packed(m) ||
747 f->exponent = BFLOAT_FAILED_UNPACKED;
752 * The greatest differing bit of l and r is the first bit we must
753 * include in the bfloat mantissa we're creating in order to do
754 * comparisons - that bit always becomes the high bit of
755 * bfloat->mantissa, and thus the exponent we're calculating here is
756 * the position of what will become the low bit in bfloat->mantissa:
758 * Note that this may be negative - we may be running off the low end
759 * of the key: we handle this later:
761 high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
762 min_t(unsigned, bits, b->nr_key_bits) - 1);
763 exponent = high_bit - (bits - 1);
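/*
 * Worked example (illustrative): with a 32 bit mantissa (bits == 32) and
 * high_bit == 40, exponent == 40 - 31 == 9: the mantissa will hold key bits
 * 40..9, with bit 40 as its high bit.
 */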
766 * Then we calculate the actual shift value, from the start of the key
767 * (k->_data), to get the key bits starting at exponent:
769 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
770 shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
772 EBUG_ON(shift + bits > b->format.key_u64s * 64);
774 shift = high_bit_offset +
779 EBUG_ON(shift < KEY_PACKED_BITS_START);
781 EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
784 mantissa = bkey_mantissa(m, f, j);
787 * If we've got garbage bits, set them to all 1s - it's legal for the
788 * bfloat to compare larger than the original key, but not smaller:
791 mantissa |= ~(~0U << -exponent);
793 bfloat_mantissa_set(f, j, mantissa);
796 * The bfloat must be able to tell its key apart from the previous key -
797 * if its key and the previous key don't differ in the required bits,
798 * flag as failed - unless the keys are actually equal, in which case
799 * we aren't required to return a specific one:
802 bfloat_mantissa(f, j) == bkey_mantissa(p, f, j) &&
803 bkey_cmp_packed(b, p, m)) {
804 f->exponent = BFLOAT_FAILED_PREV;
809 * f->mantissa must compare >= the original key - for transitivity with
810 * the comparison in bset_search_tree. If we're dropping set bits,
813 if (exponent > (int) bch2_bkey_ffs(b, m)) {
814 if (j < BFLOAT_32BIT_NR
815 ? f->mantissa32 == U32_MAX
816 : f->mantissa16 == U16_MAX)
817 f->exponent = BFLOAT_FAILED_OVERFLOW;
819 if (j < BFLOAT_32BIT_NR)
826 /* bytes remaining - only valid for last bset: */
827 static unsigned __bset_tree_capacity(struct btree *b, struct bset_tree *t)
829 bset_aux_tree_verify(b);
831 return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
834 static unsigned bset_ro_tree_capacity(struct btree *b, struct bset_tree *t)
836 unsigned bytes = __bset_tree_capacity(b, t);
838 if (bytes < 7 * BFLOAT_32BIT_NR)
841 bytes -= 7 * BFLOAT_32BIT_NR;
843 return BFLOAT_32BIT_NR + bytes / 5;
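/*
 * Note (an inference from the code above, not from the original comments):
 * the 7 and 5 are bytes per tree node - the first BFLOAT_32BIT_NR nodes each
 * take 6 bytes of struct bkey_float plus 1 byte in the prev-size array,
 * later nodes take 4 + 1 bytes.
 */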
846 static unsigned bset_rw_tree_capacity(struct btree *b, struct bset_tree *t)
848 return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
851 static void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
853 struct bkey_packed *k;
856 t->extra = BSET_RW_AUX_TREE_VAL;
857 rw_aux_tree(b, t)[0].offset =
858 __btree_node_key_to_offset(b, btree_bkey_first(b, t));
860 for (k = btree_bkey_first(b, t);
861 k != btree_bkey_last(b, t);
863 if (t->size == bset_rw_tree_capacity(b, t))
866 if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
868 rw_aux_tree_set(b, t, t->size++, k);
872 static void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
874 struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
875 struct bkey_packed min_key, max_key;
876 unsigned j, cacheline = 1;
878 /* signal to make_bfloat() that they're uninitialized: */
879 min_key.u64s = max_key.u64s = 0;
881 t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
882 bset_ro_tree_capacity(b, t));
886 t->extra = BSET_NO_AUX_TREE_VAL;
890 t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
892 /* First we figure out where the first key in each cacheline is */
893 eytzinger1_for_each(j, t->size) {
894 while (bkey_to_cacheline(b, t, k) < cacheline)
895 prev = k, k = bkey_next(k);
897 if (k >= btree_bkey_last(b, t)) {
898 /* XXX: this path sucks */
903 ro_aux_tree_prev(b, t)[j] = prev->u64s;
904 bkey_float(b, t, j)->key_offset =
905 bkey_to_cacheline_offset(b, t, cacheline++, k);
907 EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
908 EBUG_ON(tree_to_bkey(b, t, j) != k);
911 while (bkey_next(k) != btree_bkey_last(b, t))
914 t->max_key = bkey_unpack_pos(b, k);
916 /* Then we build the tree */
917 eytzinger1_for_each(j, t->size)
918 make_bfloat(b, t, j, &min_key, &max_key);
921 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
925 for (i = b->set; i != t; i++)
926 BUG_ON(bset_has_rw_aux_tree(i));
928 bch2_bset_set_no_aux_tree(b, t);
930 /* round up to next cacheline: */
931 t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
932 SMP_CACHE_BYTES / sizeof(u64));
934 bset_aux_tree_verify(b);
937 void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
941 ? bset_has_rw_aux_tree(t)
942 : bset_has_ro_aux_tree(t))
945 bset_alloc_tree(b, t);
947 if (!__bset_tree_capacity(b, t))
951 __build_rw_aux_tree(b, t);
953 __build_ro_aux_tree(b, t);
955 bset_aux_tree_verify(b);
958 void bch2_bset_init_first(struct btree *b, struct bset *i)
964 memset(i, 0, sizeof(*i));
965 get_random_bytes(&i->seq, sizeof(i->seq));
966 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
968 t = &b->set[b->nsets++];
969 set_btree_bset(b, t, i);
972 void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
973 struct btree_node_entry *bne)
975 struct bset *i = &bne->keys;
978 BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
979 BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
980 BUG_ON(b->nsets >= MAX_BSETS);
982 memset(i, 0, sizeof(*i));
983 i->seq = btree_bset_first(b)->seq;
984 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
986 t = &b->set[b->nsets++];
987 set_btree_bset(b, t, i);
991 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
992 * immediate predecessor:
994 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
995 struct bkey_packed *k)
997 struct bkey_packed *p;
1001 EBUG_ON(k < btree_bkey_first(b, t) ||
1002 k > btree_bkey_last(b, t));
1004 if (k == btree_bkey_first(b, t))
1007 switch (bset_aux_tree_type(t)) {
1008 case BSET_NO_AUX_TREE:
1009 p = btree_bkey_first(b, t);
1011 case BSET_RO_AUX_TREE:
1012 j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
1015 p = j ? tree_to_bkey(b, t,
1016 __inorder_to_eytzinger1(j--,
1018 : btree_bkey_first(b, t);
1021 case BSET_RW_AUX_TREE:
1022 offset = __btree_node_key_to_offset(b, k);
1023 j = rw_aux_tree_bsearch(b, t, offset);
1024 p = j ? rw_aux_to_bkey(b, t, j - 1)
1025 : btree_bkey_first(b, t);
1032 struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
1033 struct bset_tree *t,
1034 struct bkey_packed *k,
1035 unsigned min_key_type)
1037 struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
1039 while ((p = __bkey_prev(b, t, k)) && !ret) {
1040 for (i = p; i != k; i = bkey_next(i))
1041 if (i->type >= min_key_type)
1047 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1048 BUG_ON(ret >= orig_k);
1050 for (i = ret ? bkey_next(ret) : btree_bkey_first(b, t);
1053 BUG_ON(i->type >= min_key_type);
1061 static void rw_aux_tree_fix_invalidated_key(struct btree *b,
1062 struct bset_tree *t,
1063 struct bkey_packed *k)
1065 unsigned offset = __btree_node_key_to_offset(b, k);
1066 unsigned j = rw_aux_tree_bsearch(b, t, offset);
1069 rw_aux_tree(b, t)[j].offset == offset)
1070 rw_aux_tree_set(b, t, j, k);
1072 bch2_bset_verify_rw_aux_tree(b, t);
1075 static void ro_aux_tree_fix_invalidated_key(struct btree *b,
1076 struct bset_tree *t,
1077 struct bkey_packed *k)
1079 struct bkey_packed min_key, max_key;
1080 unsigned inorder, j;
1082 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
1084 /* signal to make_bfloat() that they're uninitialized: */
1085 min_key.u64s = max_key.u64s = 0;
1087 if (bkey_next(k) == btree_bkey_last(b, t)) {
1088 t->max_key = bkey_unpack_pos(b, k);
1090 for (j = 1; j < t->size; j = j * 2 + 1)
1091 make_bfloat(b, t, j, &min_key, &max_key);
1094 inorder = bkey_to_cacheline(b, t, k);
1097 inorder < t->size) {
1098 j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
1100 if (k == tree_to_bkey(b, t, j)) {
1101 /* Fix the node this key corresponds to */
1102 make_bfloat(b, t, j, &min_key, &max_key);
1104 /* Children for which this key is the right boundary */
1105 for (j = eytzinger1_left_child(j);
1107 j = eytzinger1_right_child(j))
1108 make_bfloat(b, t, j, &min_key, &max_key);
1112 if (inorder + 1 < t->size) {
1113 j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);
1115 if (k == tree_to_prev_bkey(b, t, j)) {
1116 make_bfloat(b, t, j, &min_key, &max_key);
1118 /* Children for which this key is the left boundary */
1119 for (j = eytzinger1_right_child(j);
1121 j = eytzinger1_left_child(j))
1122 make_bfloat(b, t, j, &min_key, &max_key);
1128 * bch2_bset_fix_invalidated_key() - given an existing key @k that has been
1129 * modified, fix the auxiliary search tree by remaking all the nodes that
1130 * correspond to @k
1132 void bch2_bset_fix_invalidated_key(struct btree *b, struct bset_tree *t,
1133 struct bkey_packed *k)
1135 switch (bset_aux_tree_type(t)) {
1136 case BSET_NO_AUX_TREE:
1138 case BSET_RO_AUX_TREE:
1139 ro_aux_tree_fix_invalidated_key(b, t, k);
1141 case BSET_RW_AUX_TREE:
1142 rw_aux_tree_fix_invalidated_key(b, t, k);
1147 static void bch2_bset_fix_lookup_table(struct btree *b,
1148 struct bset_tree *t,
1149 struct bkey_packed *_where,
1150 unsigned clobber_u64s,
1153 int shift = new_u64s - clobber_u64s;
1154 unsigned l, j, where = __btree_node_key_to_offset(b, _where);
1156 EBUG_ON(bset_has_ro_aux_tree(t));
1158 if (!bset_has_rw_aux_tree(t))
1161 l = rw_aux_tree_bsearch(b, t, where);
1163 /* l is the index of the first entry >= @where */
1165 EBUG_ON(l < t->size && rw_aux_tree(b, t)[l].offset < where);
1166 EBUG_ON(l && rw_aux_tree(b, t)[l - 1].offset >= where);
1168 if (!l) /* never delete first entry */
1170 else if (l < t->size &&
1171 where < t->end_offset &&
1172 rw_aux_tree(b, t)[l].offset == where)
1173 rw_aux_tree_set(b, t, l++, _where);
1179 rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
1184 rw_aux_tree(b, t)[j].offset + shift ==
1185 rw_aux_tree(b, t)[l - 1].offset)
1188 memmove(&rw_aux_tree(b, t)[l],
1189 &rw_aux_tree(b, t)[j],
1190 (void *) &rw_aux_tree(b, t)[t->size] -
1191 (void *) &rw_aux_tree(b, t)[j]);
1194 for (j = l; j < t->size; j++)
1195 rw_aux_tree(b, t)[j].offset += shift;
1197 EBUG_ON(l < t->size &&
1198 rw_aux_tree(b, t)[l].offset ==
1199 rw_aux_tree(b, t)[l - 1].offset);
1201 if (t->size < bset_rw_tree_capacity(b, t) &&
1203 ? rw_aux_tree(b, t)[l].offset
1205 rw_aux_tree(b, t)[l - 1].offset >
1206 L1_CACHE_BYTES / sizeof(u64)) {
1207 struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
1208 struct bkey_packed *end = l < t->size
1209 ? rw_aux_to_bkey(b, t, l)
1210 : btree_bkey_last(b, t);
1211 struct bkey_packed *k = start;
1218 if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
1219 memmove(&rw_aux_tree(b, t)[l + 1],
1220 &rw_aux_tree(b, t)[l],
1221 (void *) &rw_aux_tree(b, t)[t->size] -
1222 (void *) &rw_aux_tree(b, t)[l]);
1224 rw_aux_tree_set(b, t, l, k);
1230 bch2_bset_verify_rw_aux_tree(b, t);
1231 bset_aux_tree_verify(b);
1234 void bch2_bset_insert(struct btree *b,
1235 struct btree_node_iter *iter,
1236 struct bkey_packed *where,
1237 struct bkey_i *insert,
1238 unsigned clobber_u64s)
1240 struct bkey_format *f = &b->format;
1241 struct bset_tree *t = bset_tree_last(b);
1242 struct bkey_packed packed, *src = bkey_to_packed(insert);
1244 bch2_bset_verify_rw_aux_tree(b, t);
1246 if (bch2_bkey_pack_key(&packed, &insert->k, f))
1249 if (!bkey_whiteout(&insert->k))
1250 btree_keys_account_key_add(&b->nr, t - b->set, src);
1252 if (src->u64s != clobber_u64s) {
1253 u64 *src_p = where->_data + clobber_u64s;
1254 u64 *dst_p = where->_data + src->u64s;
1256 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
1257 (int) clobber_u64s - src->u64s);
1259 memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1260 le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
1261 set_btree_bset_end(b, t);
1264 memcpy_u64s(where, src,
1265 bkeyp_key_u64s(f, src));
1266 memcpy_u64s(bkeyp_val(f, where), &insert->v,
1267 bkeyp_val_u64s(f, src));
1269 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
1271 bch2_verify_key_order(b, iter, where);
1272 bch2_verify_btree_nr_keys(b);
1275 void bch2_bset_delete(struct btree *b,
1276 struct bkey_packed *where,
1277 unsigned clobber_u64s)
1279 struct bset_tree *t = bset_tree_last(b);
1280 u64 *src_p = where->_data + clobber_u64s;
1281 u64 *dst_p = where->_data;
1283 bch2_bset_verify_rw_aux_tree(b, t);
1285 EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
1287 memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1288 le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
1289 set_btree_bset_end(b, t);
1291 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
1297 static struct bkey_packed *bset_search_write_set(const struct btree *b,
1298 struct bset_tree *t,
1300 const struct bkey_packed *packed_search)
1302 unsigned l = 0, r = t->size;
1304 while (l + 1 != r) {
1305 unsigned m = (l + r) >> 1;
1307 if (bkey_cmp(rw_aux_tree(b, t)[m].k, search) < 0)
1313 return rw_aux_to_bkey(b, t, l);
1317 static int bset_search_tree_slowpath(const struct btree *b,
1318 struct bset_tree *t, struct bpos *search,
1319 const struct bkey_packed *packed_search,
1322 return bkey_cmp_p_or_unp(b, tree_to_bkey(b, t, n),
1323 packed_search, search) < 0;
1327 static struct bkey_packed *bset_search_tree(const struct btree *b,
1328 struct bset_tree *t,
1330 const struct bkey_packed *packed_search)
1332 struct ro_aux_tree *base = ro_aux_tree_base(b, t);
1333 struct bkey_float *f = bkey_float_get(base, 1);
1335 unsigned inorder, n = 1;
1338 if (likely(n << 4 < t->size)) {
1339 p = bkey_float_get(base, n << 4);
1341 } else if (n << 3 < t->size) {
1342 inorder = __eytzinger1_to_inorder(n, t->size, t->extra);
1343 p = bset_cacheline(b, t, inorder);
1344 #ifdef CONFIG_X86_64
1345 asm(".intel_syntax noprefix;"
1346 "prefetcht0 [%0 - 127 + 64 * 0];"
1347 "prefetcht0 [%0 - 127 + 64 * 1];"
1348 "prefetcht0 [%0 - 127 + 64 * 2];"
1349 "prefetcht0 [%0 - 127 + 64 * 3];"
1350 ".att_syntax prefix;"
1354 prefetch(p + L1_CACHE_BYTES * 0);
1355 prefetch(p + L1_CACHE_BYTES * 1);
1356 prefetch(p + L1_CACHE_BYTES * 2);
1357 prefetch(p + L1_CACHE_BYTES * 3);
1359 } else if (n >= t->size)
1362 f = bkey_float_get(base, n);
1364 if (packed_search &&
1365 likely(f->exponent < BFLOAT_FAILED))
1366 n = n * 2 + (bfloat_mantissa(f, n) <
1367 bkey_mantissa(packed_search, f, n));
1369 n = n * 2 + bset_search_tree_slowpath(b, t,
1370 &search, packed_search, n);
1371 } while (n < t->size);
1373 inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);
1376 * n would have been the node we recursed to - the low bit tells us if
1377 * we recursed left or recursed right.
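/*
 * Added note: we recursed right (low bit of @n set) when the node's key,
 * approximated by its bfloat, compared less than the search key - in that
 * case the linear search starts at node (n >> 1)'s own key; otherwise we
 * step back one node with eytzinger1_prev() below.
 */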
1380 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1383 n = eytzinger1_prev(n >> 1, t->size);
1384 f = bkey_float_get(base, n);
1385 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1387 return btree_bkey_first(b, t);
1392 * Returns the first key greater than or equal to @search
1394 __always_inline __flatten
1395 static struct bkey_packed *bch2_bset_search(struct btree *b,
1396 struct bset_tree *t,
1398 struct bkey_packed *packed_search,
1399 const struct bkey_packed *lossy_packed_search,
1400 bool strictly_greater)
1402 struct bkey_packed *m;
1405 * First, we search for a cacheline, then we do a linear search within
1406 * that cacheline.
1408 * To search for the cacheline, there are three different possibilities:
1409 * * The set is too small to have a search tree, so we just do a linear
1410 * search over the whole set.
1411 * * The set is the one we're currently inserting into; keeping a full
1412 * auxiliary search tree up to date would be too expensive, so we
1413 * use a much simpler lookup table to do a binary search -
1414 * bset_search_write_set().
1415 * * Or we use the auxiliary search tree we constructed earlier -
1416 * bset_search_tree()
1419 switch (bset_aux_tree_type(t)) {
1420 case BSET_NO_AUX_TREE:
1421 m = btree_bkey_first(b, t);
1423 case BSET_RW_AUX_TREE:
1424 m = bset_search_write_set(b, t, search, lossy_packed_search);
1426 case BSET_RO_AUX_TREE:
1428 * Each node in the auxiliary search tree covers a certain range
1429 * of bits, and keys above and below the set it covers might
1430 * differ outside those bits - so we have to special case the
1431 * start and end - handle that here:
1434 if (bkey_cmp(search, t->max_key) > 0)
1435 return btree_bkey_last(b, t);
1437 m = bset_search_tree(b, t, search, lossy_packed_search);
1441 if (lossy_packed_search)
1442 while (m != btree_bkey_last(b, t) &&
1443 !btree_iter_pos_cmp_p_or_unp(b, search, lossy_packed_search,
1444 m, strictly_greater))
1448 while (m != btree_bkey_last(b, t) &&
1449 !btree_iter_pos_cmp_packed(b, &search, m, strictly_greater))
1452 if (btree_keys_expensive_checks(b)) {
1453 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
1456 btree_iter_pos_cmp_p_or_unp(b, search, packed_search,
1457 prev, strictly_greater));
1463 /* Btree node iterator */
1465 void bch2_btree_node_iter_push(struct btree_node_iter *iter,
1467 const struct bkey_packed *k,
1468 const struct bkey_packed *end)
1470 __bch2_btree_node_iter_push(iter, b, k, end);
1471 bch2_btree_node_iter_sort(iter, b);
1474 noinline __flatten __attribute__((cold))
1475 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
1476 struct btree *b, struct bpos search,
1477 bool strictly_greater, bool is_extents)
1479 struct bset_tree *t;
1481 trace_bkey_pack_pos_fail(search);
1484 __bch2_btree_node_iter_push(iter, b,
1485 bch2_bset_search(b, t, search, NULL, NULL,
1487 btree_bkey_last(b, t));
1489 bch2_btree_node_iter_sort(iter, b);
1493 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
1496 * Main entry point to the lookup code for individual btree nodes:
1500 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
1501 * keys. This doesn't matter for most code, but it does matter for lookups.
1503 * Some adjacent keys with a string of equal keys:
1506 * If you search for k, the lookup code isn't guaranteed to return you any
1507 * specific k. The lookup code is conceptually doing a binary search and
1508 * iterating backwards is very expensive so if the pivot happens to land at the
1509 * last k that's what you'll get.
1511 * This works out ok, but it's something to be aware of:
1513 * - For non extents, we guarantee that the live key comes last - see
1514 * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
1515 * see will only be deleted keys you don't care about.
1517 * - For extents, deleted keys sort last (see the comment at the top of this
1518 * file). But when you're searching for extents, you actually want the first
1519 * key strictly greater than your search key - an extent that compares equal
1520 * to the search key is going to have 0 sectors after the search key.
1522 * But this does mean that we can't just search for
1523 * bkey_successor(start_of_range) to get the first extent that overlaps with
1524 * the range we want - if we're unlucky and there's an extent that ends
1525 * exactly where we searched, then there could be a deleted key at the same
1526 * position and we'd get that when we search instead of the preceding extent
1529 * So we've got to search for start_of_range, then after the lookup iterate
1530 * past any extents that compare equal to the position we searched for.
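/*
 * Illustrative usage sketch (added; example_walk_node is hypothetical, the
 * iterator calls are the ones defined in this file):
 */
#if 0
static void example_walk_node(struct btree *b, struct bpos search,
			      bool is_extents)
{
	struct btree_node_iter iter;
	struct bkey_packed *k;

	bch2_btree_node_iter_init(&iter, b, search, false, is_extents);

	while ((k = bch2_btree_node_iter_peek(&iter, b))) {
		/* @k is the next key >= @search, in sort order */
		bch2_btree_node_iter_advance(&iter, b);
	}
}
#endif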
1532 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
1533 struct btree *b, struct bpos search,
1534 bool strictly_greater, bool is_extents)
1536 struct bset_tree *t;
1537 struct bkey_packed p, *packed_search = NULL;
1539 EBUG_ON(bkey_cmp(search, b->data->min_key) < 0);
1540 bset_aux_tree_verify(b);
1542 __bch2_btree_node_iter_init(iter, is_extents);
1544 switch (bch2_bkey_pack_pos_lossy(&p, search, b)) {
1545 case BKEY_PACK_POS_EXACT:
1548 case BKEY_PACK_POS_SMALLER:
1549 packed_search = NULL;
1551 case BKEY_PACK_POS_FAIL:
1552 btree_node_iter_init_pack_failed(iter, b, search,
1553 strictly_greater, is_extents);
1558 __bch2_btree_node_iter_push(iter, b,
1559 bch2_bset_search(b, t, search,
1562 btree_bkey_last(b, t));
1564 bch2_btree_node_iter_sort(iter, b);
1567 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
1571 struct bset_tree *t;
1573 __bch2_btree_node_iter_init(iter, is_extents);
1576 __bch2_btree_node_iter_push(iter, b,
1577 btree_bkey_first(b, t),
1578 btree_bkey_last(b, t));
1579 bch2_btree_node_iter_sort(iter, b);
1582 struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
1584 struct bset_tree *t)
1586 struct btree_node_iter_set *set;
1588 btree_node_iter_for_each(iter, set)
1589 if (set->end == t->end_offset)
1590 return __btree_node_offset_to_key(b, set->k);
1592 return btree_bkey_last(b, t);
1595 static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
1601 if ((ret = (btree_node_iter_cmp(iter, b,
1603 iter->data[first + 1]) > 0)))
1604 swap(iter->data[first], iter->data[first + 1]);
1608 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
1611 /* unrolled bubble sort: */
1613 if (!__btree_node_iter_set_end(iter, 2)) {
1614 btree_node_iter_sort_two(iter, b, 0);
1615 btree_node_iter_sort_two(iter, b, 1);
1618 if (!__btree_node_iter_set_end(iter, 1))
1619 btree_node_iter_sort_two(iter, b, 0);
1622 void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
1623 struct btree_node_iter_set *set)
1625 struct btree_node_iter_set *last =
1626 iter->data + ARRAY_SIZE(iter->data) - 1;
1628 memmove(&set[0], &set[1], (void *) last - (void *) set);
1629 *last = (struct btree_node_iter_set) { 0, 0 };
1632 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1635 iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
1637 EBUG_ON(iter->data->k > iter->data->end);
1639 if (unlikely(__btree_node_iter_set_end(iter, 0))) {
1640 bch2_btree_node_iter_set_drop(iter, iter->data);
1644 if (__btree_node_iter_set_end(iter, 1))
1647 if (!btree_node_iter_sort_two(iter, b, 0))
1650 if (__btree_node_iter_set_end(iter, 2))
1653 btree_node_iter_sort_two(iter, b, 1);
1657 * bch2_btree_node_iter_advance - advance @iter by one key
1659 * Doesn't do debug checks - for cases (e.g. insert_fixup_extent()) where a bset
1660 * might momentarily have out of order extents.
1662 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1665 #ifdef CONFIG_BCACHEFS_DEBUG
1666 struct bkey_packed *k = bch2_btree_node_iter_peek_all(iter, b);
1668 __bch2_btree_node_iter_advance(iter, b);
1669 bch2_btree_node_iter_next_check(iter, b, k);
1671 __bch2_btree_node_iter_advance(iter, b);
1675 static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
1677 unsigned n = ARRAY_SIZE(iter->data);
1679 while (n && __btree_node_iter_set_end(iter, n - 1))
1688 struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *iter,
1690 unsigned min_key_type)
1692 struct bkey_packed *k, *prev = NULL;
1693 struct bkey_packed *orig_pos = bch2_btree_node_iter_peek_all(iter, b);
1694 struct btree_node_iter_set *set;
1695 struct bset_tree *t;
1698 bch2_btree_node_iter_verify(iter, b);
1700 for_each_bset(b, t) {
1701 k = bch2_bkey_prev_filter(b, t,
1702 bch2_btree_node_iter_bset_pos(iter, b, t),
1705 (!prev || __btree_node_iter_cmp(iter->is_extents, b,
1708 end = t->end_offset;
1716 * We're manually memmoving instead of just calling sort() to ensure the
1717 * prev we picked ends up in slot 0 - sort won't necessarily put it
1718 * there because of duplicate deleted keys:
1720 btree_node_iter_for_each(iter, set)
1721 if (set->end == end)
1724 BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
1726 BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
1728 memmove(&iter->data[1],
1730 (void *) set - (void *) &iter->data[0]);
1732 iter->data[0].k = __btree_node_key_to_offset(b, prev);
1733 iter->data[0].end = end;
1735 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1736 struct btree_node_iter iter2 = *iter;
1739 bch2_btree_node_iter_advance(&iter2, b);
1741 while ((k = bch2_btree_node_iter_peek_all(&iter2, b)) != orig_pos) {
1742 BUG_ON(k->type >= min_key_type);
1743 bch2_btree_node_iter_advance(&iter2, b);
1750 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
1754 struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
1756 return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
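/*
 * Usage note (illustrative): @u is caller-provided storage for the unpacked
 * key, so the returned bkey_s_c is only valid as long as @u is, e.g.:
 *
 *	struct bkey u;
 *	struct bkey_s_c k = bch2_btree_node_iter_peek_unpack(&iter, b, &u);
 */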
1761 void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
1763 struct bset_tree *t;
1765 for_each_bset(b, t) {
1766 enum bset_aux_tree_type type = bset_aux_tree_type(t);
1769 stats->sets[type].nr++;
1770 stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
1773 if (bset_has_ro_aux_tree(t)) {
1774 stats->floats += t->size - 1;
1776 for (j = 1; j < t->size; j++)
1777 switch (bkey_float(b, t, j)->exponent) {
1778 case BFLOAT_FAILED_UNPACKED:
1779 stats->failed_unpacked++;
1781 case BFLOAT_FAILED_PREV:
1782 stats->failed_prev++;
1784 case BFLOAT_FAILED_OVERFLOW:
1785 stats->failed_overflow++;
1792 int bch2_bkey_print_bfloat(struct btree *b, struct bkey_packed *k,
1793 char *buf, size_t size)
1795 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1796 struct bkey_packed *l, *r, *p;
1798 char buf1[200], buf2[200];
1804 if (!bset_has_ro_aux_tree(t))
1807 j = __inorder_to_eytzinger1(bkey_to_cacheline(b, t, k), t->size, t->extra);
1810 k == tree_to_bkey(b, t, j))
1811 switch (bkey_float(b, t, j)->exponent) {
1812 case BFLOAT_FAILED_UNPACKED:
1813 uk = bkey_unpack_key(b, k);
1814 return scnprintf(buf, size,
1815 " failed unpacked at depth %u\n"
1818 uk.p.inode, uk.p.offset);
1819 case BFLOAT_FAILED_PREV:
1820 p = tree_to_prev_bkey(b, t, j);
1821 l = is_power_of_2(j)
1822 ? btree_bkey_first(b, t)
1823 : tree_to_prev_bkey(b, t, j >> ffs(j));
1824 r = is_power_of_2(j + 1)
1825 ? bch2_bkey_prev_all(b, t, btree_bkey_last(b, t))
1826 : tree_to_bkey(b, t, j >> (ffz(j) + 1));
1828 up = bkey_unpack_key(b, p);
1829 uk = bkey_unpack_key(b, k);
1830 bch2_to_binary(buf1, high_word(&b->format, p), b->nr_key_bits);
1831 bch2_to_binary(buf2, high_word(&b->format, k), b->nr_key_bits);
1833 return scnprintf(buf, size,
1834 " failed prev at depth %u\n"
1835 "\tkey starts at bit %u but first differing bit at %u\n"
1841 bch2_bkey_greatest_differing_bit(b, l, r),
1842 bch2_bkey_greatest_differing_bit(b, p, k),
1843 uk.p.inode, uk.p.offset,
1844 up.p.inode, up.p.offset,
1846 case BFLOAT_FAILED_OVERFLOW:
1847 uk = bkey_unpack_key(b, k);
1848 return scnprintf(buf, size,
1849 " failed overflow at depth %u\n"
1852 uk.p.inode, uk.p.offset);