// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "btree_cache.h"
#include "bset.h"
#include "eytzinger.h"
#include "trace.h"
#include "util.h"

#include <asm/unaligned.h>
#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
						  struct btree *);

static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
{
	unsigned n = ARRAY_SIZE(iter->data);

	while (n && __btree_node_iter_set_end(iter, n - 1))
		--n;

	return n;
}

struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
	return bch2_bkey_to_bset_inlined(b, k);
}
/*
 * There are never duplicate live keys in the btree - but including keys that
 * have been flagged as deleted (and will be cleaned up later) we _will_ see
 * duplicates.
 *
 * Thus the sort order is: usual key comparison first, but for keys that compare
 * equal the deleted key(s) come first, and the (at most one) live version comes
 * last.
 *
 * The main reason for this is insertion: to handle overwrites, we first iterate
 * over keys that compare equal to our insert key, and then insert immediately
 * prior to the first key greater than the key we're inserting - our insert
 * position will be after all keys that compare equal to our insert key, which
 * by the time we actually do the insert will all be deleted.
 */
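/*
 * Illustrative sketch of the rule above (compiled out, not used by this
 * file): find the position to insert at by walking past every key that
 * compares <= the insert position. Helper names are the ones used later in
 * this file; the real insert path goes through the btree node iterator.
 */
#if 0
static struct bkey_packed *example_insert_pos(struct btree *b,
					      struct bset_tree *t,
					      struct bpos *insert_pos)
{
	struct bkey_packed *k = btree_bkey_first(b, t);

	/*
	 * Keys equal to insert_pos sort before it (they're the deleted
	 * versions), so this lands just past them, immediately before the
	 * first strictly greater key:
	 */
	while (k != btree_bkey_last(b, t) &&
	       bkey_iter_pos_cmp(b, k, insert_pos) <= 0)
		k = bkey_p_next(k);

	return k;
}
#endif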
void bch2_dump_bset(struct bch_fs *c, struct btree *b,
		    struct bset *i, unsigned set)
{
	struct bkey_packed *_k, *_n;
	struct bkey uk, n;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;

	if (!i->u64s)
		return;

	for (_k = i->start;
	     _k < vstruct_last(i);
	     _k = _n) {
		_n = bkey_p_next(_k);

		if (!_k->u64s) {
			printk(KERN_ERR "block %u key %5zu - u64s 0? aieee!\n", set,
			       _k->_data - i->_data);
			break;
		}

		k = bkey_disassemble(b, _k, &uk);

		printbuf_reset(&buf);
		if (c)
			bch2_bkey_val_to_text(&buf, c, k);
		else
			bch2_bkey_to_text(&buf, k.k);
		printk(KERN_ERR "block %u key %5zu: %s\n", set,
		       _k->_data - i->_data, buf.buf);

		if (_n == vstruct_last(i))
			continue;

		n = bkey_unpack_key(b, _n);

		if (bpos_lt(n.p, k.k->p)) {
			printk(KERN_ERR "Key skipped backwards\n");
			continue;
		}

		if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
			printk(KERN_ERR "Duplicate keys\n");
	}

	printbuf_exit(&buf);
}
void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t;

	console_lock();
	for_each_bset(b, t)
		bch2_dump_bset(c, b, bset(b, t), t - b->set);
	console_unlock();
}
void bch2_dump_btree_node_iter(struct btree *b,
			       struct btree_node_iter *iter)
{
	struct btree_node_iter_set *set;
	struct printbuf buf = PRINTBUF;

	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);

	btree_node_iter_for_each(iter, set) {
		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
		struct bset_tree *t = bch2_bkey_to_bset(b, k);
		struct bkey uk = bkey_unpack_key(b, k);

		printbuf_reset(&buf);
		bch2_bkey_to_text(&buf, &uk);
		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf.buf);
	}

	printbuf_exit(&buf);
}
#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *b)
{
	struct bset_tree *t;
	struct bkey_packed *k;
	struct btree_nr_keys nr = { 0 };

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k))
				btree_keys_account_key_add(&nr, t - b->set, k);

	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
					    struct btree *b)
{
	struct btree_node_iter iter = *_iter;
	const struct bkey_packed *k, *n;

	k = bch2_btree_node_iter_peek_all(&iter, b);
	__bch2_btree_node_iter_advance(&iter, b);
	n = bch2_btree_node_iter_peek_all(&iter, b);

	bkey_unpack_key(b, k);

	if (n &&
	    bkey_iter_cmp(b, k, n) > 0) {
		struct btree_node_iter_set *set;
		struct bkey ku = bkey_unpack_key(b, k);
		struct bkey nu = bkey_unpack_key(b, n);
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &ku);
		bch2_bkey_to_text(&buf2, &nu);
		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
		       buf1.buf, buf2.buf);
		printk(KERN_ERR "iter was:");

		btree_node_iter_for_each(_iter, set) {
			struct bkey_packed *k2 = __btree_node_offset_to_key(b, set->k);
			struct bset_tree *t = bch2_bkey_to_bset(b, k2);
			printk(" [%zi %zi]", t - b->set,
			       k2->_data - bset(b, t)->_data);
		}
		panic("\n");
	}
}
void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
				 struct btree *b)
{
	struct btree_node_iter_set *set, *s2;
	struct bkey_packed *k, *p;
	struct bset_tree *t;

	if (bch2_btree_node_iter_end(iter))
		return;

	/* Verify no duplicates: */
	btree_node_iter_for_each(iter, set) {
		BUG_ON(set->k > set->end);
		btree_node_iter_for_each(iter, s2)
			BUG_ON(set != s2 && set->end == s2->end);
	}

	/* Verify that set->end is correct: */
	btree_node_iter_for_each(iter, set) {
		for_each_bset(b, t)
			if (set->end == t->end_offset)
				goto found;
		BUG();
found:
		BUG_ON(set->k < btree_bkey_first_offset(t) ||
		       set->k >= t->end_offset);
	}

	/* Verify iterator is sorted: */
	btree_node_iter_for_each(iter, set)
		BUG_ON(set != iter->data &&
		       btree_node_iter_cmp(b, set[-1], set[0]) > 0);

	k = bch2_btree_node_iter_peek_all(iter, b);

	for_each_bset(b, t) {
		if (iter->data[0].end == t->end_offset)
			continue;

		p = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));

		BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
	}
}
void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
			    struct bkey_packed *insert, unsigned clobber_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
	struct bkey_packed *next = (void *) ((u64 *) where->_data + clobber_u64s);
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
#if 0
	BUG_ON(prev &&
	       bkey_iter_cmp(b, prev, insert) > 0);
#else
	if (prev &&
	    bkey_iter_cmp(b, prev, insert) > 0) {
		struct bkey k1 = bkey_unpack_key(b, prev);
		struct bkey k2 = bkey_unpack_key(b, insert);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("prev > insert:\n"
		      "prev    key %s\n"
		      "insert  key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
#if 0
	BUG_ON(next != btree_bkey_last(b, t) &&
	       bkey_iter_cmp(b, insert, next) > 0);
#else
	if (next != btree_bkey_last(b, t) &&
	    bkey_iter_cmp(b, insert, next) > 0) {
		struct bkey k1 = bkey_unpack_key(b, insert);
		struct bkey k2 = bkey_unpack_key(b, next);

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&buf1, &k1);
		bch2_bkey_to_text(&buf2, &k2);

		panic("insert > next:\n"
		      "insert key %s\n"
		      "next   key %s\n",
		      buf1.buf, buf2.buf);
	}
#endif
}

#else

static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
						   struct btree *b) {}

#endif
/* Auxiliary search trees */

#define BFLOAT_FAILED_UNPACKED	U8_MAX
#define BFLOAT_FAILED		U8_MAX

struct bkey_float {
	u8		exponent;
	u8		key_offset;
	u16		mantissa;
};
#define BKEY_MANTISSA_BITS	16

static unsigned bkey_float_byte_offset(unsigned idx)
{
	return idx * sizeof(struct bkey_float);
}

struct ro_aux_tree {
	u8			nothing[0];
	struct bkey_float	f[];
};

struct rw_aux_tree {
	u16		offset;
	struct bpos	k;
};
static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
	BUG_ON(t->aux_data_offset == U16_MAX);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
				     t->size * sizeof(u8), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
	default:
		BUG();
	}
}
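/*
 * Worked example of the sizing above (numbers assumed): with
 * sizeof(struct bkey_float) == 4 plus one u8 of prev-key size per node, an
 * RO aux tree with t->size == 100 needs 100 * (4 + 1) = 500 bytes, i.e.
 * DIV_ROUND_UP(500, 8) = 63 u64s past t->aux_data_offset.
 */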
static unsigned bset_aux_tree_buf_start(const struct btree *b,
					const struct bset_tree *t)
{
	return t == b->set
		? DIV_ROUND_UP(b->unpack_fn_len, 8)
		: bset_aux_tree_buf_end(t - 1);
}

static void *__aux_tree_base(const struct btree *b,
			     const struct bset_tree *t)
{
	return b->aux_data + t->aux_data_offset * 8;
}

static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
					    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t);
}

static u8 *ro_aux_tree_prev(const struct btree *b,
			    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
}

static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)
{
	return ro_aux_tree_base(b, t)->f + idx;
}

static void bset_aux_tree_verify(const struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	const struct bset_tree *t;

	for_each_bset(b, t) {
		if (t->aux_data_offset == U16_MAX)
			continue;

		BUG_ON(t != b->set &&
		       t[-1].aux_data_offset == U16_MAX);

		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
	}
#endif
}
void bch2_btree_keys_init(struct btree *b)
{
	unsigned i;

	b->nsets = 0;
	memset(&b->nr, 0, sizeof(b->nr));

	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].data_offset = U16_MAX;

	bch2_bset_set_no_aux_tree(b, b->set);
}
/* Binary tree stuff for auxiliary search trees */

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
 * and then bkey_float->key_offset gives us the offset within that cacheline,
 * in units of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * get us from one to the other.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
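/*
 * Illustrative example of the arithmetic above (BSET_CACHELINE assumed to be
 * 256 bytes): if eytzinger1_to_inorder() maps node j to cacheline 3 and
 * bkey_float(b, t, j)->key_offset is 5, the key lives 5 * 8 = 40 bytes into
 * that cacheline, i.e. cacheline_to_bkey(b, t, 3, 5) ==
 * bset_cacheline(b, t, 3) + 40.
 */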
static inline void *bset_cacheline(const struct btree *b,
				   const struct bset_tree *t,
				   unsigned cacheline)
{
	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
				   L1_CACHE_BYTES) +
		cacheline * BSET_CACHELINE;
}

static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned cacheline,
					     unsigned offset)
{
	return bset_cacheline(b, t, cacheline) + offset * 8;
}

static unsigned bkey_to_cacheline(const struct btree *b,
				  const struct bset_tree *t,
				  const struct bkey_packed *k)
{
	return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
}

static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
					  const struct bset_tree *t,
					  unsigned cacheline,
					  const struct bkey_packed *k)
{
	return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
}

static unsigned bkey_to_cacheline_offset(const struct btree *b,
					 const struct bset_tree *t,
					 unsigned cacheline,
					 const struct bkey_packed *k)
{
	size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);

	EBUG_ON(m > U8_MAX);
	return m;
}

static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
					       const struct bset_tree *t,
					       unsigned j)
{
	return cacheline_to_bkey(b, t,
			__eytzinger1_to_inorder(j, t->size - 1, t->extra),
			bkey_float(b, t, j)->key_offset);
}

static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned j)
{
	unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];

	return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
}

static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);

	return __aux_tree_base(b, t);
}
/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
					  struct bset_tree *t,
					  unsigned j)
{
	return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
}

static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
			    unsigned j, struct bkey_packed *k)
{
	EBUG_ON(k >= btree_bkey_last(b, t));

	rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
		.offset	= __btree_node_key_to_offset(b, k),
		.k	= bkey_unpack_pos(b, k),
	};
}

static void bch2_bset_verify_rw_aux_tree(struct btree *b,
					 struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	unsigned j = 0;

	if (!bch2_expensive_debug_checks)
		return;

	BUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	BUG_ON(t->size < 1);
	BUG_ON(rw_aux_to_bkey(b, t, j) != k);

	goto start;
	while (1) {
		if (rw_aux_to_bkey(b, t, j) == k) {
			BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
				break;

			BUG_ON(rw_aux_tree(b, t)[j].offset <=
			       rw_aux_tree(b, t)[j - 1].offset);
		}

		k = bkey_p_next(k);
		BUG_ON(k >= btree_bkey_last(b, t));
	}
}
/* returns idx of first entry >= offset: */
static unsigned rw_aux_tree_bsearch(struct btree *b,
				    struct bset_tree *t,
				    unsigned offset)
{
	unsigned bset_offs = offset - btree_bkey_first_offset(t);
	unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
	unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
	EBUG_ON(!t->size);
	EBUG_ON(idx > t->size);

	while (idx < t->size &&
	       rw_aux_tree(b, t)[idx].offset < offset)
		idx++;

	while (idx &&
	       rw_aux_tree(b, t)[idx - 1].offset >= offset)
		idx--;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset < offset);
	EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
	EBUG_ON(idx + 1 < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx + 1].offset);

	return idx;
}
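/*
 * Worked example of the interpolated starting point above (numbers assumed):
 * with bset_offs == 300, bset_u64s == 1200 and t->size == 40, the initial
 * guess is 300 * 40 / 1200 == 10; the two correction loops then walk idx
 * forward or back until it lands on the first entry with offset >= the
 * search offset.
 */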
static inline unsigned bkey_mantissa(const struct bkey_packed *k,
				     const struct bkey_float *f,
				     unsigned idx)
{
	u64 v;

	EBUG_ON(!bkey_packed(k));

	v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));

	/*
	 * In little endian, we're shifting off low bits (and then the bits we
	 * want are at the low end), in big endian we're shifting off high bits
	 * (and then the bits we want are at the high end, so we shift them
	 * back down):
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v >>= f->exponent & 7;
#else
	v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
#endif

	return (u16) v;
}
static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
					unsigned j,
					struct bkey_packed *min_key,
					struct bkey_packed *max_key)
{
	struct bkey_float *f = bkey_float(b, t, j);
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
		: tree_to_prev_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));
	unsigned mantissa;
	int shift, exponent, high_bit;

	/*
	 * for failed bfloats, the lookup code falls back to comparing against
	 * the original key.
	 */
	if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
	    !b->nr_key_bits) {
		f->exponent = BFLOAT_FAILED_UNPACKED;
		return;
	}

	/*
	 * The greatest differing bit of l and r is the first bit we must
	 * include in the bfloat mantissa we're creating in order to do
	 * comparisons - that bit always becomes the high bit of
	 * bfloat->mantissa, and thus the exponent we're calculating here is
	 * the position of what will become the low bit in bfloat->mantissa:
	 *
	 * Note that this may be negative - we may be running off the low end
	 * of the key: we handle this later:
	 *
	 * (A worked example follows this function.)
	 */
	high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
		       min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
	exponent = high_bit - (BKEY_MANTISSA_BITS - 1);

	/*
	 * Then we calculate the actual shift value, from the start of the key
	 * (k->_data), to get the key bits starting at exponent:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;

	EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
	shift = high_bit_offset +
		b->nr_key_bits -
		exponent -
		BKEY_MANTISSA_BITS;

	EBUG_ON(shift < KEY_PACKED_BITS_START);
#endif
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
	mantissa = bkey_mantissa(m, f, j);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the
	 * bfloat to compare larger than the original key, but not smaller:
	 */
	if (exponent < 0)
		mantissa |= ~(~0U << -exponent);

	f->mantissa = mantissa;
}
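/*
 * Worked example for make_bfloat() (numbers assumed): with
 * BKEY_MANTISSA_BITS == 16, if the greatest differing bit of l and r is bit
 * 40 of the key, then high_bit == 40 and exponent == 40 - (16 - 1) == 25,
 * i.e. the mantissa holds key bits [25, 40]. On little endian, with
 * key_u64s == 2 and nr_key_bits == 80, the shift from the start of k->_data
 * is (2 * 64 - 80) + 25 == 73.
 */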
/* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	bset_aux_tree_verify(b);

	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}

static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) /
		(sizeof(struct bkey_float) + sizeof(u8));
}

static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}

static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k;

	t->size = 1;
	t->extra = BSET_RW_AUX_TREE_VAL;
	rw_aux_tree(b, t)[0].offset =
		__btree_node_key_to_offset(b, btree_bkey_first(b, t));

	bset_tree_for_each_key(b, t, k) {
		if (t->size == bset_rw_tree_capacity(b, t))
			break;

		if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
		    L1_CACHE_BYTES)
			rw_aux_tree_set(b, t, t->size++, k);
	}
}
static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned cacheline = 1;

	t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
		      bset_ro_tree_capacity(b, t));
retry:
	if (t->size < 2) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size - 1) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
			prev = k, k = bkey_p_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */
			t->size--;
			goto retry;
		}

		ro_aux_tree_prev(b, t)[j] = prev->u64s;
		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

		EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

	while (k != btree_bkey_last(b, t))
		prev = k, k = bkey_p_next(k);

	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;
	}

	if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
		bkey_init(&max_key.k);
		max_key.k.p = b->data->max_key;
	}

	/* Then we build the tree */
	eytzinger1_for_each(j, t->size - 1)
		make_bfloat(b, t, j,
			    bkey_to_packed(&min_key),
			    bkey_to_packed(&max_key));
}
static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	struct bset_tree *i;

	for (i = b->set; i != t; i++)
		BUG_ON(bset_has_rw_aux_tree(i));

	bch2_bset_set_no_aux_tree(b, t);

	/* round up to next cacheline: */
	t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
				      SMP_CACHE_BYTES / sizeof(u64));

	bset_aux_tree_verify(b);
}

void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
			      bool writeable)
{
	if (writeable
	    ? bset_has_rw_aux_tree(t)
	    : bset_has_ro_aux_tree(t))
		return;

	bset_alloc_tree(b, t);

	if (!__bset_tree_capacity(b, t))
		return;

	if (writeable)
		__build_rw_aux_tree(b, t);
	else
		__build_ro_aux_tree(b, t);

	bset_aux_tree_verify(b);
}
void bch2_bset_init_first(struct btree *b, struct bset *i)
{
	struct bset_tree *t;

	BUG_ON(b->nsets);

	memset(i, 0, sizeof(*i));
	get_random_bytes(&i->seq, sizeof(i->seq));
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

void bch2_bset_init_next(struct btree *b, struct btree_node_entry *bne)
{
	struct bset *i = &bne->keys;
	struct bset_tree *t;

	BUG_ON(bset_byte_offset(b, bne) >= btree_buf_bytes(b));
	BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
	BUG_ON(b->nsets >= MAX_BSETS);

	memset(i, 0, sizeof(*i));
	i->seq = btree_bset_first(b)->seq;
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}
/*
 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
 * immediate predecessor:
 */
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
				       struct bkey_packed *k)
{
	struct bkey_packed *p;
	unsigned offset;
	int j;

	EBUG_ON(k < btree_bkey_first(b, t) ||
		k > btree_bkey_last(b, t));

	if (k == btree_bkey_first(b, t))
		return NULL;

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		p = btree_bkey_first(b, t);
		break;
	case BSET_RO_AUX_TREE:
		j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));

		do {
			p = j ? tree_to_bkey(b, t,
					__inorder_to_eytzinger1(j--,
							t->size - 1, t->extra))
			      : btree_bkey_first(b, t);
		} while (p >= k);
		break;
	case BSET_RW_AUX_TREE:
		offset = __btree_node_key_to_offset(b, k);
		j = rw_aux_tree_bsearch(b, t, offset);
		p = j ? rw_aux_to_bkey(b, t, j - 1)
		      : btree_bkey_first(b, t);
		break;
	}

	return p;
}
struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
					  struct bset_tree *t,
					  struct bkey_packed *k,
					  unsigned min_key_type)
{
	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;

	while ((p = __bkey_prev(b, t, k)) && !ret) {
		for (i = p; i != k; i = bkey_p_next(i))
			if (i->type >= min_key_type)
				ret = i;

		k = p;
	}

	if (bch2_expensive_debug_checks) {
		BUG_ON(ret >= orig_k);

		for (i = ret
			? bkey_p_next(ret)
			: btree_bkey_first(b, t);
		     i != orig_k;
		     i = bkey_p_next(i))
			BUG_ON(i->type >= min_key_type);
	}

	return ret;
}
static void bch2_bset_fix_lookup_table(struct btree *b,
				       struct bset_tree *t,
				       struct bkey_packed *_where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	int shift = new_u64s - clobber_u64s;
	unsigned l, j, where = __btree_node_key_to_offset(b, _where);

	EBUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	/* returns first entry >= where */
	l = rw_aux_tree_bsearch(b, t, where);

	if (!l) /* never delete first entry */
		l++;
	else if (l < t->size &&
		 where < t->end_offset &&
		 rw_aux_tree(b, t)[l].offset == where)
		rw_aux_tree_set(b, t, l++, _where);

	/* l now > where */

	for (j = l;
	     j < t->size &&
	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
	     j++)
		;

	if (j < t->size &&
	    rw_aux_tree(b, t)[j].offset + shift ==
	    rw_aux_tree(b, t)[l - 1].offset)
		j++;

	memmove(&rw_aux_tree(b, t)[l],
		&rw_aux_tree(b, t)[j],
		(void *) &rw_aux_tree(b, t)[t->size] -
		(void *) &rw_aux_tree(b, t)[j]);
	t->size -= j - l;

	for (j = l; j < t->size; j++)
		rw_aux_tree(b, t)[j].offset += shift;

	EBUG_ON(l < t->size &&
		rw_aux_tree(b, t)[l].offset ==
		rw_aux_tree(b, t)[l - 1].offset);

	if (t->size < bset_rw_tree_capacity(b, t) &&
	    (l < t->size
	     ? rw_aux_tree(b, t)[l].offset
	     : t->end_offset) -
	    rw_aux_tree(b, t)[l - 1].offset >
	    L1_CACHE_BYTES / sizeof(u64)) {
		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
		struct bkey_packed *end = l < t->size
			? rw_aux_to_bkey(b, t, l)
			: btree_bkey_last(b, t);
		struct bkey_packed *k = start;

		while (1) {
			k = bkey_p_next(k);
			if (k == end)
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
				memmove(&rw_aux_tree(b, t)[l + 1],
					&rw_aux_tree(b, t)[l],
					(void *) &rw_aux_tree(b, t)[t->size] -
					(void *) &rw_aux_tree(b, t)[l]);
				t->size++;
				rw_aux_tree_set(b, t, l, k);
				break;
			}
		}
	}

	bch2_bset_verify_rw_aux_tree(b, t);
	bset_aux_tree_verify(b);
}
void bch2_bset_insert(struct btree *b,
		      struct btree_node_iter *iter,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)
{
	struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed packed, *src = bkey_to_packed(insert);

	bch2_bset_verify_rw_aux_tree(b, t);
	bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);

	if (bch2_bkey_pack_key(&packed, &insert->k, f))
		src = &packed;

	if (!bkey_deleted(&insert->k))
		btree_keys_account_key_add(&b->nr, t - b->set, src);

	if (src->u64s != clobber_u64s) {
		u64 *src_p = (u64 *) where->_data + clobber_u64s;
		u64 *dst_p = (u64 *) where->_data + src->u64s;

		EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
			(int) clobber_u64s - src->u64s);

		memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
		le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
		set_btree_bset_end(b, t);
	}

	memcpy_u64s_small(where, src,
			  bkeyp_key_u64s(f, src));
	memcpy_u64s(bkeyp_val(f, where), &insert->v,
		    bkeyp_val_u64s(f, src));

	if (src->u64s != clobber_u64s)
		bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);

	bch2_verify_btree_nr_keys(b);
}
void bch2_bset_delete(struct btree *b,
		      struct bkey_packed *where,
		      unsigned clobber_u64s)
{
	struct bset_tree *t = bset_tree_last(b);
	u64 *src_p = (u64 *) where->_data + clobber_u64s;
	u64 *dst_p = where->_data;

	bch2_bset_verify_rw_aux_tree(b, t);

	EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);

	memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
	le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
	set_btree_bset_end(b, t);

	bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}
static struct bkey_packed *bset_search_write_set(const struct btree *b,
						 struct bset_tree *t,
						 struct bpos *search)
{
	unsigned l = 0, r = t->size;

	while (l + 1 != r) {
		unsigned m = (l + r) >> 1;

		if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
			l = m;
		else
			r = m;
	}

	return rw_aux_to_bkey(b, t, l);
}
static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
	asm("prefetcht0 (-127 + 64 * 0)(%0);"
	    "prefetcht0 (-127 + 64 * 1)(%0);"
	    "prefetcht0 (-127 + 64 * 2)(%0);"
	    "prefetcht0 (-127 + 64 * 3)(%0);"
	    :
	    : "r" (p + 127));
#else
	prefetch(p + L1_CACHE_BYTES * 0);
	prefetch(p + L1_CACHE_BYTES * 1);
	prefetch(p + L1_CACHE_BYTES * 2);
	prefetch(p + L1_CACHE_BYTES * 3);
#endif
}
static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
					      const struct bkey_float *f,
					      unsigned idx)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

	return f->exponent > key_bits_start;
#else
	unsigned key_bits_end = high_bit_offset + b->nr_key_bits;

	return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
#endif
}
static struct bkey_packed *bset_search_tree(const struct btree *b,
					    const struct bset_tree *t,
					    const struct bpos *search,
					    const struct bkey_packed *packed_search)
{
	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
	struct bkey_float *f;
	struct bkey_packed *k;
	unsigned inorder, n = 1, l, r;
	int cmp;

	do {
		if (likely(n << 4 < t->size))
			prefetch(&base->f[n << 4]);

		f = &base->f[n];
		if (unlikely(f->exponent >= BFLOAT_FAILED))
			goto slowpath;

		l = f->mantissa;
		r = bkey_mantissa(packed_search, f, n);

		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
			goto slowpath;

		n = n * 2 + (l < r);
		continue;
slowpath:
		k = tree_to_bkey(b, t, n);
		cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
		if (!cmp)
			return k;

		n = n * 2 + (cmp < 0);
	} while (n < t->size);

	inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (likely(!(n & 1))) {
		--inorder;
		if (unlikely(!inorder))
			return btree_bkey_first(b, t);

		f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
	}

	return cacheline_to_bkey(b, t, inorder, f->key_offset);
}
static __always_inline __flatten
struct bkey_packed *__bch2_bset_search(struct btree *b,
				       struct bset_tree *t,
				       struct bpos *search,
				       const struct bkey_packed *lossy_packed_search)
{
	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there are three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a
	 *    linear search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 *
	 * (An illustrative caller-side sketch follows this function.)
	 */

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return btree_bkey_first(b, t);
	case BSET_RW_AUX_TREE:
		return bset_search_write_set(b, t, search);
	case BSET_RO_AUX_TREE:
		return bset_search_tree(b, t, search, lossy_packed_search);
	default:
		BUG();
	}
}
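/*
 * Illustrative caller-side sketch of the two-phase lookup above (assumed
 * usage, compiled out; the real caller is bch2_btree_node_iter_init()
 * below): first pick a candidate position via the per-bset search, then
 * finish with the linear walk.
 */
#if 0
static struct bkey_packed *example_bset_search(struct btree *b,
					       struct bset_tree *t,
					       struct bpos *search,
					       struct bkey_packed *packed_search)
{
	struct bkey_packed *m = __bch2_bset_search(b, t, search, packed_search);

	/* walk forward to the first key >= search: */
	return bch2_bset_search_linear(b, t, search, packed_search,
				       packed_search, m);
}
#endif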
static __always_inline __flatten
struct bkey_packed *bch2_bset_search_linear(struct btree *b,
					    struct bset_tree *t,
					    struct bpos *search,
					    struct bkey_packed *packed_search,
					    const struct bkey_packed *lossy_packed_search,
					    struct bkey_packed *m)
{
	if (lossy_packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_cmp_p_or_unp(b, m,
					lossy_packed_search, search) < 0)
			m = bkey_p_next(m);

	if (!packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_pos_cmp(b, m, search) < 0)
			m = bkey_p_next(m);

	if (bch2_expensive_debug_checks) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		BUG_ON(prev &&
		       bkey_iter_cmp_p_or_unp(b, prev,
					packed_search, search) >= 0);
	}

	return m;
}
/* Btree node iterator */

static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
			      struct btree *b,
			      const struct bkey_packed *k,
			      const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set *pos;

		btree_node_iter_for_each(iter, pos)
			;

		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
		*pos = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
	}
}
void bch2_btree_node_iter_push(struct btree_node_iter *iter,
			       struct btree *b,
			       const struct bkey_packed *k,
			       const struct bkey_packed *end)
{
	__bch2_btree_node_iter_push(iter, b, k, end);
	bch2_btree_node_iter_sort(iter, b);
}

noinline __flatten __cold
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
					     struct btree *b, struct bpos *search)
{
	struct bkey_packed *k;

	trace_bkey_pack_pos_fail(search);

	bch2_btree_node_iter_init_from_start(iter, b);

	while ((k = bch2_btree_node_iter_peek(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, search) < 0)
		bch2_btree_node_iter_advance(iter, b);
}
/**
 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
 * given position
 * @iter:	iterator to initialize
 * @b:		btree node to search
 * @search:	search key
 *
 * Main entry point to the lookup code for individual btree nodes:
 *
 * NOTE:
 *
 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
 * keys. This doesn't matter for most code, but it does matter for lookups.
 *
 * Some adjacent keys with a string of equal keys:
 *	k1 k2 k k k k k k
 *
 * If you search for k, the lookup code isn't guaranteed to return you any
 * specific k. The lookup code is conceptually doing a binary search and
 * iterating backwards is very expensive so if the pivot happens to land at the
 * last k that's what you'll get.
 *
 * This works out ok, but it's something to be aware of:
 *
 * - For non extents, we guarantee that the live key comes last - see
 *   btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
 *   see will only be deleted keys you don't care about.
 *
 * - For extents, deleted keys sort last (see the comment at the top of this
 *   file). But when you're searching for extents, you actually want the first
 *   key strictly greater than your search key - an extent that compares equal
 *   to the search key is going to have 0 sectors after the search key.
 *
 *   But this does mean that we can't just search for
 *   bpos_successor(start_of_range) to get the first extent that overlaps with
 *   the range we want - if we're unlucky and there's an extent that ends
 *   exactly where we searched, then there could be a deleted key at the same
 *   position and we'd get that when we search instead of the preceding extent
 *   we needed.
 *
 * So we've got to search for start_of_range, then after the lookup iterate
 * past any extents that compare equal to the position we searched for.
 * (An illustrative sketch of this pattern follows the function below.)
 */
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
			       struct btree *b, struct bpos *search)
{
	struct bkey_packed p, *packed_search = NULL;
	struct btree_node_iter_set *pos = iter->data;
	struct bkey_packed *k[MAX_BSETS];
	unsigned i;

	EBUG_ON(bpos_lt(*search, b->data->min_key));
	EBUG_ON(bpos_gt(*search, b->data->max_key));
	bset_aux_tree_verify(b);

	memset(iter, 0, sizeof(*iter));

	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
	case BKEY_PACK_POS_EXACT:
		packed_search = &p;
		break;
	case BKEY_PACK_POS_SMALLER:
		packed_search = NULL;
		break;
	case BKEY_PACK_POS_FAIL:
		btree_node_iter_init_pack_failed(iter, b, search);
		return;
	}

	for (i = 0; i < b->nsets; i++) {
		k[i] = __bch2_bset_search(b, b->set + i, search, &p);
		prefetch_four_cachelines(k[i]);
	}

	for (i = 0; i < b->nsets; i++) {
		struct bset_tree *t = b->set + i;
		struct bkey_packed *end = btree_bkey_last(b, t);

		k[i] = bch2_bset_search_linear(b, t, search,
					       packed_search, &p, k[i]);
		if (k[i] != end)
			*pos++ = (struct btree_node_iter_set) {
				__btree_node_key_to_offset(b, k[i]),
				__btree_node_key_to_offset(b, end)
			};
	}

	bch2_btree_node_iter_sort(iter, b);
}
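/*
 * Illustrative sketch of the extent-lookup pattern described in the comment
 * above (assumed usage, compiled out): search for start_of_range, then step
 * past keys that compare equal to it.
 */
#if 0
static void example_iter_past_equal(struct btree *b, struct bpos start_of_range)
{
	struct btree_node_iter iter;
	struct bkey_packed *k;

	bch2_btree_node_iter_init(&iter, b, &start_of_range);

	/* skip keys whose position compares equal to the search position: */
	while ((k = bch2_btree_node_iter_peek(&iter, b)) &&
	       !bkey_cmp_left_packed(b, k, &start_of_range))
		bch2_btree_node_iter_advance(&iter, b);
}
#endif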
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
					  struct btree *b)
{
	struct bset_tree *t;

	memset(iter, 0, sizeof(*iter));

	for_each_bset(b, t)
		__bch2_btree_node_iter_push(iter, b,
					    btree_bkey_first(b, t),
					    btree_bkey_last(b, t));
	bch2_btree_node_iter_sort(iter, b);
}

struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
						  struct btree *b,
						  struct bset_tree *t)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset)
			return __btree_node_offset_to_key(b, set->k);

	return btree_bkey_last(b, t);
}
static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
					    struct btree *b,
					    unsigned first)
{
	bool ret;

	if ((ret = (btree_node_iter_cmp(b,
					iter->data[first],
					iter->data[first + 1]) > 0)))
		swap(iter->data[first], iter->data[first + 1]);
	return ret;
}

void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
			       struct btree *b)
{
	/* unrolled bubble sort: */

	if (!__btree_node_iter_set_end(iter, 2)) {
		btree_node_iter_sort_two(iter, b, 0);
		btree_node_iter_sort_two(iter, b, 1);
	}

	if (!__btree_node_iter_set_end(iter, 1))
		btree_node_iter_sort_two(iter, b, 0);
}
void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
				   struct btree_node_iter_set *set)
{
	struct btree_node_iter_set *last =
		iter->data + ARRAY_SIZE(iter->data) - 1;

	memmove(&set[0], &set[1], (void *) last - (void *) set);
	*last = (struct btree_node_iter_set) { 0, 0 };
}
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
						  struct btree *b)
{
	iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
		/* avoid an expensive memmove call: */
		iter->data[0] = iter->data[1];
		iter->data[1] = iter->data[2];
		iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
		return;
	}

	if (__btree_node_iter_set_end(iter, 1))
		return;

	if (!btree_node_iter_sort_two(iter, b, 0))
		return;

	if (__btree_node_iter_set_end(iter, 2))
		return;

	btree_node_iter_sort_two(iter, b, 1);
}

void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
				  struct btree *b)
{
	if (bch2_expensive_debug_checks) {
		bch2_btree_node_iter_verify(iter, b);
		bch2_btree_node_iter_next_check(iter, b);
	}

	__bch2_btree_node_iter_advance(iter, b);
}
struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
						  struct btree *b)
{
	struct bkey_packed *k, *prev = NULL;
	struct btree_node_iter_set *set;
	struct bset_tree *t;
	unsigned end = 0;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);

	for_each_bset(b, t) {
		k = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));
		if (k &&
		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
			prev = k;
			end = t->end_offset;
		}
	}

	if (!prev)
		return NULL;

	/*
	 * We're manually memmoving instead of just calling sort() to ensure the
	 * prev we picked ends up in slot 0 - sort won't necessarily put it
	 * there because of duplicate deleted keys:
	 */
	btree_node_iter_for_each(iter, set)
		if (set->end == end)
			goto found;

	BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
found:
	BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));

	memmove(&iter->data[1],
		&iter->data[0],
		(void *) set - (void *) &iter->data[0]);

	iter->data[0].k = __btree_node_key_to_offset(b, prev);
	iter->data[0].end = end;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);
	return prev;
}

struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
					      struct btree *b)
{
	struct bkey_packed *prev;

	do {
		prev = bch2_btree_node_iter_prev_all(iter, b);
	} while (prev && bkey_deleted(prev));

	return prev;
}
struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
						 struct btree *b,
						 struct bkey *u)
{
	struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);

	return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}
void bch2_btree_keys_stats(const struct btree *b, struct bset_stats *stats)
{
	const struct bset_tree *t;

	for_each_bset(b, t) {
		enum bset_aux_tree_type type = bset_aux_tree_type(t);
		size_t j;

		stats->sets[type].nr++;
		stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
			sizeof(u64);

		if (bset_has_ro_aux_tree(t)) {
			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				stats->failed +=
					bkey_float(b, t, j)->exponent ==
					BFLOAT_FAILED;
		}
	}
}
void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
			 struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);
	struct bkey uk;
	unsigned j, inorder;

	if (!bset_has_ro_aux_tree(t))
		return;

	inorder = bkey_to_cacheline(b, t, k);
	if (!inorder || inorder >= t->size)
		return;

	j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
	if (k != tree_to_bkey(b, t, j))
		return;

	switch (bkey_float(b, t, j)->exponent) {
	case BFLOAT_FAILED:
		uk = bkey_unpack_key(b, k);
		prt_printf(out,
		       "	failed unpacked at depth %u\n"
		       "\t",
		       ilog2(j));
		bch2_bpos_to_text(out, uk.p);
		prt_printf(out, "\n");
		break;
	}
}