1 // SPDX-License-Identifier: GPL-2.0
3 * Code for working with individual keys, and sorted sets of keys within a
6 * Copyright 2012 Google, Inc.
10 #include "btree_cache.h"
12 #include "eytzinger.h"
15 #include <asm/unaligned.h>
16 #include <linux/console.h>
17 #include <linux/random.h>
18 #include <linux/prefetch.h>
21 #include "alloc_types.h"
22 #include <trace/events/bcachefs.h>
24 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
27 static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
29 unsigned n = ARRAY_SIZE(iter->data);
31 while (n && __btree_node_iter_set_end(iter, n - 1))
37 struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
39 unsigned offset = __btree_node_key_to_offset(b, k);
43 if (offset <= t->end_offset) {
44 EBUG_ON(offset < btree_bkey_first_offset(t));
52 * There are never duplicate live keys in the btree - but including keys that
53 * have been flagged as deleted (and will be cleaned up later) we _will_ see
56 * Thus the sort order is: usual key comparison first, but for keys that compare
57 * equal the deleted key(s) come first, and the (at most one) live version comes
60 * The main reason for this is insertion: to handle overwrites, we first iterate
61 * over keys that compare equal to our insert key, and then insert immediately
62 * prior to the first key greater than the key we're inserting - our insert
63 * position will be after all keys that compare equal to our insert key, which
64 * by the time we actually do the insert will all be deleted.
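*
* A small illustrative example (hypothetical keys, not from the original
* comment): if a bset currently holds, in order,
*
*	[pos 5, deleted] [pos 5, deleted] [pos 5, live] [pos 7, live]
*
* then an overwrite at pos 5 will first have marked the old live version
* deleted, so by the time we insert, everything comparing equal to pos 5 is
* deleted and the new key goes immediately before [pos 7, live].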
67 void bch2_dump_bset(struct bch_fs *c, struct btree *b,
68 struct bset *i, unsigned set)
70 struct bkey_packed *_k, *_n;
73 struct printbuf buf = PRINTBUF;
83 k = bkey_disassemble(b, _k, &uk);
87 bch2_bkey_val_to_text(&buf, c, k);
89 bch2_bkey_to_text(&buf, k.k);
90 printk(KERN_ERR "block %u key %5zu: %s\n", set,
91 _k->_data - i->_data, buf.buf);
93 if (_n == vstruct_last(i))
96 n = bkey_unpack_key(b, _n);
98 if (bpos_lt(n.p, k.k->p)) {
99 printk(KERN_ERR "Key skipped backwards\n");
103 if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
104 printk(KERN_ERR "Duplicate keys\n");
110 void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
116 bch2_dump_bset(c, b, bset(b, t), t - b->set);
120 void bch2_dump_btree_node_iter(struct btree *b,
121 struct btree_node_iter *iter)
123 struct btree_node_iter_set *set;
124 struct printbuf buf = PRINTBUF;
126 printk(KERN_ERR "btree node iter with %u/%u sets:\n",
127 __btree_node_iter_used(iter), b->nsets);
129 btree_node_iter_for_each(iter, set) {
130 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
131 struct bset_tree *t = bch2_bkey_to_bset(b, k);
132 struct bkey uk = bkey_unpack_key(b, k);
134 printbuf_reset(&buf);
135 bch2_bkey_to_text(&buf, &uk);
136 printk(KERN_ERR "set %zu key %u: %s\n",
137 t - b->set, set->k, buf.buf);
143 #ifdef CONFIG_BCACHEFS_DEBUG
145 void __bch2_verify_btree_nr_keys(struct btree *b)
148 struct bkey_packed *k;
149 struct btree_nr_keys nr = { 0 };
152 bset_tree_for_each_key(b, t, k)
153 if (!bkey_deleted(k))
154 btree_keys_account_key_add(&nr, t - b->set, k);
156 BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
159 static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
162 struct btree_node_iter iter = *_iter;
163 const struct bkey_packed *k, *n;
165 k = bch2_btree_node_iter_peek_all(&iter, b);
166 __bch2_btree_node_iter_advance(&iter, b);
167 n = bch2_btree_node_iter_peek_all(&iter, b);
169 bkey_unpack_key(b, k);
172 bkey_iter_cmp(b, k, n) > 0) {
173 struct btree_node_iter_set *set;
174 struct bkey ku = bkey_unpack_key(b, k);
175 struct bkey nu = bkey_unpack_key(b, n);
176 struct printbuf buf1 = PRINTBUF;
177 struct printbuf buf2 = PRINTBUF;
179 bch2_dump_btree_node(NULL, b);
180 bch2_bkey_to_text(&buf1, &ku);
181 bch2_bkey_to_text(&buf2, &nu);
182 printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
184 printk(KERN_ERR "iter was:");
186 btree_node_iter_for_each(_iter, set) {
187 struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
188 struct bset_tree *t = bch2_bkey_to_bset(b, k);
189 printk(" [%zi %zi]", t - b->set,
190 k->_data - bset(b, t)->_data);
196 void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
199 struct btree_node_iter_set *set, *s2;
200 struct bkey_packed *k, *p;
203 if (bch2_btree_node_iter_end(iter))
206 /* Verify no duplicates: */
207 btree_node_iter_for_each(iter, set) {
208 BUG_ON(set->k > set->end);
209 btree_node_iter_for_each(iter, s2)
210 BUG_ON(set != s2 && set->end == s2->end);
213 /* Verify that set->end is correct: */
214 btree_node_iter_for_each(iter, set) {
216 if (set->end == t->end_offset)
220 BUG_ON(set->k < btree_bkey_first_offset(t) ||
221 set->k >= t->end_offset);
224 /* Verify iterator is sorted: */
225 btree_node_iter_for_each(iter, set)
226 BUG_ON(set != iter->data &&
227 btree_node_iter_cmp(b, set[-1], set[0]) > 0);
229 k = bch2_btree_node_iter_peek_all(iter, b);
231 for_each_bset(b, t) {
232 if (iter->data[0].end == t->end_offset)
235 p = bch2_bkey_prev_all(b, t,
236 bch2_btree_node_iter_bset_pos(iter, b, t));
238 BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
242 void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
243 struct bkey_packed *insert, unsigned clobber_u64s)
245 struct bset_tree *t = bch2_bkey_to_bset(b, where);
246 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
247 struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
248 struct printbuf buf1 = PRINTBUF;
249 struct printbuf buf2 = PRINTBUF;
252 bkey_iter_cmp(b, prev, insert) > 0);
255 bkey_iter_cmp(b, prev, insert) > 0) {
256 struct bkey k1 = bkey_unpack_key(b, prev);
257 struct bkey k2 = bkey_unpack_key(b, insert);
259 bch2_dump_btree_node(NULL, b);
260 bch2_bkey_to_text(&buf1, &k1);
261 bch2_bkey_to_text(&buf2, &k2);
263 panic("prev > insert:\n"
270 BUG_ON(next != btree_bkey_last(b, t) &&
271 bkey_iter_cmp(b, insert, next) > 0);
273 if (next != btree_bkey_last(b, t) &&
274 bkey_iter_cmp(b, insert, next) > 0) {
275 struct bkey k1 = bkey_unpack_key(b, insert);
276 struct bkey k2 = bkey_unpack_key(b, next);
278 bch2_dump_btree_node(NULL, b);
279 bch2_bkey_to_text(&buf1, &k1);
280 bch2_bkey_to_text(&buf2, &k2);
282 panic("insert > next:\n"
292 static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
297 /* Auxiliary search trees */
299 #define BFLOAT_FAILED_UNPACKED U8_MAX
300 #define BFLOAT_FAILED U8_MAX
307 #define BKEY_MANTISSA_BITS 16
309 static unsigned bkey_float_byte_offset(unsigned idx)
311 return idx * sizeof(struct bkey_float);
315 struct bkey_float f[];
323 static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
325 BUG_ON(t->aux_data_offset == U16_MAX);
327 switch (bset_aux_tree_type(t)) {
328 case BSET_NO_AUX_TREE:
329 return t->aux_data_offset;
330 case BSET_RO_AUX_TREE:
331 return t->aux_data_offset +
332 DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
333 t->size * sizeof(u8), 8);
334 case BSET_RW_AUX_TREE:
335 return t->aux_data_offset +
336 DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
342 static unsigned bset_aux_tree_buf_start(const struct btree *b,
343 const struct bset_tree *t)
346 ? DIV_ROUND_UP(b->unpack_fn_len, 8)
347 : bset_aux_tree_buf_end(t - 1);
350 static void *__aux_tree_base(const struct btree *b,
351 const struct bset_tree *t)
353 return b->aux_data + t->aux_data_offset * 8;
356 static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
357 const struct bset_tree *t)
359 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
361 return __aux_tree_base(b, t);
364 static u8 *ro_aux_tree_prev(const struct btree *b,
365 const struct bset_tree *t)
367 EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
369 return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
372 static struct bkey_float *bkey_float(const struct btree *b,
373 const struct bset_tree *t,
376 return ro_aux_tree_base(b, t)->f + idx;
379 static void bset_aux_tree_verify(const struct btree *b)
381 #ifdef CONFIG_BCACHEFS_DEBUG
382 const struct bset_tree *t;
384 for_each_bset(b, t) {
385 if (t->aux_data_offset == U16_MAX)
388 BUG_ON(t != b->set &&
389 t[-1].aux_data_offset == U16_MAX);
391 BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
392 BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
393 BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
398 void bch2_btree_keys_init(struct btree *b)
403 memset(&b->nr, 0, sizeof(b->nr));
405 for (i = 0; i < MAX_BSETS; i++)
406 b->set[i].data_offset = U16_MAX;
408 bch2_bset_set_no_aux_tree(b, b->set);
411 /* Binary tree stuff for auxiliary search trees */
414 * Cacheline/offset <-> bkey pointer arithmetic:
416 * The ro aux tree is a binary search tree in an array; each node corresponds to
417 * a key in one cacheline of the bset (BSET_CACHELINE bytes).
419 * This means we don't have to store the full index of the key that a node in
420 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline, and
421 * then bkey_float->key_offset gives us the offset within that cacheline, in units of 8
424 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
427 * To construct the bfloat for an arbitrary key we need to know what the key
428 * immediately preceding it is: we have to check if the two keys differ in the
429 * bits we're going to store in bkey_float->mantissa. ro_aux_tree_prev(b, t)[j] stores
430 * the size of the previous key so we can walk backwards from tree_to_bkey(b, t, j)'s key.
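*
* A rough worked example (sizes made up): say a bset spans 8 cachelines, so
* t->size == 8 and the search tree has 7 nodes; eytzinger indices j = 1..7
* then map to inorder cachelines 4, 2, 6, 1, 3, 5, 7, and looking up node j's
* key is roughly
*
*	cacheline = __eytzinger1_to_inorder(j, t->size - 1, t->extra);
*	key = bset_cacheline(b, t, cacheline)
*		+ bkey_float(b, t, j)->key_offset * 8;
*
* which is what cacheline_to_bkey()/tree_to_bkey() below compute.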
433 static inline void *bset_cacheline(const struct btree *b,
434 const struct bset_tree *t,
437 return (void *) round_down((unsigned long) btree_bkey_first(b, t),
439 cacheline * BSET_CACHELINE;
442 static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
443 const struct bset_tree *t,
447 return bset_cacheline(b, t, cacheline) + offset * 8;
450 static unsigned bkey_to_cacheline(const struct btree *b,
451 const struct bset_tree *t,
452 const struct bkey_packed *k)
454 return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
457 static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
458 const struct bset_tree *t,
460 const struct bkey_packed *k)
462 return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
465 static unsigned bkey_to_cacheline_offset(const struct btree *b,
466 const struct bset_tree *t,
468 const struct bkey_packed *k)
470 size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);
476 static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
477 const struct bset_tree *t,
480 return cacheline_to_bkey(b, t,
481 __eytzinger1_to_inorder(j, t->size - 1, t->extra),
482 bkey_float(b, t, j)->key_offset);
485 static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
486 const struct bset_tree *t,
489 unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
491 return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
494 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
495 const struct bset_tree *t)
497 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
499 return __aux_tree_base(b, t);
503 * For the write set - the one we're currently inserting keys into - we don't
504 * maintain a full search tree; we just keep a simple lookup table, the rw aux tree.
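*
* A minimal sketch of what that table looks like (field names from struct
* rw_aux_tree / rw_aux_tree_set() below, contents made up): one entry roughly
* per cacheline's worth of keys, each holding a key's offset within the node
* plus its unpacked position:
*
*	rw_aux_tree(b, t)[0] = { .offset = <first key>, .k = POS(...) };
*	rw_aux_tree(b, t)[1] = { .offset = <~1 cacheline later>, .k = POS(...) };
*	...
*
* bset_search_write_set() binary searches the .k fields, then the caller does
* a linear scan forward from the key the matching entry points at.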
506 static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
510 return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
513 static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
514 unsigned j, struct bkey_packed *k)
516 EBUG_ON(k >= btree_bkey_last(b, t));
518 rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
519 .offset = __btree_node_key_to_offset(b, k),
520 .k = bkey_unpack_pos(b, k),
524 static void bch2_bset_verify_rw_aux_tree(struct btree *b,
527 struct bkey_packed *k = btree_bkey_first(b, t);
530 if (!bch2_expensive_debug_checks)
533 BUG_ON(bset_has_ro_aux_tree(t));
535 if (!bset_has_rw_aux_tree(t))
539 BUG_ON(rw_aux_to_bkey(b, t, j) != k);
543 if (rw_aux_to_bkey(b, t, j) == k) {
544 BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
545 bkey_unpack_pos(b, k)));
550 BUG_ON(rw_aux_tree(b, t)[j].offset <=
551 rw_aux_tree(b, t)[j - 1].offset);
555 BUG_ON(k >= btree_bkey_last(b, t));
559 /* returns idx of first entry >= offset: */
560 static unsigned rw_aux_tree_bsearch(struct btree *b,
564 unsigned bset_offs = offset - btree_bkey_first_offset(t);
565 unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
566 unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;
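/*
* Initial guess by linear interpolation, assuming keys are spread roughly
* evenly through the bset; the loops below then nudge the guess forward or
* backward until it lands on the first entry >= offset.
*/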
568 EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
570 EBUG_ON(idx > t->size);
572 while (idx < t->size &&
573 rw_aux_tree(b, t)[idx].offset < offset)
577 rw_aux_tree(b, t)[idx - 1].offset >= offset)
580 EBUG_ON(idx < t->size &&
581 rw_aux_tree(b, t)[idx].offset < offset);
582 EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
583 EBUG_ON(idx + 1 < t->size &&
584 rw_aux_tree(b, t)[idx].offset ==
585 rw_aux_tree(b, t)[idx + 1].offset);
590 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
591 const struct bkey_float *f,
596 EBUG_ON(!bkey_packed(k));
598 v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));
601 * In little endian, we're shifting off low bits (and then the bits we
602 * want are at the low end), in big endian we're shifting off high bits
603 * (and then the bits we want are at the high end, so we shift them
606 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
607 v >>= f->exponent & 7;
609 v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
615 static inline void make_bfloat(struct btree *b, struct bset_tree *t,
617 struct bkey_packed *min_key,
618 struct bkey_packed *max_key)
620 struct bkey_float *f = bkey_float(b, t, j);
621 struct bkey_packed *m = tree_to_bkey(b, t, j);
622 struct bkey_packed *l = is_power_of_2(j)
624 : tree_to_prev_bkey(b, t, j >> ffs(j));
625 struct bkey_packed *r = is_power_of_2(j + 1)
627 : tree_to_bkey(b, t, j >> (ffz(j) + 1));
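/*
* Reading the index arithmetic above: j >> ffs(j) is the nearest ancestor
* whose right subtree contains j, i.e. the inorder predecessor of j's
* subtree, and j >> (ffz(j) + 1) is the nearest ancestor whose left subtree
* contains j, its inorder successor. Nodes on the leftmost spine
* (is_power_of_2(j)) have no such predecessor and nodes on the rightmost
* spine (is_power_of_2(j + 1)) have no such successor, hence the fallback
* to the node's min/max key.
*/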
629 int shift, exponent, high_bit;
632 * for failed bfloats, the lookup code falls back to comparing against
636 if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
638 f->exponent = BFLOAT_FAILED_UNPACKED;
643 * The greatest differing bit of l and r is the first bit we must
644 * include in the bfloat mantissa we're creating in order to do
645 * comparisons - that bit always becomes the high bit of
646 * bfloat->mantissa, and thus the exponent we're calculating here is
647 * the position of what will become the low bit in bfloat->mantissa:
649 * Note that this may be negative - we may be running off the low end
650 * of the key: we handle this later:
652 high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
653 min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
654 exponent = high_bit - (BKEY_MANTISSA_BITS - 1);
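/*
* For example (numbers made up): if l and r first differ at bit 40, then
* with BKEY_MANTISSA_BITS == 16 we get high_bit == 40 and exponent == 25,
* i.e. the mantissa will hold key bits 40..25.
*/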
657 * Then we calculate the actual shift value, from the start of the key
658 * (k->_data), to get the key bits starting at exponent:
660 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
661 shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;
663 EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
665 shift = high_bit_offset +
670 EBUG_ON(shift < KEY_PACKED_BITS_START);
672 EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);
675 mantissa = bkey_mantissa(m, f, j);
678 * If we've got garbage bits, set them to all 1s - it's legal for the
679 * bfloat to compare larger than the original key, but not smaller:
682 mantissa |= ~(~0U << -exponent);
684 f->mantissa = mantissa;
687 /* bytes remaining - only valid for last bset: */
688 static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
690 bset_aux_tree_verify(b);
692 return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
695 static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
697 return __bset_tree_capacity(b, t) /
698 (sizeof(struct bkey_float) + sizeof(u8));
701 static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
703 return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
706 static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
708 struct bkey_packed *k;
711 t->extra = BSET_RW_AUX_TREE_VAL;
712 rw_aux_tree(b, t)[0].offset =
713 __btree_node_key_to_offset(b, btree_bkey_first(b, t));
715 bset_tree_for_each_key(b, t, k) {
716 if (t->size == bset_rw_tree_capacity(b, t))
719 if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
721 rw_aux_tree_set(b, t, t->size++, k);
725 static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
727 struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
728 struct bkey_i min_key, max_key;
729 unsigned j, cacheline = 1;
731 t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
732 bset_ro_tree_capacity(b, t));
736 t->extra = BSET_NO_AUX_TREE_VAL;
740 t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
742 /* First we figure out where the first key in each cacheline is */
743 eytzinger1_for_each(j, t->size - 1) {
744 while (bkey_to_cacheline(b, t, k) < cacheline)
745 prev = k, k = bkey_next(k);
747 if (k >= btree_bkey_last(b, t)) {
748 /* XXX: this path sucks */
753 ro_aux_tree_prev(b, t)[j] = prev->u64s;
754 bkey_float(b, t, j)->key_offset =
755 bkey_to_cacheline_offset(b, t, cacheline++, k);
757 EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
758 EBUG_ON(tree_to_bkey(b, t, j) != k);
761 while (k != btree_bkey_last(b, t))
762 prev = k, k = bkey_next(k);
764 if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
765 bkey_init(&min_key.k);
766 min_key.k.p = b->data->min_key;
769 if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
770 bkey_init(&max_key.k);
771 max_key.k.p = b->data->max_key;
774 /* Then we build the tree */
775 eytzinger1_for_each(j, t->size - 1)
777 bkey_to_packed(&min_key),
778 bkey_to_packed(&max_key));
781 static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
785 for (i = b->set; i != t; i++)
786 BUG_ON(bset_has_rw_aux_tree(i));
788 bch2_bset_set_no_aux_tree(b, t);
790 /* round up to next cacheline: */
791 t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
792 SMP_CACHE_BYTES / sizeof(u64));
794 bset_aux_tree_verify(b);
797 void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
801 ? bset_has_rw_aux_tree(t)
802 : bset_has_ro_aux_tree(t))
805 bset_alloc_tree(b, t);
807 if (!__bset_tree_capacity(b, t))
811 __build_rw_aux_tree(b, t);
813 __build_ro_aux_tree(b, t);
815 bset_aux_tree_verify(b);
818 void bch2_bset_init_first(struct btree *b, struct bset *i)
824 memset(i, 0, sizeof(*i));
825 get_random_bytes(&i->seq, sizeof(i->seq));
826 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
828 t = &b->set[b->nsets++];
829 set_btree_bset(b, t, i);
832 void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
833 struct btree_node_entry *bne)
835 struct bset *i = &bne->keys;
838 BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
839 BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
840 BUG_ON(b->nsets >= MAX_BSETS);
842 memset(i, 0, sizeof(*i));
843 i->seq = btree_bset_first(b)->seq;
844 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
846 t = &b->set[b->nsets++];
847 set_btree_bset(b, t, i);
851 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
852 * immediate predecessor:
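*
* (bch2_bkey_prev_filter() below walks forward from whatever we return here
* until it reaches @k, remembering the last key it saw of an acceptable type,
* so returning an earlier key than strictly necessary only costs time, not
* correctness.)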
854 static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
855 struct bkey_packed *k)
857 struct bkey_packed *p;
861 EBUG_ON(k < btree_bkey_first(b, t) ||
862 k > btree_bkey_last(b, t));
864 if (k == btree_bkey_first(b, t))
867 switch (bset_aux_tree_type(t)) {
868 case BSET_NO_AUX_TREE:
869 p = btree_bkey_first(b, t);
871 case BSET_RO_AUX_TREE:
872 j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));
875 p = j ? tree_to_bkey(b, t,
876 __inorder_to_eytzinger1(j--,
877 t->size - 1, t->extra))
878 : btree_bkey_first(b, t);
881 case BSET_RW_AUX_TREE:
882 offset = __btree_node_key_to_offset(b, k);
883 j = rw_aux_tree_bsearch(b, t, offset);
884 p = j ? rw_aux_to_bkey(b, t, j - 1)
885 : btree_bkey_first(b, t);
892 struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
894 struct bkey_packed *k,
895 unsigned min_key_type)
897 struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
899 while ((p = __bkey_prev(b, t, k)) && !ret) {
900 for (i = p; i != k; i = bkey_next(i))
901 if (i->type >= min_key_type)
907 if (bch2_expensive_debug_checks) {
908 BUG_ON(ret >= orig_k);
912 : btree_bkey_first(b, t);
915 BUG_ON(i->type >= min_key_type);
923 static void bch2_bset_fix_lookup_table(struct btree *b,
925 struct bkey_packed *_where,
926 unsigned clobber_u64s,
929 int shift = new_u64s - clobber_u64s;
930 unsigned l, j, where = __btree_node_key_to_offset(b, _where);
932 EBUG_ON(bset_has_ro_aux_tree(t));
934 if (!bset_has_rw_aux_tree(t))
937 /* returns first entry >= where */
938 l = rw_aux_tree_bsearch(b, t, where);
940 if (!l) /* never delete first entry */
942 else if (l < t->size &&
943 where < t->end_offset &&
944 rw_aux_tree(b, t)[l].offset == where)
945 rw_aux_tree_set(b, t, l++, _where);
951 rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
956 rw_aux_tree(b, t)[j].offset + shift ==
957 rw_aux_tree(b, t)[l - 1].offset)
960 memmove(&rw_aux_tree(b, t)[l],
961 &rw_aux_tree(b, t)[j],
962 (void *) &rw_aux_tree(b, t)[t->size] -
963 (void *) &rw_aux_tree(b, t)[j]);
966 for (j = l; j < t->size; j++)
967 rw_aux_tree(b, t)[j].offset += shift;
969 EBUG_ON(l < t->size &&
970 rw_aux_tree(b, t)[l].offset ==
971 rw_aux_tree(b, t)[l - 1].offset);
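/*
* If the edit opened up a gap of more than a cacheline between adjacent
* lookup table entries, and the table still has room to grow, walk forward
* from the previous entry and add a new entry about one cacheline in, so
* later lookups never have to linear search too far.
*/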
973 if (t->size < bset_rw_tree_capacity(b, t) &&
975 ? rw_aux_tree(b, t)[l].offset
977 rw_aux_tree(b, t)[l - 1].offset >
978 L1_CACHE_BYTES / sizeof(u64)) {
979 struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
980 struct bkey_packed *end = l < t->size
981 ? rw_aux_to_bkey(b, t, l)
982 : btree_bkey_last(b, t);
983 struct bkey_packed *k = start;
990 if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
991 memmove(&rw_aux_tree(b, t)[l + 1],
992 &rw_aux_tree(b, t)[l],
993 (void *) &rw_aux_tree(b, t)[t->size] -
994 (void *) &rw_aux_tree(b, t)[l]);
996 rw_aux_tree_set(b, t, l, k);
1002 bch2_bset_verify_rw_aux_tree(b, t);
1003 bset_aux_tree_verify(b);
1006 void bch2_bset_insert(struct btree *b,
1007 struct btree_node_iter *iter,
1008 struct bkey_packed *where,
1009 struct bkey_i *insert,
1010 unsigned clobber_u64s)
1012 struct bkey_format *f = &b->format;
1013 struct bset_tree *t = bset_tree_last(b);
1014 struct bkey_packed packed, *src = bkey_to_packed(insert);
1016 bch2_bset_verify_rw_aux_tree(b, t);
1017 bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);
1019 if (bch2_bkey_pack_key(&packed, &insert->k, f))
1022 if (!bkey_deleted(&insert->k))
1023 btree_keys_account_key_add(&b->nr, t - b->set, src);
1025 if (src->u64s != clobber_u64s) {
1026 u64 *src_p = where->_data + clobber_u64s;
1027 u64 *dst_p = where->_data + src->u64s;
1029 EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
1030 (int) clobber_u64s - src->u64s);
1032 memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1033 le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
1034 set_btree_bset_end(b, t);
1037 memcpy_u64s(where, src,
1038 bkeyp_key_u64s(f, src));
1039 memcpy_u64s(bkeyp_val(f, where), &insert->v,
1040 bkeyp_val_u64s(f, src));
1042 if (src->u64s != clobber_u64s)
1043 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);
1045 bch2_verify_btree_nr_keys(b);
1048 void bch2_bset_delete(struct btree *b,
1049 struct bkey_packed *where,
1050 unsigned clobber_u64s)
1052 struct bset_tree *t = bset_tree_last(b);
1053 u64 *src_p = where->_data + clobber_u64s;
1054 u64 *dst_p = where->_data;
1056 bch2_bset_verify_rw_aux_tree(b, t);
1058 EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);
1060 memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
1061 le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
1062 set_btree_bset_end(b, t);
1064 bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
1070 static struct bkey_packed *bset_search_write_set(const struct btree *b,
1071 struct bset_tree *t,
1072 struct bpos *search)
1074 unsigned l = 0, r = t->size;
1076 while (l + 1 != r) {
1077 unsigned m = (l + r) >> 1;
1079 if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
1085 return rw_aux_to_bkey(b, t, l);
1088 static inline void prefetch_four_cachelines(void *p)
1090 #ifdef CONFIG_X86_64
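/*
* The -127 displacements below (combined with a base pointer biased to
* compensate) keep every displacement within a signed 8-bit immediate, so
* each prefetcht0 gets a short encoding.
*/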
1091 asm("prefetcht0 (-127 + 64 * 0)(%0);"
1092 "prefetcht0 (-127 + 64 * 1)(%0);"
1093 "prefetcht0 (-127 + 64 * 2)(%0);"
1094 "prefetcht0 (-127 + 64 * 3)(%0);"
1098 prefetch(p + L1_CACHE_BYTES * 0);
1099 prefetch(p + L1_CACHE_BYTES * 1);
1100 prefetch(p + L1_CACHE_BYTES * 2);
1101 prefetch(p + L1_CACHE_BYTES * 3);
1105 static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
1106 const struct bkey_float *f,
1109 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1110 unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;
1112 return f->exponent > key_bits_start;
1114 unsigned key_bits_end = high_bit_offset + b->nr_key_bits;
1116 return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
1121 static struct bkey_packed *bset_search_tree(const struct btree *b,
1122 const struct bset_tree *t,
1123 const struct bpos *search,
1124 const struct bkey_packed *packed_search)
1126 struct ro_aux_tree *base = ro_aux_tree_base(b, t);
1127 struct bkey_float *f;
1128 struct bkey_packed *k;
1129 unsigned inorder, n = 1, l, r;
1133 if (likely(n << 4 < t->size))
1134 prefetch(&base->f[n << 4]);
1137 if (unlikely(f->exponent >= BFLOAT_FAILED))
1141 r = bkey_mantissa(packed_search, f, n);
1143 if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
1146 n = n * 2 + (l < r);
1149 k = tree_to_bkey(b, t, n);
1150 cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
1154 n = n * 2 + (cmp < 0);
1155 } while (n < t->size);
1157 inorder = __eytzinger1_to_inorder(n >> 1, t->size - 1, t->extra);
1160 * n would have been the node we recursed to - the low bit tells us if
1161 * we recursed left or recursed right.
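*
* E.g. (sizes made up): with t->size == 8 the loop exits once n >= 8; if it
* exits with n == 11 (binary 1011), the last real node visited was
* n >> 1 == 5, and the low bit being set means node 5's key compared less
* than the search key, so the caller's linear search can start from node 5's
* key.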
1163 if (likely(!(n & 1))) {
1165 if (unlikely(!inorder))
1166 return btree_bkey_first(b, t);
1168 f = &base->f[eytzinger1_prev(n >> 1, t->size - 1)];
1171 return cacheline_to_bkey(b, t, inorder, f->key_offset);
1174 static __always_inline __flatten
1175 struct bkey_packed *__bch2_bset_search(struct btree *b,
1176 struct bset_tree *t,
1177 struct bpos *search,
1178 const struct bkey_packed *lossy_packed_search)
1182 * First, we search for a cacheline, and then we do a linear search
1183 * within that cacheline.
1185 * To search for the cacheline, there are three different possibilities:
1186 * * The set is too small to have a search tree, so we just do a linear
1187 * search over the whole set.
1188 * * The set is the one we're currently inserting into; keeping a full
1189 * auxiliary search tree up to date would be too expensive, so we
1190 * use a much simpler lookup table to do a binary search -
1191 * bset_search_write_set().
1192 * * Or we use the auxiliary search tree we constructed earlier -
1193 * bset_search_tree()
1196 switch (bset_aux_tree_type(t)) {
1197 case BSET_NO_AUX_TREE:
1198 return btree_bkey_first(b, t);
1199 case BSET_RW_AUX_TREE:
1200 return bset_search_write_set(b, t, search);
1201 case BSET_RO_AUX_TREE:
1202 return bset_search_tree(b, t, search, lossy_packed_search);
1208 static __always_inline __flatten
1209 struct bkey_packed *bch2_bset_search_linear(struct btree *b,
1210 struct bset_tree *t,
1211 struct bpos *search,
1212 struct bkey_packed *packed_search,
1213 const struct bkey_packed *lossy_packed_search,
1214 struct bkey_packed *m)
1216 if (lossy_packed_search)
1217 while (m != btree_bkey_last(b, t) &&
1218 bkey_iter_cmp_p_or_unp(b, m,
1219 lossy_packed_search, search) < 0)
1223 while (m != btree_bkey_last(b, t) &&
1224 bkey_iter_pos_cmp(b, m, search) < 0)
1227 if (bch2_expensive_debug_checks) {
1228 struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
1231 bkey_iter_cmp_p_or_unp(b, prev,
1232 packed_search, search) >= 0);
1238 /* Btree node iterator */
1240 static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
1242 const struct bkey_packed *k,
1243 const struct bkey_packed *end)
1246 struct btree_node_iter_set *pos;
1248 btree_node_iter_for_each(iter, pos)
1251 BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
1252 *pos = (struct btree_node_iter_set) {
1253 __btree_node_key_to_offset(b, k),
1254 __btree_node_key_to_offset(b, end)
1259 void bch2_btree_node_iter_push(struct btree_node_iter *iter,
1261 const struct bkey_packed *k,
1262 const struct bkey_packed *end)
1264 __bch2_btree_node_iter_push(iter, b, k, end);
1265 bch2_btree_node_iter_sort(iter, b);
1268 noinline __flatten __cold
1269 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
1270 struct btree *b, struct bpos *search)
1272 struct bkey_packed *k;
1274 trace_bkey_pack_pos_fail(search);
1276 bch2_btree_node_iter_init_from_start(iter, b);
1278 while ((k = bch2_btree_node_iter_peek(iter, b)) &&
1279 bkey_iter_pos_cmp(b, k, search) < 0)
1280 bch2_btree_node_iter_advance(iter, b);
1284 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
1287 * Main entry point to the lookup code for individual btree nodes:
1291 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
1292 * keys. This doesn't matter for most code, but it does matter for lookups.
1294 * Some adjacent keys with a string of equal keys:
1297 * If you search for k, the lookup code isn't guaranteed to return you any
1298 * specific k. The lookup code is conceptually doing a binary search and
1299 * iterating backwards is very expensive, so if the pivot happens to land at the
1300 * last k that's what you'll get.
1302 * This works out ok, but it's something to be aware of:
1304 * - For non-extents, we guarantee that the live key comes last - see
1305 * btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
1306 * see will only be deleted keys you don't care about.
1308 * - For extents, deleted keys sort last (see the comment at the top of this
1309 * file). But when you're searching for extents, you actually want the first
1310 * key strictly greater than your search key - an extent that compares equal
1311 * to the search key is going to have 0 sectors after the search key.
1313 * But this does mean that we can't just search for
1314 * bpos_successor(start_of_range) to get the first extent that overlaps with
1315 * the range we want - if we're unlucky and there's an extent that ends
1316 * exactly where we searched, then there could be a deleted key at the same
1317 * position and we'd get that when we search instead of the preceding extent
1320 * So we've got to search for start_of_range, then after the lookup iterate
1321 * past any extents that compare equal to the position we searched for.
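*
* A concrete sketch (hypothetical extents, not from the original comment):
* with extents ending at offsets 8 and 12 in the node, a lookup for the range
* starting at 8 searches for pos 8, may land on the extent ending exactly at
* 8 (or on a deleted key at that position), and then steps past everything
* comparing equal to 8 to reach the extent ending at 12 - the first one that
* actually overlaps the range.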
1324 void bch2_btree_node_iter_init(struct btree_node_iter *iter,
1325 struct btree *b, struct bpos *search)
1327 struct bkey_packed p, *packed_search = NULL;
1328 struct btree_node_iter_set *pos = iter->data;
1329 struct bkey_packed *k[MAX_BSETS];
1332 EBUG_ON(bpos_lt(*search, b->data->min_key));
1333 EBUG_ON(bpos_gt(*search, b->data->max_key));
1334 bset_aux_tree_verify(b);
1336 memset(iter, 0, sizeof(*iter));
1338 switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
1339 case BKEY_PACK_POS_EXACT:
1342 case BKEY_PACK_POS_SMALLER:
1343 packed_search = NULL;
1345 case BKEY_PACK_POS_FAIL:
1346 btree_node_iter_init_pack_failed(iter, b, search);
1350 for (i = 0; i < b->nsets; i++) {
1351 k[i] = __bch2_bset_search(b, b->set + i, search, &p);
1352 prefetch_four_cachelines(k[i]);
1355 for (i = 0; i < b->nsets; i++) {
1356 struct bset_tree *t = b->set + i;
1357 struct bkey_packed *end = btree_bkey_last(b, t);
1359 k[i] = bch2_bset_search_linear(b, t, search,
1360 packed_search, &p, k[i]);
1362 *pos++ = (struct btree_node_iter_set) {
1363 __btree_node_key_to_offset(b, k[i]),
1364 __btree_node_key_to_offset(b, end)
1368 bch2_btree_node_iter_sort(iter, b);
1371 void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
1374 struct bset_tree *t;
1376 memset(iter, 0, sizeof(*iter));
1379 __bch2_btree_node_iter_push(iter, b,
1380 btree_bkey_first(b, t),
1381 btree_bkey_last(b, t));
1382 bch2_btree_node_iter_sort(iter, b);
1385 struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
1387 struct bset_tree *t)
1389 struct btree_node_iter_set *set;
1391 btree_node_iter_for_each(iter, set)
1392 if (set->end == t->end_offset)
1393 return __btree_node_offset_to_key(b, set->k);
1395 return btree_bkey_last(b, t);
1398 static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
1404 if ((ret = (btree_node_iter_cmp(b,
1406 iter->data[first + 1]) > 0)))
1407 swap(iter->data[first], iter->data[first + 1]);
1411 void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
1414 /* unrolled bubble sort: */
1416 if (!__btree_node_iter_set_end(iter, 2)) {
1417 btree_node_iter_sort_two(iter, b, 0);
1418 btree_node_iter_sort_two(iter, b, 1);
1421 if (!__btree_node_iter_set_end(iter, 1))
1422 btree_node_iter_sort_two(iter, b, 0);
1425 void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
1426 struct btree_node_iter_set *set)
1428 struct btree_node_iter_set *last =
1429 iter->data + ARRAY_SIZE(iter->data) - 1;
1431 memmove(&set[0], &set[1], (void *) last - (void *) set);
1432 *last = (struct btree_node_iter_set) { 0, 0 };
1435 static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1438 iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;
1440 EBUG_ON(iter->data->k > iter->data->end);
1442 if (unlikely(__btree_node_iter_set_end(iter, 0))) {
1443 /* avoid an expensive memmove call: */
1444 iter->data[0] = iter->data[1];
1445 iter->data[1] = iter->data[2];
1446 iter->data[2] = (struct btree_node_iter_set) { 0, 0 };
1450 if (__btree_node_iter_set_end(iter, 1))
1453 if (!btree_node_iter_sort_two(iter, b, 0))
1456 if (__btree_node_iter_set_end(iter, 2))
1459 btree_node_iter_sort_two(iter, b, 1);
1462 void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
1465 if (bch2_expensive_debug_checks) {
1466 bch2_btree_node_iter_verify(iter, b);
1467 bch2_btree_node_iter_next_check(iter, b);
1470 __bch2_btree_node_iter_advance(iter, b);
1476 struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
1479 struct bkey_packed *k, *prev = NULL;
1480 struct btree_node_iter_set *set;
1481 struct bset_tree *t;
1484 if (bch2_expensive_debug_checks)
1485 bch2_btree_node_iter_verify(iter, b);
1487 for_each_bset(b, t) {
1488 k = bch2_bkey_prev_all(b, t,
1489 bch2_btree_node_iter_bset_pos(iter, b, t));
1491 (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
1493 end = t->end_offset;
1501 * We're manually memmoving instead of just calling sort() to ensure the
1502 * prev we picked ends up in slot 0 - sort won't necessarily put it
1503 * there because of duplicate deleted keys:
1505 btree_node_iter_for_each(iter, set)
1506 if (set->end == end)
1509 BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
1511 BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));
1513 memmove(&iter->data[1],
1515 (void *) set - (void *) &iter->data[0]);
1517 iter->data[0].k = __btree_node_key_to_offset(b, prev);
1518 iter->data[0].end = end;
1520 if (bch2_expensive_debug_checks)
1521 bch2_btree_node_iter_verify(iter, b);
1525 struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
1528 struct bkey_packed *prev;
1531 prev = bch2_btree_node_iter_prev_all(iter, b);
1532 } while (prev && bkey_deleted(prev));
1537 struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
1541 struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);
1543 return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
1548 void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
1550 struct bset_tree *t;
1552 for_each_bset(b, t) {
1553 enum bset_aux_tree_type type = bset_aux_tree_type(t);
1556 stats->sets[type].nr++;
1557 stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
1560 if (bset_has_ro_aux_tree(t)) {
1561 stats->floats += t->size - 1;
1563 for (j = 1; j < t->size; j++)
1565 bkey_float(b, t, j)->exponent ==
1571 void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
1572 struct bkey_packed *k)
1574 struct bset_tree *t = bch2_bkey_to_bset(b, k);
1576 unsigned j, inorder;
1578 if (!bset_has_ro_aux_tree(t))
1581 inorder = bkey_to_cacheline(b, t, k);
1582 if (!inorder || inorder >= t->size)
1585 j = __inorder_to_eytzinger1(inorder, t->size - 1, t->extra);
1586 if (k != tree_to_bkey(b, t, j))
1589 switch (bkey_float(b, t, j)->exponent) {
1591 uk = bkey_unpack_key(b, k);
1593 " failed unpacked at depth %u\n"
1596 bch2_bpos_to_text(out, uk.p);
1597 prt_printf(out, "\n");