#ifndef _BCACHEFS_BSET_H
#define _BCACHEFS_BSET_H

#include <linux/kernel.h>
#include <linux/types.h>

#include "bcachefs_format.h"
#include "bkey.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "util.h" /* for time_stats */
#include "vstructs.h"

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bkey_invalid and
 * bkey_deleted().
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk, just unnecessary work - so we filter them out when resorting
 * instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node, those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from the
 * front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS (& different sets of keys):
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written at
 * different times.
 *
 * There could be many of them on disk, but we never allow there to be more than
 * 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE ITERATOR:
 *
 * Most of the code in bcachefs doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables have in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
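 *
 * A lookup therefore has two phases, roughly like the following sketch
 * (illustrative pseudocode only - lookup_table_search() is a made-up name,
 * and the real implementation lives in bset.c):
 *
 *	cacheline = lookup_table_search(t, search_key);
 *	k = first key indexed in that cacheline;
 *	while (k compares less than search_key)
 *		k = bkey_next(k);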
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
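 *
 * In other words the tree is stored implicitly, as in a classic binary heap:
 * under that assumption, descending to a node's children is pure arithmetic
 * (illustrative sketch only):
 *
 *	left  = tree[2 * j];
 *	right = tree[2 * j + 1];
 *
 * and both children land on the same, possibly already-prefetched, cacheline.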
 *
 * It's quite useful, performance-wise, to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in advance
 * when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare against
 * (we don't want to fetch the key from the set, that would defeat the purpose),
 * and a pointer to the key. We use a few tricks to compress both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
 * a function (to_inorder()) that takes the index of a node in a binary tree and
 * returns what its index would be in an inorder traversal, so we only have to
 * store the low bits of the offset.
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true even
 * at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ in any bits above bit 50, we don't need to check anything above
 * bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking. Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
 * comparison. But we'd really like our nodes in the auxiliary search tree to be
 * of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check if p and n differed in the bits we needed them to. If they don't we
 * flag that node, and when doing lookups we fall back to comparing against the
 * real key. As long as this doesn't happen too often (and it seems to reliably
 * happen a bit less than 1% of the time), we win - even on failures, that key
 * is then more likely to be in cache than if we were doing binary searches all
 * the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
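 *
 * Working that arithmetic out: 7 + 3 + 22 = 32 bits, i.e. a 4 byte node, and
 * 4 bytes of lookup metadata per 128 bytes of keys is 4/128 = 3.125% - the
 * ~3% figure above.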
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup table
 * corresponds to the i'th range of BSET_CACHELINE bytes in the set. Indexing
 * within each byte range works the same as with the auxiliary search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the pointer
 * to it, and only when it would overflow do we go to the trouble of finding the
 * first key in that range of bytes again.
 */
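
/*
 * A sketch of that lazy fixup for the flat (read-write) table - hypothetical
 * names, not the real implementation:
 *
 *	if (table[j] + shift < BSET_CACHELINE)
 *		table[j] += shift;
 *	else
 *		table[j] = recomputed offset of first key in cacheline j;
 */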

extern bool bch2_expensive_debug_checks;

static inline bool btree_keys_expensive_checks(const struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	return bch2_expensive_debug_checks || *b->expensive_debug_checks;
#else
	return false;
#endif
}

enum bset_aux_tree_type {
	BSET_NO_AUX_TREE,
	BSET_RO_AUX_TREE,
	BSET_RW_AUX_TREE,
};

#define BSET_TREE_NR_TYPES	3

#define BSET_NO_AUX_TREE_VAL	(U16_MAX)
#define BSET_RW_AUX_TREE_VAL	(U16_MAX - 1)

static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree *t)
{
	switch (t->extra) {
	case BSET_NO_AUX_TREE_VAL:
		return BSET_NO_AUX_TREE;
	case BSET_RW_AUX_TREE_VAL:
		return BSET_RW_AUX_TREE;
	default:
		return BSET_RO_AUX_TREE;
	}
}

typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);

static inline void
__bkey_unpack_key_format_checked(const struct btree *b,
				 struct bkey *dst,
				 const struct bkey_packed *src)
{
#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
	compiled_unpack_fn unpack_fn = b->aux_data;

	unpack_fn(dst, src);

	if (btree_keys_expensive_checks(b)) {
		struct bkey dst2 = __bch2_bkey_unpack_key(&b->format, src);

		/*
		 * hack around a harmless race when compacting whiteouts
		 * for a write:
		 */
		dst2.needs_whiteout = dst->needs_whiteout;

		BUG_ON(memcmp(dst, &dst2, sizeof(*dst)));
	}
#else
	*dst = __bch2_bkey_unpack_key(&b->format, src);
#endif
}

static inline struct bkey
bkey_unpack_key_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
	struct bkey dst;

	__bkey_unpack_key_format_checked(b, &dst, src);
	return dst;
}

static inline void __bkey_unpack_key(const struct btree *b,
				     struct bkey *dst,
				     const struct bkey_packed *src)
{
	if (likely(bkey_packed(src)))
		__bkey_unpack_key_format_checked(b, dst, src);
	else
		*dst = *packed_to_bkey_c(src);
}

/**
 * bkey_unpack_key -- unpack just the key, not the value
 */
static inline struct bkey bkey_unpack_key(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_key_format_checked(b, src)
		: *packed_to_bkey_c(src);
}

static inline struct bpos
bkey_unpack_pos_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
#ifdef HAVE_BCACHEFS_COMPILED_UNPACK
	return bkey_unpack_key_format_checked(b, src).p;
#else
	return __bkey_unpack_pos(&b->format, src);
#endif
}

static inline struct bpos bkey_unpack_pos(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_pos_format_checked(b, src)
		: packed_to_bkey_c(src)->p;
}

/* Disassembled bkeys */

static inline struct bkey_s_c bkey_disassemble(struct btree *b,
					       const struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
}

/* non const version: */
static inline struct bkey_s __bkey_disassemble(struct btree *b,
					       struct bkey_packed *k,
					       struct bkey *u)
{
	__bkey_unpack_key(b, u, k);

	return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
}

#define for_each_bset(_b, _t)						\
	for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
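
/*
 * Example usage (illustrative only) - walk every bset in a btree node:
 *
 *	struct bset_tree *t;
 *
 *	for_each_bset(b, t)
 *		if (bset_has_ro_aux_tree(t))
 *			...
 */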

static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
{
	return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
}

static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
{
	return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
}

static inline void bch2_bset_set_no_aux_tree(struct btree *b,
					     struct bset_tree *t)
{
	BUG_ON(t < b->set);

	for (; t < b->set + ARRAY_SIZE(b->set); t++) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		t->aux_data_offset = U16_MAX;
	}
}

static inline void btree_node_set_format(struct btree *b,
					 struct bkey_format f)
{
	int len;

	b->format	= f;
	b->nr_key_bits	= bkey_format_key_bits(&f);

	len = bch2_compile_bkey_format(&b->format, b->aux_data);
	BUG_ON(len < 0 || len > U8_MAX);

	b->unpack_fn_len = len;

	bch2_bset_set_no_aux_tree(b, b->set);
}

static inline struct bset *bset_next_set(struct btree *b,
					 unsigned block_bytes)
{
	struct bset *i = btree_bset_last(b);

	EBUG_ON(!is_power_of_2(block_bytes));

	return ((void *) i) + round_up(vstruct_bytes(i), block_bytes);
}

void bch2_btree_keys_free(struct btree *);
int bch2_btree_keys_alloc(struct btree *, unsigned, gfp_t);
void bch2_btree_keys_init(struct btree *, bool *);

void bch2_bset_init_first(struct btree *, struct bset *);
void bch2_bset_init_next(struct bch_fs *, struct btree *,
			 struct btree_node_entry *);
void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
void bch2_bset_fix_invalidated_key(struct btree *, struct bkey_packed *);

void bch2_bset_insert(struct btree *, struct btree_node_iter *,
		      struct bkey_packed *, struct bkey_i *, unsigned);
void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);

/* Bkey utility code */

/* packed or unpacked */
static inline int bkey_cmp_p_or_unp(const struct btree *b,
				    const struct bkey_packed *l,
				    const struct bkey_packed *r_packed,
				    struct bpos *r)
{
	EBUG_ON(r_packed && !bkey_packed(r_packed));

	if (unlikely(!bkey_packed(l)))
		return bkey_cmp(packed_to_bkey_c(l)->p, *r);

	if (likely(r_packed))
		return __bch2_bkey_cmp_packed_format_checked(l, r_packed, b);

	return __bch2_bkey_cmp_left_packed_format_checked(b, l, r);
}

struct bset_tree *bch2_bkey_to_bset(struct btree *, struct bkey_packed *);

struct bkey_packed *bch2_bkey_prev_filter(struct btree *, struct bset_tree *,
					  struct bkey_packed *, unsigned);

static inline struct bkey_packed *
bch2_bkey_prev_all(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
	return bch2_bkey_prev_filter(b, t, k, 0);
}

static inline struct bkey_packed *
bch2_bkey_prev(struct btree *b, struct bset_tree *t, struct bkey_packed *k)
{
	return bch2_bkey_prev_filter(b, t, k, KEY_TYPE_DISCARD + 1);
}

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
							  const struct bkey *m)
{
	int cmp1 = bkey_cmp(k->p, m->p) < 0;
	int cmp2 = bkey_cmp(bkey_start_pos(k),
			    bkey_start_pos(m)) > 0;

	return (cmp1 << 1) + cmp2;
}
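
/*
 * Worked example (illustrative): with m spanning sectors [0, 100) and k
 * spanning [50, 150), k->p (the end position) compares >= m->p so cmp1 = 0,
 * and k's start compares > m's start so cmp2 = 1 - giving
 * BCH_EXTENT_OVERLAP_BACK: k overlaps the back of m.
 */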

/* Btree key iteration */

void bch2_btree_node_iter_push(struct btree_node_iter *, struct btree *,
			       const struct bkey_packed *,
			       const struct bkey_packed *);
void bch2_btree_node_iter_init(struct btree_node_iter *, struct btree *,
			       struct bpos);
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *,
					  struct btree *);
struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *,
						  struct btree *,
						  struct bset_tree *);

void bch2_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
void bch2_btree_node_iter_set_drop(struct btree_node_iter *,
				   struct btree_node_iter_set *);
void bch2_btree_node_iter_advance(struct btree_node_iter *, struct btree *);

#define btree_node_iter_for_each(_iter, _set)				\
	for (_set = (_iter)->data;					\
	     _set < (_iter)->data + ARRAY_SIZE((_iter)->data) &&	\
	     (_set)->k != (_set)->end;					\
	     _set++)
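
/*
 * Example usage (illustrative only) - walk the iterator's constituent
 * (bset, position) pairs:
 *
 *	struct btree_node_iter_set *set;
 *
 *	btree_node_iter_for_each(iter, set)
 *		prefetch(__btree_node_offset_to_key(b, set->k));
 */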

static inline bool __btree_node_iter_set_end(struct btree_node_iter *iter,
					     unsigned i)
{
	return iter->data[i].k == iter->data[i].end;
}

static inline bool bch2_btree_node_iter_end(struct btree_node_iter *iter)
{
	return __btree_node_iter_set_end(iter, 0);
}

/*
 * When keys compare equal, deleted keys compare first:
 *
 * XXX: only need to compare pointers for keys that are both within a
 * btree_node_iterator - we need to break ties for prev() to work correctly
 */
static inline int bkey_iter_cmp(struct btree *b,
				const struct bkey_packed *l,
				const struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r)
		?: (int) bkey_deleted(r) - (int) bkey_deleted(l)
		?: (l > r) - (l < r);
}
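
/*
 * Note: `x ?: y` is the GNU "elvis" extension - it evaluates to x when x is
 * nonzero, else y - so the chain above falls through to each successive
 * tiebreaker only when the previous comparison returned 0.
 */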

static inline int btree_node_iter_cmp(struct btree *b,
				      struct btree_node_iter_set l,
				      struct btree_node_iter_set r)
{
	return bkey_iter_cmp(b,
			     __btree_node_offset_to_key(b, l.k),
			     __btree_node_offset_to_key(b, r.k));
}

/* These assume l (the search key) is not a deleted key: */
static inline int bkey_iter_pos_cmp(struct btree *b,
				    struct bpos *l,
				    const struct bkey_packed *r)
{
	return -bkey_cmp_left_packed(b, r, l)
		?: (int) bkey_deleted(r);
}

static inline int bkey_iter_cmp_p_or_unp(struct btree *b,
					 struct bpos *l,
					 const struct bkey_packed *l_packed,
					 const struct bkey_packed *r)
{
	return -bkey_cmp_p_or_unp(b, r, l_packed, l)
		?: (int) bkey_deleted(r);
}

static inline struct bkey_packed *
__bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
				struct btree *b)
{
	return __btree_node_offset_to_key(b, iter->data->k);
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek_filter(struct btree_node_iter *iter,
				 struct btree *b,
				 unsigned min_key_type)
{
	while (!bch2_btree_node_iter_end(iter)) {
		struct bkey_packed *k = __bch2_btree_node_iter_peek_all(iter, b);

		if (k->type >= min_key_type)
			return k;

		bch2_btree_node_iter_advance(iter, b);
	}

	return NULL;
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek_all(struct btree_node_iter *iter,
			      struct btree *b)
{
	return bch2_btree_node_iter_peek_filter(iter, b, 0);
}

static inline struct bkey_packed *
bch2_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
{
	return bch2_btree_node_iter_peek_filter(iter, b, KEY_TYPE_DISCARD + 1);
}

static inline struct bkey_packed *
bch2_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
{
	struct bkey_packed *ret = bch2_btree_node_iter_peek_all(iter, b);

	if (ret)
		bch2_btree_node_iter_advance(iter, b);

	return ret;
}

struct bkey_packed *bch2_btree_node_iter_prev_filter(struct btree_node_iter *,
						     struct btree *, unsigned);

static inline struct bkey_packed *
bch2_btree_node_iter_prev_all(struct btree_node_iter *iter, struct btree *b)
{
	return bch2_btree_node_iter_prev_filter(iter, b, 0);
}

static inline struct bkey_packed *
bch2_btree_node_iter_prev(struct btree_node_iter *iter, struct btree *b)
{
	return bch2_btree_node_iter_prev_filter(iter, b, KEY_TYPE_DISCARD + 1);
}

struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *,
						 struct btree *,
						 struct bkey *);

#define for_each_btree_node_key_unpack(b, k, iter, unpacked)		\
	for (bch2_btree_node_iter_init_from_start((iter), (b));	\
	     (k = bch2_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
	     bch2_btree_node_iter_advance(iter, b))
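
/*
 * Example usage (illustrative only) - iterate over every key in a node in
 * sorted order:
 *
 *	struct btree_node_iter iter;
 *	struct bkey unpacked;
 *	struct bkey_s_c k;
 *
 *	for_each_btree_node_key_unpack(b, k, &iter, &unpacked)
 *		pr_info("key at %llu\n", k.k->p.offset);
 */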

/* Accounting: */

static inline void btree_keys_account_key(struct btree_nr_keys *n,
					  unsigned bset,
					  struct bkey_packed *k,
					  int sign)
{
	n->live_u64s		+= k->u64s * sign;
	n->bset_u64s[bset]	+= k->u64s * sign;

	if (bkey_packed(k))
		n->packed_keys	+= sign;
	else
		n->unpacked_keys += sign;
}

#define btree_keys_account_key_add(_nr, _bset_idx, _k)		\
	btree_keys_account_key(_nr, _bset_idx, _k, 1)
#define btree_keys_account_key_drop(_nr, _bset_idx, _k)	\
	btree_keys_account_key(_nr, _bset_idx, _k, -1)

#define btree_account_key_add(_b, _k)				\
	btree_keys_account_key(&(_b)->nr,			\
		bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, 1)
#define btree_account_key_drop(_b, _k)				\
	btree_keys_account_key(&(_b)->nr,			\
		bch2_bkey_to_bset(_b, _k) - (_b)->set, _k, -1)
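
/*
 * The sign parameter lets the add and drop variants share one implementation:
 * adding a key passes +1, deleting one passes -1, so the same statements move
 * every counter in either direction.
 */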

struct bset_stats {
	struct {
		size_t nr, bytes;
	} sets[BSET_TREE_NR_TYPES];

	size_t floats;
	size_t failed_unpacked;
	size_t failed_prev;
	size_t failed_overflow;
};

void bch2_btree_keys_stats(struct btree *, struct bset_stats *);
void bch2_bfloat_to_text(struct printbuf *, struct btree *,
			 struct bkey_packed *);

/* Debug stuff */

void bch2_dump_bset(struct btree *, struct bset *, unsigned);
void bch2_dump_btree_node(struct btree *);
void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);

#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *);
void bch2_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
void bch2_verify_insert_pos(struct btree *, struct bkey_packed *,
			    struct bkey_packed *, unsigned);

#else

static inline void __bch2_verify_btree_nr_keys(struct btree *b) {}
static inline void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
					       struct btree *b) {}
static inline void bch2_verify_insert_pos(struct btree *b,
					  struct bkey_packed *where,
					  struct bkey_packed *insert,
					  unsigned clobber_u64s) {}

#endif

static inline void bch2_verify_btree_nr_keys(struct btree *b)
{
	if (btree_keys_expensive_checks(b))
		__bch2_verify_btree_nr_keys(b);
}

#endif /* _BCACHEFS_BSET_H */