#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "bkey.h"
#include "bkey_methods.h"
#include "btree_types.h"
#include "util.h" /* for time_stats */

/*
 * BKEYS:
 *
 * A bkey contains a key, a size field, a variable number of pointers, and some
 * ancillary flag bits.
 *
 * We use two different functions for validating bkeys, bkey_invalid() and
 * ptr_invalid().
 *
 * The one exception to the rule that ptr_invalid() filters out invalid keys is
 * that it also filters out keys of size 0 - these are keys that have been
 * completely overwritten. It'd be safe to delete these in memory while leaving
 * them on disk, just unnecessary work - so we filter them out when resorting
 * instead.
 *
 * We can't filter out stale keys when we're resorting, because garbage
 * collection needs to find them to ensure bucket gens don't wrap around -
 * unless we're rewriting the btree node those stale keys still exist on disk.
 *
 * We also implement functions here for removing some number of sectors from
 * the front or the back of a bkey - this is mainly used for fixing overlapping
 * extents, by removing the overlapping sectors from the older key.
 *
 * BSETS:
 *
 * A bset is an array of bkeys laid out contiguously in memory in sorted order,
 * along with a header. A btree node is made up of a number of these, written
 * at different times.
 *
 * There could be many of them on disk, but we never allow there to be more
 * than 4 in memory - we lazily resort as needed.
 *
 * We implement code here for creating and maintaining auxiliary search trees
 * (described below) for searching an individual bset, and on top of that we
 * implement a btree iterator.
 *
 * BTREE NODES:
 *
 * Most of the code in bcache doesn't care about an individual bset - it needs
 * to search entire btree nodes and iterate over them in sorted order.
 *
 * The btree iterator code serves both functions; it iterates through the keys
 * in a btree node in sorted order, starting from either keys after a specific
 * point (if you pass it a search key) or the start of the btree node.
 *
 * AUXILIARY SEARCH TREES:
 *
 * Since keys are variable length, we can't use a binary search on a bset - we
 * wouldn't be able to find the start of the next key. But binary searches are
 * slow anyway, due to terrible cache behaviour; bcache originally used binary
 * searches and that code topped out at under 50k lookups/second.
 *
 * So we need to construct some sort of lookup table. Since we only insert keys
 * into the last (unwritten) set, most of the keys within a given btree node are
 * usually in sets that are mostly constant. We use two different types of
 * lookup tables to take advantage of this.
 *
 * Both lookup tables share in common that they don't index every key in the
 * set; they index one key every BSET_CACHELINE bytes, and then a linear search
 * is used for the rest.
 *
 * For sets that have been written to disk and are no longer being inserted
 * into, we construct a binary search tree in an array - traversing a binary
 * search tree in an array gives excellent locality of reference and is very
 * fast, since both children of any node are adjacent to each other in memory
 * (and their grandchildren, and great grandchildren...) - this means
 * prefetching can be used to great effect.
 *
 * It's quite useful performance-wise to keep these nodes small - not just
 * because they're more likely to be in L2, but also because we can prefetch
 * more nodes on a single cacheline and thus prefetch more iterations in advance
 * when traversing this tree.
 *
 * Nodes in the auxiliary search tree must contain both a key to compare against
 * (we don't want to fetch the key from the set, that would defeat the purpose),
 * and a pointer to the key. We use a few tricks to compress both of these.
 *
 * To compress the pointer, we take advantage of the fact that one node in the
 * search tree corresponds to precisely BSET_CACHELINE bytes in the set. We have
 * a function (to_inorder()) that takes the index of a node in a binary tree and
 * returns what its index would be in an inorder traversal, so we only have to
 * store the low bits of the offset.
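 *
 * As an illustrative sketch (assuming a perfectly full tree of depth d; the
 * real to_inorder() must also handle a partially full last level), with
 * nodes numbered breadth-first starting from 1:
 *
 *	unsigned to_inorder_full(unsigned j, unsigned d)
 *	{
 *		unsigned level = fls(j);
 *
 *		j ^= 1U << (level - 1);			// clear the top bit
 *		return ((j << 1) | 1) << (d - level);	// inorder position
 *	}
 *
 * e.g. in a 7 node tree (d = 3) the root j = 1 lands at inorder position 4,
 * its left child j = 2 at position 2, and the leftmost leaf j = 4 at
 * position 1.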
 *
 * The key is 84 bits (KEY_DEV + key->key, the offset on the device). To
 * compress that, we take advantage of the fact that when we're traversing the
 * search tree at every iteration we know that both our search key and the key
 * we're looking for lie within some range - bounded by our previous
 * comparisons. (We special case the start of a search so that this is true even
 * at the root of the tree).
 *
 * So if we know the key we're looking for is between a and b, and a and b
 * don't differ higher than bit 50, we don't need to check anything higher than
 * bit 50.
 *
 * We don't usually need the rest of the bits, either; we only need enough bits
 * to partition the key range we're currently checking. Consider key n - the
 * key our auxiliary search tree node corresponds to, and key p, the key
 * immediately preceding n. The lowest bit we need to store in the auxiliary
 * search tree is the highest bit that differs between n and p.
 *
 * Note that this could be bit 0 - we might sometimes need all 80 bits to do the
 * comparison. But we'd really like our nodes in the auxiliary search tree to be
 * of fixed size.
 *
 * The solution is to make them fixed size, and when we're constructing a node
 * check if p and n differed in the bits we needed them to. If they didn't we
 * flag that node, and when doing lookups we fall back to comparing against the
 * real key. As long as this doesn't happen too often (and it seems to reliably
 * happen a bit less than 1% of the time), we win - even on failures, that key
 * is then more likely to be in cache than if we were doing binary searches all
 * the way, since we're touching so much less memory.
 *
 * The keys in the auxiliary search tree are stored in (software) floating
 * point, with an exponent and a mantissa. The exponent needs to be big enough
 * to address all the bits in the original key, but the number of bits in the
 * mantissa is somewhat arbitrary; more bits just gets us fewer failures.
 *
 * We need 7 bits for the exponent and 3 bits for the key's offset (since keys
 * are 8 byte aligned); using 22 bits for the mantissa means a node is 4 bytes.
 * We need one node per 128 bytes in the btree node, which means the auxiliary
 * search trees take up 3% as much memory as the btree itself.
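 *
 * A sketch of the node layout those numbers imply (the actual struct may
 * pack its fields differently):
 *
 *	struct bkey_float {
 *		unsigned	exponent:7;	// shift applied to the key
 *		unsigned	key_offset:3;	// low bits of the key's offset
 *		unsigned	mantissa:22;	// top bits of the shifted key
 *	};					// 32 bits total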
 *
 * Constructing these auxiliary search trees is moderately expensive, and we
 * don't want to be constantly rebuilding the search tree for the last set
 * whenever we insert another key into it. For the unwritten set, we use a much
 * simpler lookup table - it's just a flat array, so index i in the lookup table
 * corresponds to the i-th range of BSET_CACHELINE bytes in the set. Indexing
 * within each byte range works the same as with the auxiliary search trees.
 *
 * These are much easier to keep up to date when we insert a key - we do it
 * somewhat lazily; when we shift a key up we usually just increment the pointer
 * to it, and only when it would overflow do we go to the trouble of finding the
 * first key in that range of bytes again.
 */
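
/*
 * A sketch of the lazy fixup just described (table[] is a hypothetical array
 * of key offsets, one entry per BSET_CACHELINE bytes of the set):
 *
 *	// a key of len bytes was inserted in front of the key that entry i
 *	// points at, shifting that key up by len bytes:
 *	if (table[i] + len < (i + 1) * BSET_CACHELINE)
 *		table[i] += len;			// still in range i
 *	else
 *		table[i] = first_key_in_cacheline(i);	// hypothetical rescan
 */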

struct btree_node_iter;
struct btree_node_iter_set;

enum bset_aux_tree_type {
	BSET_NO_AUX_TREE,
	BSET_RO_AUX_TREE,
	BSET_RW_AUX_TREE,
};

#define BSET_TREE_NR_TYPES	3

#define BSET_NO_AUX_TREE_VAL	(U16_MAX)
#define BSET_RW_AUX_TREE_VAL	(U16_MAX - 1)

static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree *t)
{
	switch (t->extra) {
	case BSET_NO_AUX_TREE_VAL:
		return BSET_NO_AUX_TREE;
	case BSET_RW_AUX_TREE_VAL:
		return BSET_RW_AUX_TREE;
	default:
		return BSET_RO_AUX_TREE;
	}
}

typedef void (*compiled_unpack_fn)(struct bkey *, const struct bkey_packed *);

static inline struct bkey
bkey_unpack_key_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
	struct bkey dst;

#ifdef HAVE_BCACHE_COMPILED_UNPACK
	compiled_unpack_fn unpack_fn = b->aux_data;

	unpack_fn(&dst, src);

	if (IS_ENABLED(CONFIG_BCACHE_DEBUG)) {
		struct bkey dst2 = __bkey_unpack_key(&b->format, src);

		BUG_ON(memcmp(&dst, &dst2, sizeof(dst)));
	}
#else
	dst = __bkey_unpack_key(&b->format, src);
#endif
	return dst;
}

/*
 * bkey_unpack_key -- unpack just the key, not the value
 */
static inline struct bkey bkey_unpack_key(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_key_format_checked(b, src)
		: *packed_to_bkey_c(src);
}
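
/*
 * Usage sketch (illustrative, not part of the interface): comparing a
 * possibly-packed key's position against a search position without touching
 * the value - search_pos is a hypothetical struct bpos from the caller:
 *
 *	struct bkey u = bkey_unpack_key(b, k);
 *
 *	if (bkey_cmp(u.p, search_pos) >= 0)
 *		...
 */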

static inline struct bpos
bkey_unpack_pos_format_checked(const struct btree *b,
			       const struct bkey_packed *src)
{
#ifdef HAVE_BCACHE_COMPILED_UNPACK
	return bkey_unpack_key_format_checked(b, src).p;
#else
	return __bkey_unpack_pos(&b->format, src);
#endif
}

static inline struct bpos bkey_unpack_pos(const struct btree *b,
					  const struct bkey_packed *src)
{
	return likely(bkey_packed(src))
		? bkey_unpack_pos_format_checked(b, src)
		: packed_to_bkey_c(src)->p;
}

/* Disassembled bkeys */

static inline struct bkey_s_c bkey_disassemble(struct btree *b,
					       const struct bkey_packed *k,
					       struct bkey *u)
{
	*u = bkey_unpack_key(b, k);

	return (struct bkey_s_c) { u, bkeyp_val(&b->format, k), };
}

/* non const version: */
static inline struct bkey_s __bkey_disassemble(struct btree *b,
					       struct bkey_packed *k,
					       struct bkey *u)
{
	*u = bkey_unpack_key(b, k);

	return (struct bkey_s) { .k = u, .v = bkeyp_val(&b->format, k), };
}

#define for_each_bset(_b, _t)						\
	for (_t = (_b)->set; _t < (_b)->set + (_b)->nsets; _t++)
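
/*
 * Example (a sketch, not code from the implementation): walking every bset
 * in a node, e.g. to rebuild the ro auxiliary search trees after a resort -
 * this assumes the bool argument to bch_bset_build_aux_tree() selects a
 * writeable (rw) tree when true:
 *
 *	struct bset_tree *t;
 *
 *	for_each_bset(b, t)
 *		bch_bset_build_aux_tree(b, t, false);
 */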

extern bool bch_expensive_debug_checks;

static inline bool btree_keys_expensive_checks(struct btree *b)
{
#ifdef CONFIG_BCACHE_DEBUG
	return bch_expensive_debug_checks || *b->expensive_debug_checks;
#else
	return false;
#endif
}

static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
{
	return bset_aux_tree_type(t) == BSET_RO_AUX_TREE;
}

static inline bool bset_has_rw_aux_tree(struct bset_tree *t)
{
	return bset_aux_tree_type(t) == BSET_RW_AUX_TREE;
}

static inline void bch_bset_set_no_aux_tree(struct btree *b,
					    struct bset_tree *t)
{
	BUG_ON(t < b->set);

	for (; t < b->set + ARRAY_SIZE(b->set); t++) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		t->aux_data_offset = U16_MAX;
	}
}

static inline void btree_node_set_format(struct btree *b,
					 struct bkey_format f)
{
	int len;

	b->format = f;
	b->nr_key_bits = bkey_format_key_bits(&f);

	len = bch_compile_bkey_format(&b->format, b->aux_data);
	BUG_ON(len < 0 || len > U8_MAX);

	b->unpack_fn_len = len;

	bch_bset_set_no_aux_tree(b, b->set);
}

#define __set_bytes(_i, _u64s)	(sizeof(*(_i)) + (_u64s) * sizeof(u64))
#define set_bytes(_i)		__set_bytes(_i, (_i)->u64s)

#define __set_blocks(_i, _u64s, _block_bytes)				\
	DIV_ROUND_UP((size_t) __set_bytes((_i), (_u64s)), (_block_bytes))

#define set_blocks(_i, _block_bytes)					\
	__set_blocks((_i), (_i)->u64s, (_block_bytes))
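
/*
 * Worked example: a bset holding 1000 u64s of keys occupies
 * __set_bytes(i, 1000) = sizeof(struct bset) + 8000 bytes; with 512 byte
 * blocks, set_blocks() rounds that up to 16 blocks (assuming
 * sizeof(struct bset) <= 192).
 */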

static inline struct bset *bset_next_set(struct btree *b,
					 unsigned block_bytes)
{
	struct bset *i = btree_bset_last(b);

	EBUG_ON(!is_power_of_2(block_bytes));

	return ((void *) i) + round_up(set_bytes(i), block_bytes);
}

void bch_btree_keys_free(struct btree *);
int bch_btree_keys_alloc(struct btree *, unsigned, gfp_t);
void bch_btree_keys_init(struct btree *, bool *);

void bch_bset_init_first(struct btree *, struct bset *);
void bch_bset_init_next(struct btree *, struct bset *);
void bch_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);
void bch_bset_fix_invalidated_key(struct btree *, struct bset_tree *,
				  struct bkey_packed *);

void bch_bset_insert(struct btree *, struct btree_node_iter *,
		     struct bkey_packed *, struct bkey_i *, unsigned);
void bch_bset_delete(struct btree *, struct bkey_packed *, unsigned);

/* Bkey utility code */

/* packed or unpacked */
static inline int bkey_cmp_p_or_unp(const struct btree *b,
				    const struct bkey_packed *l,
				    const struct bkey_packed *r_packed,
				    const struct bpos *r)
{
	EBUG_ON(r_packed && !bkey_packed(r_packed));

	if (unlikely(!bkey_packed(l)))
		return bkey_cmp(packed_to_bkey_c(l)->p, *r);

	if (likely(r_packed))
		return __bkey_cmp_packed_format_checked(l, r_packed, b);

	return __bkey_cmp_left_packed_format_checked(b, l, r);
}

/* Returns true if @k is after iterator position @pos */
static inline bool btree_iter_pos_cmp(struct bpos pos, const struct bkey *k,
				      bool strictly_greater)
{
	int cmp = bkey_cmp(k->p, pos);

	return cmp > 0 ||
		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}

static inline bool btree_iter_pos_cmp_packed(const struct btree *b,
					     struct bpos *pos,
					     const struct bkey_packed *k,
					     bool strictly_greater)
{
	int cmp = bkey_cmp_left_packed(b, k, pos);

	return cmp > 0 ||
		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}

static inline bool btree_iter_pos_cmp_p_or_unp(const struct btree *b,
					struct bpos pos,
					const struct bkey_packed *pos_packed,
					const struct bkey_packed *k,
					bool strictly_greater)
{
	int cmp = bkey_cmp_p_or_unp(b, k, pos_packed, &pos);

	return cmp > 0 ||
		(cmp == 0 && !strictly_greater && !bkey_deleted(k));
}

static inline struct bkey_packed *bset_bkey_idx(struct bset *i, unsigned idx)
{
	return bkey_idx(i, idx);
}

struct bset_tree *bch_bkey_to_bset(struct btree *, struct bkey_packed *);
struct bkey_packed *bkey_prev_all(struct btree *, struct bset_tree *,
				  struct bkey_packed *);
struct bkey_packed *bkey_prev(struct btree *, struct bset_tree *,
			      struct bkey_packed *);

enum bch_extent_overlap {
	BCH_EXTENT_OVERLAP_ALL		= 0,
	BCH_EXTENT_OVERLAP_BACK		= 1,
	BCH_EXTENT_OVERLAP_FRONT	= 2,
	BCH_EXTENT_OVERLAP_MIDDLE	= 3,
};

/* Returns how k overlaps with m */
static inline enum bch_extent_overlap bch_extent_overlap(const struct bkey *k,
							 const struct bkey *m)
{
	int cmp1 = bkey_cmp(k->p, m->p) < 0;
	int cmp2 = bkey_cmp(bkey_start_pos(k),
			    bkey_start_pos(m)) > 0;

	return (cmp1 << 1) + cmp2;
}
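
/*
 * Worked example (a bkey's position is the *end* of the extent): if k spans
 * sectors [0, 10) and m spans [5, 15), then k->p = 10 < m->p = 15 gives
 * cmp1 = 1, and bkey_start_pos(k) = 0 > 5 is false so cmp2 = 0;
 * (1 << 1) + 0 = 2 = BCH_EXTENT_OVERLAP_FRONT - k overlaps the front of m.
 */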

/* Btree key iteration */

struct btree_node_iter {
	u8		is_extents;
	u16		used;

	struct btree_node_iter_set {
		u16	k, end;
	} data[MAX_BSETS];
};

static inline void __bch_btree_node_iter_init(struct btree_node_iter *iter,
					      bool is_extents)
{
	iter->used = 0;
	iter->is_extents = is_extents;
}

void bch_btree_node_iter_push(struct btree_node_iter *, struct btree *,
			      const struct bkey_packed *,
			      const struct bkey_packed *);
void bch_btree_node_iter_init(struct btree_node_iter *, struct btree *,
			      struct bpos, bool, bool);
void bch_btree_node_iter_init_from_start(struct btree_node_iter *,
					 struct btree *, bool);
struct bkey_packed *bch_btree_node_iter_bset_pos(struct btree_node_iter *,
						 struct btree *,
						 struct bset *);

void bch_btree_node_iter_sort(struct btree_node_iter *, struct btree *);
void bch_btree_node_iter_advance(struct btree_node_iter *, struct btree *);

#define btree_node_iter_for_each(_iter, _set)				\
	for (_set = (_iter)->data;					\
	     _set < (_iter)->data + (_iter)->used;			\
	     _set++)

static inline bool bch_btree_node_iter_end(struct btree_node_iter *iter)
{
	return !iter->used;
}

static inline int __btree_node_iter_cmp(bool is_extents,
					struct btree *b,
					struct bkey_packed *l,
					struct bkey_packed *r)
{
	/*
	 * For non extents, when keys compare equal the deleted keys have to
	 * come first - so that bch_btree_node_iter_next_check() can detect
	 * duplicate nondeleted keys (and possibly other reasons?)
	 *
	 * For extents, bkey_deleted() is used as a proxy for k->size == 0, so
	 * deleted keys have to sort last.
	 */
	return bkey_cmp_packed(b, l, r) ?: is_extents
		? (int) bkey_deleted(l) - (int) bkey_deleted(r)
		: (int) bkey_deleted(r) - (int) bkey_deleted(l);
}

static inline int btree_node_iter_cmp(struct btree_node_iter *iter,
				      struct btree *b,
				      struct btree_node_iter_set l,
				      struct btree_node_iter_set r)
{
	return __btree_node_iter_cmp(iter->is_extents, b,
				     __btree_node_offset_to_key(b, l.k),
				     __btree_node_offset_to_key(b, r.k));
}

static inline void __bch_btree_node_iter_push(struct btree_node_iter *iter,
					      struct btree *b,
					      const struct bkey_packed *k,
					      const struct bkey_packed *end)
{
	if (k != end)
		iter->data[iter->used++] = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
}

static inline struct bkey_packed *
__bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
			       struct btree *b)
{
	return __btree_node_offset_to_key(b, iter->data->k);
}

static inline struct bkey_packed *
bch_btree_node_iter_peek_all(struct btree_node_iter *iter,
			     struct btree *b)
{
	return bch_btree_node_iter_end(iter)
		? NULL
		: __bch_btree_node_iter_peek_all(iter, b);
}

static inline struct bkey_packed *
bch_btree_node_iter_peek(struct btree_node_iter *iter, struct btree *b)
{
	struct bkey_packed *ret;

	while ((ret = bch_btree_node_iter_peek_all(iter, b)) &&
	       bkey_deleted(ret))
		bch_btree_node_iter_advance(iter, b);

	return ret;
}

static inline struct bkey_packed *
bch_btree_node_iter_next_all(struct btree_node_iter *iter, struct btree *b)
{
	struct bkey_packed *ret = bch_btree_node_iter_peek_all(iter, b);

	if (ret)
		bch_btree_node_iter_advance(iter, b);

	return ret;
}

struct bkey_packed *bch_btree_node_iter_prev_all(struct btree_node_iter *,
						 struct btree *);
struct bkey_packed *bch_btree_node_iter_prev(struct btree_node_iter *,
					     struct btree *);

/*
 * Iterates over all _live_ keys - skipping deleted (and potentially
 * overwritten) keys
 */
#define for_each_btree_node_key(b, k, iter, _is_extents)		\
	for (bch_btree_node_iter_init_from_start((iter), (b), (_is_extents));\
	     ((k) = bch_btree_node_iter_peek(iter, b));			\
	     bch_btree_node_iter_advance(iter, b))
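
/*
 * Usage sketch - walking every live key in a node, unpacking just the key
 * part as we go (is_extents comes from the caller; process_key() is a
 * hypothetical callback):
 *
 *	struct btree_node_iter iter;
 *	struct bkey_packed *k;
 *
 *	for_each_btree_node_key(b, k, &iter, is_extents) {
 *		struct bkey u = bkey_unpack_key(b, k);
 *
 *		process_key(&u);
 *	}
 */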

struct bkey_s_c bch_btree_node_iter_peek_unpack(struct btree_node_iter *,
						struct btree *,
						struct bkey *);

#define for_each_btree_node_key_unpack(b, k, iter, _is_extents, unpacked)\
	for (bch_btree_node_iter_init_from_start((iter), (b), (_is_extents));\
	     (k = bch_btree_node_iter_peek_unpack((iter), (b), (unpacked))).k;\
	     bch_btree_node_iter_advance(iter, b))

static inline void btree_keys_account_key(struct btree_nr_keys *n,
					  unsigned bset,
					  struct bkey_packed *k,
					  int sign)
{
	n->live_u64s		+= k->u64s * sign;
	n->bset_u64s[bset]	+= k->u64s * sign;

	if (bkey_packed(k))
		n->packed_keys	+= sign;
	else
		n->unpacked_keys += sign;
}

#define btree_keys_account_key_add(_nr, _bset_idx, _k)		\
	btree_keys_account_key(_nr, _bset_idx, _k, 1)
#define btree_keys_account_key_drop(_nr, _bset_idx, _k)		\
	btree_keys_account_key(_nr, _bset_idx, _k, -1)
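
/*
 * Sketch: an insert path would account a newly added key against the bset
 * it landed in - assuming the node keeps its counts in b->nr and t is the
 * bset_tree the key was inserted into:
 *
 *	btree_keys_account_key_add(&b->nr, t - b->set, k);
 */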

struct bset_stats {
	struct {
		size_t nr;
		size_t bytes;
	} sets[BSET_TREE_NR_TYPES];

	size_t floats;
	size_t failed_unpacked;
	size_t failed_prev;
	size_t failed_overflow;
};

void bch_btree_keys_stats(struct btree *, struct bset_stats *);
int bch_bkey_print_bfloat(struct btree *, struct bkey_packed *,
			  char *, size_t);

void bch_dump_bset(struct btree *, struct bset *, unsigned);
void bch_dump_btree_node(struct btree *);
void bch_dump_btree_node_iter(struct btree *, struct btree_node_iter *);

#ifdef CONFIG_BCACHE_DEBUG

void __bch_verify_btree_nr_keys(struct btree *);
void bch_btree_node_iter_verify(struct btree_node_iter *, struct btree *);
void bch_verify_key_order(struct btree *, struct btree_node_iter *,
			  struct bkey_packed *);

#else

static inline void __bch_verify_btree_nr_keys(struct btree *b) {}
static inline void bch_btree_node_iter_verify(struct btree_node_iter *iter,
					      struct btree *b) {}
static inline void bch_verify_key_order(struct btree *b,
					struct btree_node_iter *iter,
					struct bkey_packed *where) {}

#endif

static inline void bch_verify_btree_nr_keys(struct btree *b)
{
	if (btree_keys_expensive_checks(b))
		__bch_verify_btree_nr_keys(b);
}