#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_iter.h"
#include "btree_locking.h"

#include <trace/events/bcache.h>
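
/*
 * Btree node IO: reading, validating and repairing nodes, and sorting,
 * compacting, checksumming and (optionally) encrypting bsets on the write
 * path.
 */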
static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHE_DEBUG
	struct bkey_packed *k;

	for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
		struct bkey l = bkey_unpack_key(b, k);
		struct bkey r = bkey_unpack_key(b, bkey_next(k));

		/* Extents may abut; all other keys must be strictly ordered: */
		BUG_ON(btree_node_is_extents(b)
		       ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
		       : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
		//BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
	}
#endif
}
static void clear_needs_whiteout(struct bset *i)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = false;
}

static void set_needs_whiteout(struct bset *i)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = true;
}
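
/*
 * Bounce buffers for sorting and compacting: try a plain page allocation
 * first (nowait, so the write path can't deadlock on it) and fall back to a
 * mempool that guarantees forward progress:
 */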
static void btree_bounce_free(struct bch_fs *c, unsigned order,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(virt_to_page(p), &c->btree_bounce_pool);
	else
		free_pages((unsigned long) p, order);
}

static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
				bool *used_mempool)
{
	void *p;

	BUG_ON(1 << order > btree_pages(c));

	*used_mempool = false;
	p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
	if (p)
		return p;

	*used_mempool = true;
	return page_address(mempool_alloc(&c->btree_bounce_pool, GFP_NOIO));
}
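
/*
 * A sort_iter merges keys from up to MAX_BSETS bsets plus the unwritten
 * whiteouts: data[] is kept ordered by @cmp, smallest current key first, so
 * repeatedly taking data[0] and re-sifting yields one sorted stream:
 */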
typedef int (*sort_cmp_fn)(struct btree *,
			   struct bkey_packed *,
			   struct bkey_packed *);

struct sort_iter {
	struct btree	*b;
	unsigned	used;

	struct sort_iter_set {
		struct bkey_packed *k, *end;
	} data[MAX_BSETS + 1];
};

static void sort_iter_init(struct sort_iter *iter, struct btree *b)
{
	memset(iter, 0, sizeof(*iter));
	iter->b = b;
}
static inline void __sort_iter_sift(struct sort_iter *iter,
				    unsigned from,
				    sort_cmp_fn cmp)
{
	unsigned i;

	for (i = from;
	     i + 1 < iter->used &&
	     cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
	     i++)
		swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
{
	__sort_iter_sift(iter, 0, cmp);
}
static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
	unsigned i = iter->used;

	while (i--)
		__sort_iter_sift(iter, i, cmp);
}

static void sort_iter_add(struct sort_iter *iter,
			  struct bkey_packed *k,
			  struct bkey_packed *end)
{
	BUG_ON(iter->used >= ARRAY_SIZE(iter->data));

	if (k != end)
		iter->data[iter->used++] = (struct sort_iter_set) { k, end };
}
static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
	return iter->used ? iter->data->k : NULL;
}

static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
	iter->data->k = bkey_next(iter->data->k);

	BUG_ON(iter->data->k > iter->data->end);

	if (iter->data->k == iter->data->end)
		memmove(&iter->data[0],
			&iter->data[1],
			sizeof(iter->data[0]) * --iter->used);
	else
		sort_iter_sift(iter, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
						 sort_cmp_fn cmp)
{
	struct bkey_packed *ret = sort_iter_peek(iter);

	if (ret)
		sort_iter_advance(iter, cmp);

	return ret;
}
static inline int sort_key_whiteouts_cmp(struct btree *b,
					 struct bkey_packed *l,
					 struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r);
}

static unsigned sort_key_whiteouts(struct bkey_packed *dst,
				   struct sort_iter *iter)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_key_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
static inline int sort_extent_whiteouts_cmp(struct btree *b,
					    struct bkey_packed *l,
					    struct bkey_packed *r)
{
	struct bkey ul = bkey_unpack_key(b, l);
	struct bkey ur = bkey_unpack_key(b, r);

	return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
}
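
/*
 * Extent whiteouts are zero size, KEY_TYPE_DISCARD keys sorted by start
 * position: adjacent/overlapping whiteouts are merged by extending the
 * previous one, as far as the packed format (and KEY_SIZE_MAX) allows:
 */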
static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
				      struct sort_iter *iter)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *out = dst;
	struct bkey_i l, r;
	bool prev = false, l_packed = false;
	u64 max_packed_size	= bkey_field_max(f, BKEY_FIELD_SIZE);
	u64 max_packed_offset	= bkey_field_max(f, BKEY_FIELD_OFFSET);
	u64 new_size;

	max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);

	sort_iter_sort(iter, sort_extent_whiteouts_cmp);

	while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
		EBUG_ON(bkeyp_val_u64s(f, in));
		EBUG_ON(in->type != KEY_TYPE_DISCARD);

		r.k = bkey_unpack_key(iter->b, in);

		if (prev &&
		    bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			new_size = l_packed
				? min(max_packed_size, max_packed_offset -
				      bkey_start_offset(&l.k))
				: KEY_SIZE_MAX;

			new_size = min(new_size, r.k.p.offset -
				       bkey_start_offset(&l.k));

			BUG_ON(new_size < l.k.size);

			bch_key_resize(&l.k, new_size);

			if (bkey_cmp(l.k.p, r.k.p) >= 0)
				continue;

			bch_cut_front(l.k.p, &r);
		}

		if (prev) {
			if (!bkey_pack(out, &l, f)) {
				BUG_ON(l_packed);
				bkey_copy(out, &l);
			}
			out = bkey_next(out);
		}

		l = r;
		prev = true;
		l_packed = bkey_packed(in);
	}

	if (prev) {
		if (!bkey_pack(out, &l, f)) {
			BUG_ON(l_packed);
			bkey_copy(out, &l);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
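
/*
 * Returns the number of dead u64s we'd reclaim by compacting this bset, 0
 * meaning don't bother: in lazy mode, only bsets over a quarter dead (or
 * unwritten ones, if we're compacting anyway) are worth rewriting:
 */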
static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
				    bool compacting,
				    enum compact_mode mode)
{
	unsigned live_u64s = b->nr.bset_u64s[t - b->set];
	unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);

	if (live_u64s == bset_u64s)
		return 0;

	if (mode == COMPACT_LAZY) {
		if (live_u64s * 4 < bset_u64s * 3 ||
		    (compacting && bset_unwritten(b, bset(b, t))))
			return bset_u64s - live_u64s;
	} else {
		if (bset_written(b, bset(b, t)))
			return bset_u64s - live_u64s;
	}

	return 0;
}
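
/*
 * Compact the bsets that should_compact_bset() flags: dead keys are dropped,
 * and whiteouts that are still needed are moved into the unwritten whiteout
 * area past the last bset, sorted (and, for extents, merged).
 * COMPACT_WRITTEN_NO_WRITE_LOCK is the read-lock-only write path variant: it
 * only collects whiteouts, without rewriting the bsets themselves:
 */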
bool __bch_compact_whiteouts(struct bch_fs *c, struct btree *b,
			     enum compact_mode mode)
{
	const struct bkey_format *f = &b->format;
	struct bset_tree *t;
	struct bkey_packed *whiteouts = NULL;
	struct bkey_packed *u_start, *u_pos;
	struct sort_iter sort_iter;
	unsigned order, whiteout_u64s = 0, u64s;
	bool used_mempool, compacting = false;

	for_each_bset(b, t)
		whiteout_u64s += should_compact_bset(b, t,
					whiteout_u64s != 0, mode);

	if (!whiteout_u64s)
		return false;

	sort_iter_init(&sort_iter, b);

	whiteout_u64s += b->whiteout_u64s;
	order = get_order(whiteout_u64s * sizeof(u64));

	whiteouts = btree_bounce_alloc(c, order, &used_mempool);
	u_start = u_pos = whiteouts;

	memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
		    b->whiteout_u64s);
	u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);

	sort_iter_add(&sort_iter, u_start, u_pos);

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && bset_unwritten(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (!should_compact_bset(b, t, compacting, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		compacting = true;
		u_start = u_pos;
		start = i->start;
		end = vstruct_last(i);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (bkey_deleted(k) && btree_node_is_extents(b))
				continue;

			if (bkey_whiteout(k) && !k->needs_whiteout)
				continue;

			if (bkey_whiteout(k)) {
				unreserve_whiteout(b, t, k);
				memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
				set_bkeyp_val_u64s(f, u_pos, 0);
				u_pos = bkey_next(u_pos);
			} else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		sort_iter_add(&sort_iter, u_start, u_pos);

		if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
			i->u64s = cpu_to_le16((u64 *) out - i->_data);
			set_btree_bset_end(b, t);
			bch_bset_set_no_aux_tree(b, t);
		}
	}

	b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;

	BUG_ON((void *) unwritten_whiteouts_start(c, b) <
	       (void *) btree_bkey_last(b, bset_tree_last(b)));

	u64s = btree_node_is_extents(b)
		? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
					&sort_iter)
		: sort_key_whiteouts(unwritten_whiteouts_start(c, b),
				     &sort_iter);

	BUG_ON(u64s > b->whiteout_u64s);
	BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
	BUG_ON(u_pos != whiteouts && !u64s);

	if (u64s != b->whiteout_u64s) {
		void *src = unwritten_whiteouts_start(c, b);

		b->whiteout_u64s = u64s;
		memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
	}

	verify_no_dups(b,
		       unwritten_whiteouts_start(c, b),
		       unwritten_whiteouts_end(c, b));

	btree_bounce_free(c, order, used_mempool, whiteouts);

	if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
		bch_btree_build_aux_trees(b);

	bch_btree_keys_u64s_remaining(c, b);
	bch_verify_btree_nr_keys(b);

	return true;
}
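
/*
 * Drop whiteouts from the in-memory bsets after a write; returns true if
 * anything changed, i.e. if iterators pointing into @b were invalidated:
 */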
static bool bch_drop_whiteouts(struct btree *b)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;

		if (!should_compact_bset(b, t, true, true))
			continue;

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (bset_unwritten(b, i) &&
		    t != b->set) {
			struct bset *dst =
			       max_t(struct bset *, write_block(b),
				     (void *) btree_bkey_last(b, t - 1));

			memmove(dst, i, sizeof(struct bset));
			i = dst;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (!bkey_whiteout(k)) {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		bch_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch_verify_btree_nr_keys(b);

	return ret;
}
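
/*
 * Keys at the same position sort whiteouts first, needs_whiteout false before
 * true: duplicate whiteouts are then merged by sort_keys(), which ors
 * needs_whiteout into the survivor:
 */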
static inline int sort_keys_cmp(struct btree *b,
				struct bkey_packed *l,
				struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
		(int) l->needs_whiteout - (int) r->needs_whiteout;
}
static unsigned sort_keys(struct bkey_packed *dst,
			  struct sort_iter *iter,
			  bool filter_whiteouts)
{
	const struct bkey_format *f = &iter->b->format;
	struct bkey_packed *in, *next, *out = dst;

	sort_iter_sort(iter, sort_keys_cmp);

	while ((in = sort_iter_next(iter, sort_keys_cmp))) {
		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		if (bkey_whiteout(in) &&
		    (next = sort_iter_peek(iter)) &&
		    !bkey_cmp_packed(iter->b, in, next)) {
			BUG_ON(in->needs_whiteout &&
			       next->needs_whiteout);
			/*
			 * XXX racy, called with read lock from write path
			 *
			 * leads to spurious BUG_ON() in bkey_unpack_key() in
			 * debug mode
			 */
			next->needs_whiteout |= in->needs_whiteout;
			continue;
		}

		if (bkey_whiteout(in)) {
			memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
			set_bkeyp_val_u64s(f, out, 0);
		} else {
			bkey_copy(out, in);
		}
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
static inline int sort_extents_cmp(struct btree *b,
				   struct bkey_packed *l,
				   struct bkey_packed *r)
{
	return bkey_cmp_packed(b, l, r) ?:
		(int) bkey_deleted(l) - (int) bkey_deleted(r);
}

static unsigned sort_extents(struct bkey_packed *dst,
			     struct sort_iter *iter,
			     bool filter_whiteouts)
{
	struct bkey_packed *in, *out = dst;

	sort_iter_sort(iter, sort_extents_cmp);

	while ((in = sort_iter_next(iter, sort_extents_cmp))) {
		if (bkey_deleted(in))
			continue;

		if (bkey_whiteout(in) &&
		    (filter_whiteouts || !in->needs_whiteout))
			continue;

		bkey_copy(out, in);
		out = bkey_next(out);
	}

	return (u64 *) out - (u64 *) dst;
}
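
/*
 * Merge sort bsets [start_idx, end_idx) into a single new bset; when sorting
 * the entire node the bounce buffer is node sized, so we can swap buffers
 * instead of copying the result back:
 */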
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    struct btree_iter *iter,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time;
	unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	order = sorting_entire_node
		? btree_page_order(c)
		: get_order(__vstruct_bytes(struct btree_node, u64s));

	out = btree_bounce_alloc(c, order, &used_mempool);

	start_time = local_clock();

	if (btree_node_is_extents(b))
		filter_whiteouts = bset_written(b, start_bset);

	u64s = btree_node_is_extents(b)
		? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
		: sort_keys(out->keys.start, &sort_iter, filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));

	if (sorting_entire_node)
		bch_time_stats_update(&c->btree_sort_time, start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx + 1;
	     t < b->set + end_idx;
	     t++)
		start_bset->journal_seq =
			max(start_bset->journal_seq,
			    bset(b, t)->journal_seq);

	if (sorting_entire_node) {
		unsigned u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(order != btree_page_order(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, order, used_mempool, out);

	bch_verify_btree_nr_keys(b);
}
/* Sort + repack in a new format: */
static struct btree_nr_keys sort_repack(struct bset *dst,
					struct btree *src,
					struct btree_node_iter *src_iter,
					struct bkey_format *out_f,
					bool filter_whiteouts)
{
	struct bkey_format *in_f = &src->format;
	struct bkey_packed *in, *out = vstruct_last(dst);
	struct btree_nr_keys nr;

	memset(&nr, 0, sizeof(nr));

	while ((in = bch_btree_node_iter_next_all(src_iter, src))) {
		if (filter_whiteouts && bkey_whiteout(in))
			continue;

		if (bch_bkey_transform(out_f, out, bkey_packed(in)
				       ? in_f : &bch_bkey_format_current, in))
			out->format = KEY_FORMAT_LOCAL_BTREE;
		else
			bkey_unpack(src, (void *) out, in);

		btree_keys_account_key_add(&nr, 0, out);
		out = bkey_next(out);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);

	return nr;
}
/* Sort, repack, and merge: */
static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
					      struct bset *dst,
					      struct btree *src,
					      struct btree_node_iter *iter,
					      struct bkey_format *out_f,
					      bool filter_whiteouts,
					      key_filter_fn filter,
					      key_merge_fn merge)
{
	struct bkey_packed *k, *prev = NULL, *out;
	struct btree_nr_keys nr;
	BKEY_PADDED(k) tmp;

	memset(&nr, 0, sizeof(nr));

	while ((k = bch_btree_node_iter_next_all(iter, src))) {
		if (filter_whiteouts && bkey_whiteout(k))
			continue;

		/*
		 * The filter might modify pointers, so we have to unpack the
		 * key and values to &tmp.k:
		 */
		bkey_unpack(src, &tmp.k, k);

		if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
			continue;

		/* prev is always unpacked, for key merging: */

		if (prev &&
		    merge &&
		    merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
			continue;

		/*
		 * the current key becomes the new prev: advance prev, then
		 * copy the current key - but first pack prev (in place):
		 */
		if (prev) {
			bkey_pack(prev, (void *) prev, out_f);

			btree_keys_account_key_add(&nr, 0, prev);
			prev = bkey_next(prev);
		} else {
			prev = vstruct_last(dst);
		}

		bkey_copy(prev, &tmp.k);
	}

	if (prev) {
		bkey_pack(prev, (void *) prev, out_f);
		btree_keys_account_key_add(&nr, 0, prev);
		out = bkey_next(prev);
	} else {
		out = vstruct_last(dst);
	}

	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);

	return nr;
}
void bch_btree_sort_into(struct bch_fs *c,
			 struct btree *dst,
			 struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch_bset_set_no_aux_tree(dst, dst->set);

	bch_btree_node_iter_init_from_start(&src_iter, src,
					    btree_node_is_extents(src));

	if (btree_node_ops(src)->key_normalize ||
	    btree_node_ops(src)->key_merge)
		nr = sort_repack_merge(c, btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true,
				btree_node_ops(src)->key_normalize,
				btree_node_ops(src)->key_merge);
	else
		nr = sort_repack(btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);

	bch_time_stats_update(&c->btree_sort_time, start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch_verify_btree_nr_keys(dst);
}
#define SORT_CRIT	(4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there are currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b,
			       struct btree_iter *iter)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
			break;

	/* Sort the unwritten bsets together: */
	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, iter, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	/* Sort the written bsets together: */
	if (unwritten_idx > 1) {
		btree_node_sort(c, b, iter, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}
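
/*
 * Rebuild the auxiliary search trees: only the last bset, and only if it's
 * still unwritten (i.e. still being appended to), gets the read-write
 * variant:
 */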
void bch_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch_bset_build_aux_tree(b, t,
				bset_unwritten(b, bset(b, t)) &&
				t == bset_tree_last(b));
}
/*
 * @bch_btree_init_next - initialize a new (unwritten) bset that can then be
 * appended to.
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If compacting sorted bsets together (invalidating iterators), @iter is
 * reinitialized on @b.
 */
void bch_btree_init_next(struct bch_fs *c, struct btree *b,
			 struct btree_iter *iter)
{
	struct btree_node_entry *bne;
	bool did_sort;

	EBUG_ON(!(b->lock.state.seq & 1));
	EBUG_ON(iter && iter->nodes[b->level] != b);

	did_sort = btree_node_compact(c, b, iter);

	bne = want_new_bset(c, b);
	if (bne)
		bch_bset_init_next(b, &bne->keys);

	bch_btree_build_aux_trees(b);

	if (iter && did_sort)
		bch_btree_iter_reinit_node(iter, b);
}
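
/*
 * Nonce for checksumming/encrypting a bset: mixes the bset's sector offset
 * within the node with the node's seq and the bset's journal_seq, so distinct
 * bsets get distinct nonces:
 */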
static struct nonce btree_nonce(struct btree *b,
				struct bset *i,
				unsigned offset)
{
	return (struct nonce) {{
		[0] = cpu_to_le32(offset),
		[1] = ((__le32 *) &i->seq)[0],
		[2] = ((__le32 *) &i->seq)[1],
		[3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
	}};
}

static void bset_encrypt(struct bch_fs *c, struct bset *i, struct nonce nonce)
{
	bch_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
		    vstruct_end(i) - (void *) i->_data);
}
#define btree_node_error(b, c, ptr, fmt, ...)				\
	bch_fs_inconsistent(c,						\
		"btree node error at btree %u level %u/%u bucket %zu block %u u64s %u: " fmt,\
		(b)->btree_id, (b)->level, btree_node_root(c, b)	\
			    ? btree_node_root(c, b)->level : -1,	\
		PTR_BUCKET_NR(ca, ptr), (b)->written,			\
		le16_to_cpu((i)->u64s), ##__VA_ARGS__)
static const char *validate_bset(struct bch_fs *c, struct btree *b,
				 struct bch_dev *ca,
				 const struct bch_extent_ptr *ptr,
				 struct bset *i, unsigned sectors,
				 unsigned *whiteout_u64s)
{
	struct bkey_packed *k, *prev = NULL;
	struct bpos prev_pos = POS_MIN;
	bool seen_non_whiteout = false;

	if (le16_to_cpu(i->version) != BCACHE_BSET_VERSION)
		return "unsupported bset version";

	if (b->written + sectors > c->sb.btree_node_size)
		return "bset past end of btree node";

	if (i != &b->data->keys && !i->u64s)
		btree_node_error(b, c, ptr, "empty set");

	if (!BSET_SEPARATE_WHITEOUTS(i)) {
		seen_non_whiteout = true;
		*whiteout_u64s = 0;
	}

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s_c u;
		struct bkey tmp;
		const char *invalid;

		if (!k->u64s) {
			btree_node_error(b, c, ptr,
				"KEY_U64s 0: %zu bytes of metadata lost",
				vstruct_end(i) - (void *) k);

			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (bkey_next(k) > vstruct_last(i)) {
			btree_node_error(b, c, ptr,
					 "key extends past end of bset");

			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (k->format > KEY_FORMAT_CURRENT) {
			btree_node_error(b, c, ptr,
					 "invalid bkey format %u", k->format);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
			bch_bkey_swab(btree_node_type(b), &b->format, k);

		u = bkey_disassemble(b, k, &tmp);

		invalid = btree_bkey_invalid(c, b, u);
		if (invalid) {
			char buf[160];

			bch_bkey_val_to_text(c, btree_node_type(b),
					     buf, sizeof(buf), u);
			btree_node_error(b, c, ptr,
					 "invalid bkey %s: %s", buf, invalid);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/*
		 * with the separate whiteouts thing (used for extents), the
		 * second set of keys actually can have whiteouts too, so we
		 * can't solely go off bkey_whiteout()...
		 */

		if (!seen_non_whiteout &&
		    (!bkey_whiteout(k) ||
		     (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
			*whiteout_u64s = k->_data - i->_data;
			seen_non_whiteout = true;
		} else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
			btree_node_error(b, c, ptr,
					 "keys out of order: %llu:%llu > %llu:%llu",
					 prev_pos.inode,
					 prev_pos.offset,
					 u.k->p.inode,
					 bkey_start_offset(u.k));
			/* XXX: repair this */
		}

		prev_pos = u.k->p;
		prev = k;
		k = bkey_next(k);
	}

	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	return NULL;
}
static bool extent_contains_ptr(struct bkey_s_c_extent e,
				struct bch_extent_ptr match)
{
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (!memcmp(ptr, &match, sizeof(*ptr)))
			return true;

	return false;
}
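
/*
 * Read and validate a node: checksum (and decrypt) each bset, sanity check
 * the header against the pointer we read from, then mergesort all the bsets -
 * resolving overlapping extents - into a single sorted bset:
 */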
void bch_btree_node_read_done(struct bch_fs *c, struct btree *b,
			      struct bch_dev *ca,
			      const struct bch_extent_ptr *ptr)
{
	struct btree_node_entry *bne;
	struct bset *i = &b->data->keys;
	struct btree_node_iter *iter;
	struct btree_node *sorted;
	bool used_mempool;
	unsigned u64s;
	const char *err;
	struct bch_csum csum;
	struct nonce nonce;
	int ret;

	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
	__bch_btree_node_iter_init(iter, btree_node_is_extents(b));

	err = "dynamic fault";
	if (bch_meta_read_fault("btree"))
		goto err;

	while (b->written < c->sb.btree_node_size) {
		unsigned sectors, whiteout_u64s = 0;

		if (!b->written) {
			i = &b->data->keys;

			err = "bad magic";
			if (le64_to_cpu(b->data->magic) != bset_magic(c))
				goto err;

			err = "bad btree header";
			if (!b->data->keys.seq)
				goto err;

			err = "unknown checksum type";
			if (!bch_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
				goto err;

			/* XXX: retry checksum errors */

			nonce = btree_nonce(b, i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

			err = "bad checksum";
			if (bch_crc_cmp(csum, b->data->csum))
				goto err;

			bch_encrypt(c, BSET_CSUM_TYPE(i), nonce,
				    &b->data->flags,
				    (void *) &b->data->keys -
				    (void *) &b->data->flags);
			nonce = nonce_add(nonce,
					  round_up((void *) &b->data->keys -
						   (void *) &b->data->flags,
						   CHACHA20_BLOCK_SIZE));
			bset_encrypt(c, i, nonce);

			sectors = vstruct_sectors(b->data, c->block_bits);

			if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
				u64 *p = (u64 *) &b->data->ptr;

				*p = swab64(*p);
				bch_bpos_swab(&b->data->min_key);
				bch_bpos_swab(&b->data->max_key);
			}

			err = "incorrect btree id";
			if (BTREE_NODE_ID(b->data) != b->btree_id)
				goto err;

			err = "incorrect level";
			if (BTREE_NODE_LEVEL(b->data) != b->level)
				goto err;

			err = "incorrect max key";
			if (bkey_cmp(b->data->max_key, b->key.k.p))
				goto err;

			err = "incorrect backpointer";
			if (!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
						 *ptr))
				goto err;

			err = bch_bkey_format_validate(&b->data->format);
			if (err)
				goto err;

			set_btree_bset(b, b->set, &b->data->keys);

			btree_node_set_format(b, b->data->format);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			err = "unknown checksum type";
			if (!bch_checksum_type_valid(c, BSET_CSUM_TYPE(i)))
				goto err;

			nonce = btree_nonce(b, i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			err = "bad checksum";
			if (memcmp(&csum, &bne->csum, sizeof(csum)))
				goto err;

			bset_encrypt(c, i, nonce);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		err = validate_bset(c, b, ca, ptr, i, sectors, &whiteout_u64s);
		if (err)
			goto err;

		b->written += sectors;

		err = "insufficient memory";
		ret = bch_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
		if (ret < 0)
			goto err;

		if (ret)
			continue;

		__bch_btree_node_iter_push(iter, b,
					   i->start,
					   vstruct_idx(i, whiteout_u64s));

		__bch_btree_node_iter_push(iter, b,
					   vstruct_idx(i, whiteout_u64s),
					   vstruct_last(i));
	}

	err = "corrupted btree";
	for (bne = write_block(b);
	     bset_byte_offset(b, bne) < btree_bytes(c);
	     bne = (void *) bne + block_bytes(c))
		if (bne->keys.seq == b->data->keys.seq)
			goto err;

	sorted = btree_bounce_alloc(c, ilog2(btree_pages(c)), &used_mempool);
	sorted->keys.u64s = 0;

	b->nr = btree_node_is_extents(b)
		? bch_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
		: bch_key_sort_fix_overlapping(&sorted->keys, b, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, ilog2(btree_pages(c)), used_mempool, sorted);

	bch_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b));

	btree_node_reset_sib_u64s(b);
out:
	mempool_free(iter, &c->fill_iter);
	return;
err:
	set_btree_node_read_error(b);
	btree_node_error(b, c, ptr, "%s", err);
	goto out;
}
static void btree_node_read_endio(struct bio *bio)
{
	closure_put(bio->bi_private);
}
void bch_btree_node_read(struct bch_fs *c, struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;
	struct extent_pick_ptr pick;

	trace_bcache_btree_read(c, b);

	closure_init_stack(&cl);

	pick = bch_btree_pick_ptr(c, b);
	if (bch_fs_fatal_err_on(!pick.ca, c,
				"no cache device for btree node")) {
		set_btree_node_read_error(b);
		return;
	}

	bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_read_bio);
	bio->bi_bdev		= pick.ca->disk_sb.bdev;
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_iter.bi_size	= btree_bytes(c);
	bio->bi_end_io		= btree_node_read_endio;
	bio->bi_private		= &cl;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META|READ_SYNC);

	bch_bio_map(bio, b->data);

	closure_get(&cl);
	bch_generic_make_request(bio, c);
	closure_sync(&cl);

	if (bch_dev_fatal_io_err_on(bio->bi_error,
			pick.ca, "IO error reading bucket %zu",
			PTR_BUCKET_NR(pick.ca, &pick.ptr)) ||
	    bch_meta_read_fault("btree")) {
		set_btree_node_read_error(b);
		goto out;
	}

	bch_btree_node_read_done(c, b, pick.ca, &pick.ptr);
	bch_time_stats_update(&c->btree_read_time, start_time);
out:
	bio_put(bio);
	percpu_ref_put(&pick.ca->io_ref);
}
int bch_btree_root_read(struct bch_fs *c, enum btree_id id,
			const struct bkey_i *k, unsigned level)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = mca_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = mca_alloc(c);
	mca_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(mca_hash_insert(c, b, level, id));

	bch_btree_node_read(c, b);
	six_unlock_write(&b->lock);

	if (btree_node_read_error(b)) {
		six_unlock_intent(&b->lock);
		return -EIO;
	}

	bch_btree_set_root_initial(c, b, NULL);
	six_unlock_intent(&b->lock);

	return 0;
}
void bch_btree_complete_write(struct bch_fs *c, struct btree *b,
			      struct btree_write *w)
{
	bch_journal_pin_drop(&c->journal, &w->journal);
	closure_wake_up(&w->wait);
}
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);

	/*
	 * Before calling bch_btree_complete_write() - if the write errored, we
	 * have to halt new journal writes before they see this btree node
	 * write as completed:
	 */
	if (btree_node_write_error(b))
		bch_journal_halt(&c->journal);

	bch_btree_complete_write(c, b, w);
	btree_node_io_unlock(b);
}
static void btree_node_write_endio(struct bio *bio)
{
	struct btree *b = bio->bi_private;
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_fs *c	= wbio->c;
	struct bio *orig	= wbio->split ? wbio->orig : NULL;
	struct closure *cl	= !wbio->split ? wbio->cl : NULL;
	struct bch_dev *ca	= wbio->ca;

	if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "btree write") ||
	    bch_meta_write_fault("btree"))
		set_btree_node_write_error(b);

	if (wbio->bounce)
		btree_bounce_free(c,
			wbio->order,
			wbio->used_mempool,
			page_address(bio->bi_io_vec[0].bv_page));

	if (wbio->put_bio)
		bio_put(bio);

	if (orig) {
		bio_endio(orig);
	} else {
		btree_node_write_done(c, b);
		if (cl)
			closure_put(cl);
	}

	if (ca)
		percpu_ref_put(&ca->io_ref);
}
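
/*
 * Write out all bsets that haven't been written yet: they're sorted together
 * into one contiguous bset in a bounce buffer, checksummed (and encrypted),
 * then submitted at the node's current write offset. The dirty and
 * write_in_flight bits in b->flags serialize concurrent writers, so this may
 * be called with only a read lock held:
 */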
void __bch_btree_node_write(struct bch_fs *c, struct btree *b,
			    struct closure *parent,
			    enum six_lock_type lock_type_held,
			    int idx_to_write)
{
	struct bio *bio;
	struct bch_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	BKEY_PADDED(key) k;
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct sort_iter sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	void *data;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if (idx_to_write >= 0 &&
		    idx_to_write != !!(old & (1 << BTREE_NODE_write_idx)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight)) {
			wait_on_bit_io(&b->flags,
				       BTREE_NODE_write_in_flight,
				       TASK_UNINTERRUPTIBLE);
			continue;
		}

		new &= ~(1 << BTREE_NODE_dirty);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	BUG_ON(!list_empty(&b->write_blocked));

	BUG_ON(b->written >= c->sb.btree_node_size);
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	if (lock_type_held == SIX_LOCK_intent) {
		six_lock_write(&b->lock);
		__bch_compact_whiteouts(c, b, COMPACT_WRITTEN);
		six_unlock_write(&b->lock);
	} else {
		__bch_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
	}

	BUG_ON(b->uncompacted_whiteout_u64s);

	sort_iter_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	order = get_order(bytes);
	data = btree_bounce_alloc(c, order, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq = cpu_to_le64(seq);
	i->u64s = 0;

	if (!btree_node_is_extents(b)) {
		sort_iter_add(&sort_iter,
			      unwritten_whiteouts_start(c, b),
			      unwritten_whiteouts_end(c, b));
		SET_BSET_SEPARATE_WHITEOUTS(i, false);
	} else {
		memcpy_u64s(i->start,
			    unwritten_whiteouts_start(c, b),
			    b->whiteout_u64s);
		i->u64s = cpu_to_le16(b->whiteout_u64s);
		SET_BSET_SEPARATE_WHITEOUTS(i, true);
	}

	b->whiteout_u64s = 0;

	u64s = btree_node_is_extents(b)
		? sort_extents(vstruct_last(i), &sort_iter, false)
		: sort_keys(i->start, &sort_iter, false);
	le16_add_cpu(&i->u64s, u64s);

	clear_needs_whiteout(i);

	if (b->written && !i->u64s) {
		/* Nothing to write: */
		btree_bounce_free(c, order, used_mempool, data);
		btree_node_write_done(c, b);
		return;
	}

	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = cpu_to_le16(BCACHE_BSET_VERSION);
	SET_BSET_CSUM_TYPE(i, bch_meta_checksum_type(c));

	nonce = btree_nonce(b, i, b->written << 9);

	if (bn) {
		bch_encrypt(c, BSET_CSUM_TYPE(i), nonce,
			    &bn->flags,
			    (void *) &b->data->keys -
			    (void *) &b->data->flags);
		nonce = nonce_add(nonce,
				  round_up((void *) &b->data->keys -
					   (void *) &b->data->flags,
					   CHACHA20_BLOCK_SIZE));
		bset_encrypt(c, i, nonce);

		nonce = btree_nonce(b, i, b->written << 9);
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	} else {
		bset_encrypt(c, i, nonce);

		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
	}

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > c->sb.btree_node_size);

	trace_bcache_btree_write(b, bytes_to_write, sectors_to_write);

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Make sure to update b->written so bch_btree_init_next() doesn't
	 * break:
	 */
	if (bch_journal_error(&c->journal) ||
	    c->opts.nochanges) {
		set_btree_node_noevict(b);
		b->written += sectors_to_write;

		btree_bounce_free(c, order, used_mempool, data);
		btree_node_write_done(c, b);
		return;
	}

	bio = bio_alloc_bioset(GFP_NOIO, 1 << order, &c->bio_write);

	wbio = to_wbio(bio);
	wbio->cl		= parent;
	wbio->bounce		= true;
	wbio->put_bio		= true;
	wbio->order		= order;
	wbio->used_mempool	= used_mempool;
	bio->bi_iter.bi_size	= sectors_to_write << 9;
	bio->bi_end_io		= btree_node_write_endio;
	bio->bi_private		= b;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META|WRITE_SYNC|REQ_FUA);

	if (parent)
		closure_get(parent);

	bch_bio_map(bio, data);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	e = bkey_i_to_s_extent(&k.key);

	extent_for_each_ptr(e, ptr)
		ptr->offset += b->written;

	extent_for_each_ptr(e, ptr)
		atomic64_add(sectors_to_write,
			     &c->devs[ptr->dev]->btree_sectors_written);

	b->written += sectors_to_write;

	bch_submit_wbio_replicas(wbio, c, &k.key, true);
}
/*
 * Work that must be done with write lock held:
 */
bool bch_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);
	BUG_ON(b->uncompacted_whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_unwritten()/bset_written() don't
	 * work - the amount of data we had to write after compaction might have
	 * been smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, NULL, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch_drop_whiteouts(b);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t));

	bch_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch_bset_init_next(b, &bne->keys);

	bch_btree_build_aux_trees(b);

	return invalidated_iter;
}
/*
 * Use this one if the node is intent locked:
 */
void bch_btree_node_write(struct bch_fs *c, struct btree *b,
			  struct closure *parent,
			  enum six_lock_type lock_type_held,
			  int idx_to_write)
{
	BUG_ON(lock_type_held == SIX_LOCK_write);

	if (lock_type_held == SIX_LOCK_intent ||
	    six_trylock_convert(&b->lock, SIX_LOCK_read,
				SIX_LOCK_intent)) {
		__bch_btree_node_write(c, b, parent, SIX_LOCK_intent, idx_to_write);

		six_lock_write(&b->lock);
		bch_btree_post_write_cleanup(c, b);
		six_unlock_write(&b->lock);

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->lock);
	} else {
		__bch_btree_node_write(c, b, parent, SIX_LOCK_read, idx_to_write);
	}
}
static void bch_btree_node_write_dirty(struct bch_fs *c, struct btree *b,
				       struct closure *parent)
{
	six_lock_read(&b->lock);

	bch_btree_node_write(c, b, parent, SIX_LOCK_read, -1);
	six_unlock_read(&b->lock);
}
/*
 * Write all dirty btree nodes to disk, including roots
 */
void bch_btree_flush(struct bch_fs *c)
{
	struct closure cl;
	struct btree *b;
	struct bucket_table *tbl;
	struct rhash_head *pos;
	bool dropped_lock;
	unsigned i;

	closure_init_stack(&cl);

	rcu_read_lock();

	do {
		dropped_lock = false;
		i = 0;
restart:
		tbl = rht_dereference_rcu(c->btree_cache_table.tbl,
					  &c->btree_cache_table);

		for (; i < tbl->size; i++)
			rht_for_each_entry_rcu(b, pos, tbl, i, hash)
				/*
				 * XXX - locking for b->level, when called from
				 * bch_journal_move()
				 */
				if (!b->level && btree_node_dirty(b)) {
					rcu_read_unlock();
					bch_btree_node_write_dirty(c, b, &cl);
					dropped_lock = true;
					rcu_read_lock();
					goto restart;
				}
	} while (dropped_lock);

	rcu_read_unlock();

	closure_sync(&cl);
}
/*
 * bch_btree_node_flush_journal_entries - flush any journal entries that
 * contain keys from this node
 *
 * The bset's journal sequence number is used for preserving ordering of index
 * updates across unclean shutdowns - it's used to ignore bsets newer than the
 * most recent journal entry.
 *
 * But when rewriting btree nodes we compact all the bsets in a btree node - and
 * if we compacted a bset that should be ignored with bsets we do need, that
 * would be bad. So to avoid that, prior to making the new node visible ensure
 * that the journal has been flushed so that all the bsets we compacted should
 * have been ignored:
 */
void bch_btree_node_flush_journal_entries(struct bch_fs *c,
					  struct btree *b,
					  struct closure *cl)
{
	int i = b->nsets;

	/*
	 * Journal sequence numbers in the different bsets will always be in
	 * ascending order, we only need to flush the highest - except that the
	 * most recent bset might not have a journal sequence number yet, so we
	 * need to loop:
	 */
	while (i--) {
		u64 seq = le64_to_cpu(bset(b, &b->set[i])->journal_seq);

		if (seq) {
			bch_journal_flush_seq_async(&c->journal, seq, cl);
			break;
		}
	}
}