3 #include "bkey_methods.h"
4 #include "btree_cache.h"
6 #include "btree_iter.h"
7 #include "btree_locking.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
19 #include <trace/events/bcachefs.h>
21 static void verify_no_dups(struct btree *b,
22 struct bkey_packed *start,
23 struct bkey_packed *end)
25 #ifdef CONFIG_BCACHEFS_DEBUG
26 struct bkey_packed *k;
28 for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
29 struct bkey l = bkey_unpack_key(b, k);
30 struct bkey r = bkey_unpack_key(b, bkey_next(k));
32 BUG_ON(btree_node_is_extents(b)
33 ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
34 : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
35 //BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
40 static void clear_needs_whiteout(struct bset *i)
42 struct bkey_packed *k;
44 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
45 k->needs_whiteout = false;
48 static void set_needs_whiteout(struct bset *i)
50 struct bkey_packed *k;
52 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
53 k->needs_whiteout = true;
56 static void btree_bounce_free(struct bch_fs *c, unsigned order,
57 bool used_mempool, void *p)
60 mempool_free(p, &c->btree_bounce_pool);
62 vpfree(p, PAGE_SIZE << order);
65 static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
70 BUG_ON(order > btree_page_order(c));
72 *used_mempool = false;
73 p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
78 return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
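/*
 * Bounce buffers for sorting/compacting: we first try an opportunistic page
 * allocation (GFP_NOWAIT, __GFP_NOWARN) and fall back to the preallocated
 * btree_bounce_pool if that fails; *used_mempool records which path was
 * taken so that btree_bounce_free() above knows whether to mempool_free()
 * or vpfree() the buffer.
 */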
81 typedef int (*sort_cmp_fn)(struct btree *,
83 struct bkey_packed *);
89 struct sort_iter_set {
90 struct bkey_packed *k, *end;
91 } data[MAX_BSETS + 1];
94 static void sort_iter_init(struct sort_iter *iter, struct btree *b)
96 memset(iter, 0, sizeof(*iter));
100 static inline void __sort_iter_sift(struct sort_iter *iter,
107 i + 1 < iter->used &&
108 cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
110 swap(iter->data[i], iter->data[i + 1]);
113 static inline void sort_iter_sift(struct sort_iter *iter, sort_cmp_fn cmp)
116 __sort_iter_sift(iter, 0, cmp);
119 static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
121 unsigned i = iter->used;
124 __sort_iter_sift(iter, i, cmp);
127 static void sort_iter_add(struct sort_iter *iter,
128 struct bkey_packed *k,
129 struct bkey_packed *end)
131 BUG_ON(iter->used >= ARRAY_SIZE(iter->data));
134 iter->data[iter->used++] = (struct sort_iter_set) { k, end };
137 static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
139 return iter->used ? iter->data->k : NULL;
142 static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
144 iter->data->k = bkey_next(iter->data->k);
146 BUG_ON(iter->data->k > iter->data->end);
148 if (iter->data->k == iter->data->end)
149 array_remove_item(iter->data, iter->used, 0);
151 sort_iter_sift(iter, cmp);
154 static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
157 struct bkey_packed *ret = sort_iter_peek(iter);
160 sort_iter_advance(iter, cmp);
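/*
 * Rough summary of the sort_iter machinery above: data[] holds up to
 * MAX_BSETS + 1 already-sorted runs of packed keys, and __sort_iter_sift()
 * keeps data[0] pointing at the run whose current key compares smallest, so
 * sort_iter_peek() is O(1); sort_iter_advance()/sort_iter_next() step
 * data[0] forward, drop it once exhausted, and re-sift - i.e. a small N-way
 * merge driven by whichever comparator the caller passes in.
 */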
165 static inline int sort_key_whiteouts_cmp(struct btree *b,
166 struct bkey_packed *l,
167 struct bkey_packed *r)
169 return bkey_cmp_packed(b, l, r);
172 static unsigned sort_key_whiteouts(struct bkey_packed *dst,
173 struct sort_iter *iter)
175 struct bkey_packed *in, *out = dst;
177 sort_iter_sort(iter, sort_key_whiteouts_cmp);
179 while ((in = sort_iter_next(iter, sort_key_whiteouts_cmp))) {
181 out = bkey_next(out);
184 return (u64 *) out - (u64 *) dst;
187 static inline int sort_extent_whiteouts_cmp(struct btree *b,
188 struct bkey_packed *l,
189 struct bkey_packed *r)
191 struct bkey ul = bkey_unpack_key(b, l);
192 struct bkey ur = bkey_unpack_key(b, r);
194 return bkey_cmp(bkey_start_pos(&ul), bkey_start_pos(&ur));
197 static unsigned sort_extent_whiteouts(struct bkey_packed *dst,
198 struct sort_iter *iter)
200 const struct bkey_format *f = &iter->b->format;
201 struct bkey_packed *in, *out = dst;
203 bool prev = false, l_packed = false;
204 u64 max_packed_size = bkey_field_max(f, BKEY_FIELD_SIZE);
205 u64 max_packed_offset = bkey_field_max(f, BKEY_FIELD_OFFSET);
208 max_packed_size = min_t(u64, max_packed_size, KEY_SIZE_MAX);
210 sort_iter_sort(iter, sort_extent_whiteouts_cmp);
212 while ((in = sort_iter_next(iter, sort_extent_whiteouts_cmp))) {
213 EBUG_ON(bkeyp_val_u64s(f, in));
214 EBUG_ON(in->type != KEY_TYPE_DISCARD);
216 r.k = bkey_unpack_key(iter->b, in);
219 bkey_cmp(l.k.p, bkey_start_pos(&r.k)) >= 0) {
220 if (bkey_cmp(l.k.p, r.k.p) >= 0)
224 ? min(max_packed_size, max_packed_offset -
225 bkey_start_offset(&l.k))
228 new_size = min(new_size, r.k.p.offset -
229 bkey_start_offset(&l.k));
231 BUG_ON(new_size < l.k.size);
233 bch2_key_resize(&l.k, new_size);
235 if (bkey_cmp(l.k.p, r.k.p) >= 0)
238 bch2_cut_front(l.k.p, &r);
242 if (!bch2_bkey_pack(out, &l, f)) {
246 out = bkey_next(out);
251 l_packed = bkey_packed(in);
255 if (!bch2_bkey_pack(out, &l, f)) {
259 out = bkey_next(out);
262 return (u64 *) out - (u64 *) dst;
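/*
 * Extent whiteouts are merged as they are sorted: when the previous
 * whiteout's end overlaps the start of the next one we try to extend the
 * previous whiteout instead of emitting both, limited by what the packed
 * format can represent (bkey_field_max() for the size/offset fields, capped
 * at KEY_SIZE_MAX).
 */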
265 static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
267 enum compact_mode mode)
269 unsigned live_u64s = b->nr.bset_u64s[t - b->set];
270 unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
272 if (live_u64s == bset_u64s)
275 if (mode == COMPACT_LAZY) {
276 if (live_u64s * 4 < bset_u64s * 3 ||
277 (compacting && bset_unwritten(b, bset(b, t))))
278 return bset_u64s - live_u64s;
280 if (bset_written(b, bset(b, t)))
281 return bset_u64s - live_u64s;
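/*
 * Compaction heuristic: in COMPACT_LAZY mode a bset is only worth compacting
 * if less than 75% of it is live (live_u64s * 4 < bset_u64s * 3), or if we
 * are already compacting and this bset is still unwritten; written bsets in
 * the other modes always report their dead space. E.g. a bset with 800 of
 * 1000 u64s live is left alone in lazy mode, one with 700 live is compacted.
 */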
287 bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
288 enum compact_mode mode)
290 const struct bkey_format *f = &b->format;
292 struct bkey_packed *whiteouts = NULL;
293 struct bkey_packed *u_start, *u_pos;
294 struct sort_iter sort_iter;
295 unsigned order, whiteout_u64s = 0, u64s;
296 bool used_mempool, compacting = false;
299 whiteout_u64s += should_compact_bset(b, t,
300 whiteout_u64s != 0, mode);
305 sort_iter_init(&sort_iter, b);
307 whiteout_u64s += b->whiteout_u64s;
308 order = get_order(whiteout_u64s * sizeof(u64));
310 whiteouts = btree_bounce_alloc(c, order, &used_mempool);
311 u_start = u_pos = whiteouts;
313 memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
315 u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);
317 sort_iter_add(&sort_iter, u_start, u_pos);
319 for_each_bset(b, t) {
320 struct bset *i = bset(b, t);
321 struct bkey_packed *k, *n, *out, *start, *end;
322 struct btree_node_entry *src = NULL, *dst = NULL;
324 if (t != b->set && bset_unwritten(b, i)) {
325 src = container_of(i, struct btree_node_entry, keys);
326 dst = max(write_block(b),
327 (void *) btree_bkey_last(b, t - 1));
330 if (!should_compact_bset(b, t, compacting, mode)) {
332 memmove(dst, src, sizeof(*src) +
333 le16_to_cpu(src->keys.u64s) *
336 set_btree_bset(b, t, i);
344 end = vstruct_last(i);
347 memmove(dst, src, sizeof(*src));
349 set_btree_bset(b, t, i);
354 for (k = start; k != end; k = n) {
357 if (bkey_deleted(k) && btree_node_is_extents(b))
360 if (bkey_whiteout(k) && !k->needs_whiteout)
363 if (bkey_whiteout(k)) {
364 unreserve_whiteout(b, t, k);
365 memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
366 set_bkeyp_val_u64s(f, u_pos, 0);
367 u_pos = bkey_next(u_pos);
368 } else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
370 out = bkey_next(out);
374 sort_iter_add(&sort_iter, u_start, u_pos);
376 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
377 i->u64s = cpu_to_le16((u64 *) out - i->_data);
378 set_btree_bset_end(b, t);
379 bch2_bset_set_no_aux_tree(b, t);
383 b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
385 BUG_ON((void *) unwritten_whiteouts_start(c, b) <
386 (void *) btree_bkey_last(b, bset_tree_last(b)));
388 u64s = btree_node_is_extents(b)
389 ? sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
391 : sort_key_whiteouts(unwritten_whiteouts_start(c, b),
394 BUG_ON(u64s > b->whiteout_u64s);
395 BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
396 BUG_ON(u_pos != whiteouts && !u64s);
398 if (u64s != b->whiteout_u64s) {
399 void *src = unwritten_whiteouts_start(c, b);
401 b->whiteout_u64s = u64s;
402 memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
406 unwritten_whiteouts_start(c, b),
407 unwritten_whiteouts_end(c, b));
409 btree_bounce_free(c, order, used_mempool, whiteouts);
411 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
412 bch2_btree_build_aux_trees(b);
414 bch_btree_keys_u64s_remaining(c, b);
415 bch2_verify_btree_nr_keys(b);
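/*
 * __bch2_compact_whiteouts(), in outline: estimate the dead space per bset,
 * bounce-allocate a scratch buffer, copy the whiteouts (plus any existing
 * unwritten whiteouts) into it, drop dead keys from the bsets themselves
 * (except in COMPACT_WRITTEN_NO_WRITE_LOCK mode, where we only hold a read
 * lock and must leave the bsets untouched), then sort the collected
 * whiteouts back into the unwritten-whiteout area so they are emitted with
 * the next write.
 */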
420 static bool bch2_drop_whiteouts(struct btree *b)
425 for_each_bset(b, t) {
426 struct bset *i = bset(b, t);
427 struct bkey_packed *k, *n, *out, *start, *end;
429 if (!should_compact_bset(b, t, true, true))
432 start = btree_bkey_first(b, t);
433 end = btree_bkey_last(b, t);
435 if (bset_unwritten(b, i) &&
438 max_t(struct bset *, write_block(b),
439 (void *) btree_bkey_last(b, t - 1));
441 memmove(dst, i, sizeof(struct bset));
443 set_btree_bset(b, t, i);
448 for (k = start; k != end; k = n) {
451 if (!bkey_whiteout(k)) {
453 out = bkey_next(out);
457 i->u64s = cpu_to_le16((u64 *) out - i->_data);
458 bch2_bset_set_no_aux_tree(b, t);
462 bch2_verify_btree_nr_keys(b);
467 static inline int sort_keys_cmp(struct btree *b,
468 struct bkey_packed *l,
469 struct bkey_packed *r)
471 return bkey_cmp_packed(b, l, r) ?:
472 (int) bkey_whiteout(r) - (int) bkey_whiteout(l) ?:
473 (int) l->needs_whiteout - (int) r->needs_whiteout;
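/*
 * At equal positions this comparator sorts whiteouts before the keys they
 * overwrite, and keys that don't need a whiteout before those that do, so
 * that sort_keys() below sees a whiteout immediately ahead of its victim and
 * can propagate needs_whiteout forward when dropping the duplicate.
 */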
476 static unsigned sort_keys(struct bkey_packed *dst,
477 struct sort_iter *iter,
478 bool filter_whiteouts)
480 const struct bkey_format *f = &iter->b->format;
481 struct bkey_packed *in, *next, *out = dst;
483 sort_iter_sort(iter, sort_keys_cmp);
485 while ((in = sort_iter_next(iter, sort_keys_cmp))) {
486 if (bkey_whiteout(in) &&
487 (filter_whiteouts || !in->needs_whiteout))
490 if (bkey_whiteout(in) &&
491 (next = sort_iter_peek(iter)) &&
492 !bkey_cmp_packed(iter->b, in, next)) {
493 BUG_ON(in->needs_whiteout &&
494 next->needs_whiteout);
496 * XXX racy, called with read lock from write path
498 * leads to spurious BUG_ON() in bkey_unpack_key() in
501 next->needs_whiteout |= in->needs_whiteout;
505 if (bkey_whiteout(in)) {
506 memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
507 set_bkeyp_val_u64s(f, out, 0);
511 out = bkey_next(out);
514 return (u64 *) out - (u64 *) dst;
517 static inline int sort_extents_cmp(struct btree *b,
518 struct bkey_packed *l,
519 struct bkey_packed *r)
521 return bkey_cmp_packed(b, l, r) ?:
522 (int) bkey_deleted(l) - (int) bkey_deleted(r);
525 static unsigned sort_extents(struct bkey_packed *dst,
526 struct sort_iter *iter,
527 bool filter_whiteouts)
529 struct bkey_packed *in, *out = dst;
531 sort_iter_sort(iter, sort_extents_cmp);
533 while ((in = sort_iter_next(iter, sort_extents_cmp))) {
534 if (bkey_deleted(in))
537 if (bkey_whiteout(in) &&
538 (filter_whiteouts || !in->needs_whiteout))
542 out = bkey_next(out);
545 return (u64 *) out - (u64 *) dst;
548 static void btree_node_sort(struct bch_fs *c, struct btree *b,
549 struct btree_iter *iter,
552 bool filter_whiteouts)
554 struct btree_node *out;
555 struct sort_iter sort_iter;
557 struct bset *start_bset = bset(b, &b->set[start_idx]);
558 bool used_mempool = false;
559 u64 start_time, seq = 0;
560 unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
561 bool sorting_entire_node = start_idx == 0 &&
564 sort_iter_init(&sort_iter, b);
566 for (t = b->set + start_idx;
567 t < b->set + end_idx;
569 u64s += le16_to_cpu(bset(b, t)->u64s);
570 sort_iter_add(&sort_iter,
571 btree_bkey_first(b, t),
572 btree_bkey_last(b, t));
575 order = sorting_entire_node
576 ? btree_page_order(c)
577 : get_order(__vstruct_bytes(struct btree_node, u64s));
579 out = btree_bounce_alloc(c, order, &used_mempool);
581 start_time = local_clock();
583 if (btree_node_is_extents(b))
584 filter_whiteouts = bset_written(b, start_bset);
586 u64s = btree_node_is_extents(b)
587 ? sort_extents(out->keys.start, &sort_iter, filter_whiteouts)
588 : sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
590 out->keys.u64s = cpu_to_le16(u64s);
592 BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
594 if (sorting_entire_node)
595 bch2_time_stats_update(&c->btree_sort_time, start_time);
597 /* Make sure we preserve bset journal_seq: */
598 for (t = b->set + start_idx; t < b->set + end_idx; t++)
599 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
600 start_bset->journal_seq = cpu_to_le64(seq);
602 if (sorting_entire_node) {
603 unsigned u64s = le16_to_cpu(out->keys.u64s);
605 BUG_ON(order != btree_page_order(c));
608 * Our temporary buffer is the same size as the btree node's
609 * buffer, we can just swap buffers instead of doing a big
613 out->keys.u64s = cpu_to_le16(u64s);
615 set_btree_bset(b, b->set, &b->data->keys);
617 start_bset->u64s = out->keys.u64s;
618 memcpy_u64s(start_bset->start,
620 le16_to_cpu(out->keys.u64s));
623 for (i = start_idx + 1; i < end_idx; i++)
624 b->nr.bset_u64s[start_idx] +=
629 for (i = start_idx + 1; i < b->nsets; i++) {
630 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
631 b->set[i] = b->set[i + shift];
634 for (i = b->nsets; i < MAX_BSETS; i++)
635 b->nr.bset_u64s[i] = 0;
637 set_btree_bset_end(b, &b->set[start_idx]);
638 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
640 btree_bounce_free(c, order, used_mempool, out);
642 bch2_verify_btree_nr_keys(b);
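/*
 * btree_node_sort() merges bsets [start_idx, end_idx) into one. When the
 * entire node is being sorted the bounce buffer is node-sized and the
 * buffers are simply swapped; otherwise the sorted keys are copied back into
 * the first bset of the range and the remaining bset_tree entries are
 * shifted down by 'shift'. Either way the result's auxiliary search tree is
 * invalidated (bch2_bset_set_no_aux_tree()) and rebuilt later.
 */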
645 /* Sort + repack in a new format: */
646 static struct btree_nr_keys sort_repack(struct bset *dst,
648 struct btree_node_iter *src_iter,
649 struct bkey_format *out_f,
650 bool filter_whiteouts)
652 struct bkey_format *in_f = &src->format;
653 struct bkey_packed *in, *out = vstruct_last(dst);
654 struct btree_nr_keys nr;
656 memset(&nr, 0, sizeof(nr));
658 while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
659 if (filter_whiteouts && bkey_whiteout(in))
662 if (bch2_bkey_transform(out_f, out, bkey_packed(in)
663 ? in_f : &bch2_bkey_format_current, in))
664 out->format = KEY_FORMAT_LOCAL_BTREE;
666 bch2_bkey_unpack(src, (void *) out, in);
668 btree_keys_account_key_add(&nr, 0, out);
669 out = bkey_next(out);
672 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
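/*
 * Repacking: each key is translated into the new format with
 * bch2_bkey_transform() when it fits; keys that can't be represented in the
 * new format are stored unpacked via bch2_bkey_unpack(), so the output bset
 * may contain a mix of packed and unpacked keys.
 */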
676 /* Sort, repack, and merge: */
677 static struct btree_nr_keys sort_repack_merge(struct bch_fs *c,
680 struct btree_node_iter *iter,
681 struct bkey_format *out_f,
682 bool filter_whiteouts,
683 key_filter_fn filter,
686 struct bkey_packed *k, *prev = NULL, *out;
687 struct btree_nr_keys nr;
690 memset(&nr, 0, sizeof(nr));
692 while ((k = bch2_btree_node_iter_next_all(iter, src))) {
693 if (filter_whiteouts && bkey_whiteout(k))
697 * The filter might modify pointers, so we have to unpack the
698 * key and values to &tmp.k:
700 bch2_bkey_unpack(src, &tmp.k, k);
702 if (filter && filter(c, src, bkey_i_to_s(&tmp.k)))
705 /* prev is always unpacked, for key merging: */
709 merge(c, src, (void *) prev, &tmp.k) == BCH_MERGE_MERGE)
713 * the current key becomes the new prev: advance prev, then
714 * copy the current key - but first pack prev (in place):
717 bch2_bkey_pack(prev, (void *) prev, out_f);
719 btree_keys_account_key_add(&nr, 0, prev);
720 prev = bkey_next(prev);
722 prev = vstruct_last(dst);
725 bkey_copy(prev, &tmp.k);
729 bch2_bkey_pack(prev, (void *) prev, out_f);
730 btree_keys_account_key_add(&nr, 0, prev);
731 out = bkey_next(prev);
733 out = vstruct_last(dst);
736 dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
740 void bch2_btree_sort_into(struct bch_fs *c,
744 struct btree_nr_keys nr;
745 struct btree_node_iter src_iter;
746 u64 start_time = local_clock();
748 BUG_ON(dst->nsets != 1);
750 bch2_bset_set_no_aux_tree(dst, dst->set);
752 bch2_btree_node_iter_init_from_start(&src_iter, src,
753 btree_node_is_extents(src));
755 if (btree_node_ops(src)->key_normalize ||
756 btree_node_ops(src)->key_merge)
757 nr = sort_repack_merge(c, btree_bset_first(dst),
761 btree_node_ops(src)->key_normalize,
762 btree_node_ops(src)->key_merge);
764 nr = sort_repack(btree_bset_first(dst),
769 bch2_time_stats_update(&c->btree_sort_time, start_time);
771 set_btree_bset_end(dst, dst->set);
773 dst->nr.live_u64s += nr.live_u64s;
774 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
775 dst->nr.packed_keys += nr.packed_keys;
776 dst->nr.unpacked_keys += nr.unpacked_keys;
778 bch2_verify_btree_nr_keys(dst);
781 #define SORT_CRIT (4096 / sizeof(u64))
784 * We're about to add another bset to the btree node, so if there are currently
785 * too many bsets - sort some of them together:
787 static bool btree_node_compact(struct bch_fs *c, struct btree *b,
788 struct btree_iter *iter)
790 unsigned unwritten_idx;
793 for (unwritten_idx = 0;
794 unwritten_idx < b->nsets;
796 if (bset_unwritten(b, bset(b, &b->set[unwritten_idx])))
799 if (b->nsets - unwritten_idx > 1) {
800 btree_node_sort(c, b, iter, unwritten_idx,
805 if (unwritten_idx > 1) {
806 btree_node_sort(c, b, iter, 0, unwritten_idx, false);
813 void bch2_btree_build_aux_trees(struct btree *b)
818 bch2_bset_build_aux_tree(b, t,
819 bset_unwritten(b, bset(b, t)) &&
820 t == bset_tree_last(b));
824 * @bch2_btree_init_next - initialize a new (unwritten) bset that can then be
827 * Safe to call if there already is an unwritten bset - will only add a new bset
828 * if @b doesn't already have one.
830 * If we sorted (i.e. invalidated iterators), @iter is reinitialized afterwards.
832 void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
833 struct btree_iter *iter)
835 struct btree_node_entry *bne;
838 EBUG_ON(!(b->lock.state.seq & 1));
839 EBUG_ON(iter && iter->nodes[b->level] != b);
841 did_sort = btree_node_compact(c, b, iter);
843 bne = want_new_bset(c, b);
845 bch2_bset_init_next(b, &bne->keys);
847 bch2_btree_build_aux_trees(b);
849 if (iter && did_sort)
850 bch2_btree_iter_reinit_node(iter, b);
853 static struct nonce btree_nonce(struct bset *i, unsigned offset)
855 return (struct nonce) {{
856 [0] = cpu_to_le32(offset),
857 [1] = ((__le32 *) &i->seq)[0],
858 [2] = ((__le32 *) &i->seq)[1],
859 [3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
863 static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
865 struct nonce nonce = btree_nonce(i, offset);
868 struct btree_node *bn = container_of(i, struct btree_node, keys);
869 unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
871 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
874 nonce = nonce_add(nonce, round_up(bytes, CHACHA20_BLOCK_SIZE));
877 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
878 vstruct_end(i) - (void *) i->_data);
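/*
 * Nonce/encryption layout: btree_nonce() is built from the bset's sector
 * offset within the node, the bset seq, and the journal_seq XOR'd with
 * BCH_NONCE_BTREE. For the bset embedded in the btree_node header,
 * bset_encrypt() also covers the header fields between ->flags and ->keys,
 * advancing the nonce by that length rounded up to a ChaCha20 block before
 * encrypting the bset contents; since this is a stream cipher, the same call
 * decrypts on the read path.
 */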
881 static int btree_err_msg(struct bch_fs *c, struct btree *b, struct bset *i,
882 unsigned offset, int write, char *buf, size_t len)
884 char *out = buf, *end = buf + len;
886 out += scnprintf(out, end - out,
887 "error validating btree node %s "
888 "at btree %u level %u/%u\n"
889 "pos %llu:%llu node offset %u",
890 write ? "before write " : "",
891 b->btree_id, b->level,
892 c->btree_roots[b->btree_id].level,
893 b->key.k.p.inode, b->key.k.p.offset,
896 out += scnprintf(out, end - out,
898 le16_to_cpu(i->u64s));
903 enum btree_err_type {
905 BTREE_ERR_WANT_RETRY,
906 BTREE_ERR_MUST_RETRY,
910 enum btree_validate_ret {
911 BTREE_RETRY_READ = 64,
914 #define btree_err(type, c, b, i, msg, ...) \
916 char buf[200], *out = buf, *end = out + sizeof(buf); \
918 out += btree_err_msg(c, b, i, b->written, write, out, end - out);\
919 out += scnprintf(out, end - out, ": " msg, ##__VA_ARGS__); \
921 if (type == BTREE_ERR_FIXABLE && \
923 !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
924 mustfix_fsck_err(c, "%s", buf); \
926 bch_err(c, "%s", buf); \
929 case BTREE_ERR_FIXABLE: \
930 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
932 case BTREE_ERR_WANT_RETRY: \
934 ret = BTREE_RETRY_READ; \
938 case BTREE_ERR_MUST_RETRY: \
939 ret = BTREE_RETRY_READ; \
941 case BTREE_ERR_FATAL: \
942 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
949 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
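/*
 * Roughly: BTREE_ERR_FIXABLE errors are repaired in place (or surfaced as
 * mustfix fsck errors while initial GC hasn't finished), BTREE_ERR_WANT_RETRY
 * and BTREE_ERR_MUST_RETRY ask the read path to retry from another replica
 * (BTREE_RETRY_READ), and BTREE_ERR_FATAL maps to BCH_FSCK_ERRORS_NOT_FIXED.
 */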
951 static int validate_bset(struct bch_fs *c, struct btree *b,
952 struct bset *i, unsigned sectors,
953 unsigned *whiteout_u64s, int write,
956 struct bkey_packed *k, *prev = NULL;
957 struct bpos prev_pos = POS_MIN;
958 enum bkey_type type = btree_node_type(b);
959 bool seen_non_whiteout = false;
963 if (i == &b->data->keys) {
964 /* These indicate that we read the wrong btree node: */
965 btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
966 BTREE_ERR_MUST_RETRY, c, b, i,
967 "incorrect btree id");
969 btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
970 BTREE_ERR_MUST_RETRY, c, b, i,
973 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
974 u64 *p = (u64 *) &b->data->ptr;
977 bch2_bpos_swab(&b->data->min_key);
978 bch2_bpos_swab(&b->data->max_key);
981 btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
982 BTREE_ERR_MUST_RETRY, c, b, i,
983 "incorrect max key");
985 /* XXX: ideally we would be validating min_key too */
988 * not correct anymore, due to btree node write error
991 * need to add b->data->seq to btree keys and verify
994 btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
996 BTREE_ERR_FATAL, c, b, i,
997 "incorrect backpointer");
999 err = bch2_bkey_format_validate(&b->data->format);
1001 BTREE_ERR_FATAL, c, b, i,
1002 "invalid bkey format: %s", err);
1005 if (btree_err_on(le16_to_cpu(i->version) != BCACHE_BSET_VERSION,
1006 BTREE_ERR_FIXABLE, c, b, i,
1007 "unsupported bset version")) {
1008 i->version = cpu_to_le16(BCACHE_BSET_VERSION);
1013 if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
1014 BTREE_ERR_FIXABLE, c, b, i,
1015 "bset past end of btree node")) {
1020 btree_err_on(b->written && !i->u64s,
1021 BTREE_ERR_FIXABLE, c, b, i,
1024 if (!BSET_SEPARATE_WHITEOUTS(i)) {
1025 seen_non_whiteout = true;
1030 k != vstruct_last(i);) {
1033 const char *invalid;
1035 if (btree_err_on(!k->u64s,
1036 BTREE_ERR_FIXABLE, c, b, i,
1037 "KEY_U64s 0: %zu bytes of metadata lost",
1038 vstruct_end(i) - (void *) k)) {
1039 i->u64s = cpu_to_le16((u64 *) k - i->_data);
1043 if (btree_err_on(bkey_next(k) > vstruct_last(i),
1044 BTREE_ERR_FIXABLE, c, b, i,
1045 "key extends past end of bset")) {
1046 i->u64s = cpu_to_le16((u64 *) k - i->_data);
1050 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
1051 BTREE_ERR_FIXABLE, c, b, i,
1052 "invalid bkey format %u", k->format)) {
1053 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1054 memmove_u64s_down(k, bkey_next(k),
1055 (u64 *) vstruct_end(i) - (u64 *) k);
1059 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
1060 bch2_bkey_swab(type, &b->format, k);
1062 u = bkey_disassemble(b, k, &tmp);
1064 invalid = __bch2_bkey_invalid(c, type, u) ?:
1065 bch2_bkey_in_btree_node(b, u) ?:
1066 (write ? bch2_bkey_val_invalid(c, type, u) : NULL);
1070 bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
1071 btree_err(BTREE_ERR_FIXABLE, c, b, i,
1072 "invalid bkey %s: %s", buf, invalid);
1074 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1075 memmove_u64s_down(k, bkey_next(k),
1076 (u64 *) vstruct_end(i) - (u64 *) k);
1081 * with the separate whiteouts thing (used for extents), the
1082 * second set of keys actually can have whiteouts too, so we
1083 * can't solely go off bkey_whiteout()...
1086 if (!seen_non_whiteout &&
1087 (!bkey_whiteout(k) ||
1088 (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
1089 *whiteout_u64s = k->_data - i->_data;
1090 seen_non_whiteout = true;
1091 } else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
1092 btree_err(BTREE_ERR_FATAL, c, b, i,
1093 "keys out of order: %llu:%llu > %llu:%llu",
1097 bkey_start_offset(u.k));
1098 /* XXX: repair this */
1106 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1111 int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
1113 struct btree_node_entry *bne;
1114 struct btree_node_iter *iter;
1115 struct btree_node *sorted;
1116 struct bkey_packed *k;
1120 int ret, retry_read = 0, write = READ;
1122 iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
1123 __bch2_btree_node_iter_init(iter, btree_node_is_extents(b));
1125 if (bch2_meta_read_fault("btree"))
1126 btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
1129 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
1130 BTREE_ERR_MUST_RETRY, c, b, NULL,
1133 btree_err_on(!b->data->keys.seq,
1134 BTREE_ERR_MUST_RETRY, c, b, NULL,
1135 "bad btree header");
1137 while (b->written < c->opts.btree_node_size) {
1138 unsigned sectors, whiteout_u64s = 0;
1140 struct bch_csum csum;
1145 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1146 BTREE_ERR_WANT_RETRY, c, b, i,
1147 "unknown checksum type");
1149 nonce = btree_nonce(i, b->written << 9);
1150 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
1152 btree_err_on(bch2_crc_cmp(csum, b->data->csum),
1153 BTREE_ERR_WANT_RETRY, c, b, i,
1154 "invalid checksum");
1156 bset_encrypt(c, i, b->written << 9);
1158 sectors = vstruct_sectors(b->data, c->block_bits);
1160 set_btree_bset(b, b->set, &b->data->keys);
1161 btree_node_set_format(b, b->data->format);
1163 bne = write_block(b);
1166 if (i->seq != b->data->keys.seq)
1169 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1170 BTREE_ERR_WANT_RETRY, c, b, i,
1171 "unknown checksum type");
1173 nonce = btree_nonce(i, b->written << 9);
1174 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1176 btree_err_on(bch2_crc_cmp(csum, bne->csum),
1177 BTREE_ERR_WANT_RETRY, c, b, i,
1178 "invalid checksum");
1180 bset_encrypt(c, i, b->written << 9);
1182 sectors = vstruct_sectors(bne, c->block_bits);
1185 ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
1190 b->written += sectors;
1192 ret = bch2_journal_seq_should_ignore(c, le64_to_cpu(i->journal_seq), b);
1194 btree_err(BTREE_ERR_FATAL, c, b, i,
1195 "insufficient memory");
1200 btree_err_on(!b->written,
1201 BTREE_ERR_FIXABLE, c, b, i,
1202 "first btree node bset has blacklisted journal seq");
1207 __bch2_btree_node_iter_push(iter, b,
1209 vstruct_idx(i, whiteout_u64s));
1211 __bch2_btree_node_iter_push(iter, b,
1212 vstruct_idx(i, whiteout_u64s),
1216 for (bne = write_block(b);
1217 bset_byte_offset(b, bne) < btree_bytes(c);
1218 bne = (void *) bne + block_bytes(c))
1219 btree_err_on(bne->keys.seq == b->data->keys.seq,
1220 BTREE_ERR_WANT_RETRY, c, b, NULL,
1221 "found bset signature after last bset");
1223 sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
1224 sorted->keys.u64s = 0;
1226 b->nr = btree_node_is_extents(b)
1227 ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
1228 : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
1230 u64s = le16_to_cpu(sorted->keys.u64s);
1232 sorted->keys.u64s = cpu_to_le16(u64s);
1233 swap(sorted, b->data);
1234 set_btree_bset(b, b->set, &b->data->keys);
1237 BUG_ON(b->nr.live_u64s != u64s);
1239 btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
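/*
 * The individual bsets within a node may contain keys that overlap or
 * duplicate keys in other bsets, so after validating each bset we merge them
 * all via bch2_extent_sort_fix_overlapping()/bch2_key_sort_fix_overlapping()
 * into a bounce buffer and swap that buffer with the node's, leaving a
 * single sorted bset in memory.
 */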
1242 for (k = i->start; k != vstruct_last(i);) {
1243 enum bkey_type type = btree_node_type(b);
1245 struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
1246 const char *invalid = bch2_bkey_val_invalid(c, type, u);
1251 bch2_bkey_val_to_text(c, type, buf, sizeof(buf), u);
1252 btree_err(BTREE_ERR_FIXABLE, c, b, i,
1253 "invalid bkey %s: %s", buf, invalid);
1255 btree_keys_account_key_drop(&b->nr, 0, k);
1257 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1258 memmove_u64s_down(k, bkey_next(k),
1259 (u64 *) vstruct_end(i) - (u64 *) k);
1266 bch2_bset_build_aux_tree(b, b->set, false);
1268 set_needs_whiteout(btree_bset_first(b));
1270 btree_node_reset_sib_u64s(b);
1272 mempool_free(iter, &c->fill_iter);
1276 if (ret == BTREE_RETRY_READ) {
1279 bch2_inconsistent_error(c);
1280 set_btree_node_read_error(b);
1285 static void btree_node_read_work(struct work_struct *work)
1287 struct btree_read_bio *rb =
1288 container_of(work, struct btree_read_bio, work);
1289 struct bch_fs *c = rb->c;
1290 struct btree *b = rb->bio.bi_private;
1291 struct bio *bio = &rb->bio;
1292 struct bch_devs_mask avoid;
1294 memset(&avoid, 0, sizeof(avoid));
1298 bch_info(c, "retrying read");
1300 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1301 bio->bi_bdev = rb->pick.ca->disk_sb.bdev;
1302 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
1303 bio->bi_iter.bi_size = btree_bytes(c);
1304 submit_bio_wait(bio);
1306 bch2_dev_io_err_on(bio->bi_status, rb->pick.ca, "btree read");
1307 percpu_ref_put(&rb->pick.ca->io_ref);
1309 __set_bit(rb->pick.ca->dev_idx, avoid.d);
1310 rb->pick = bch2_btree_pick_ptr(c, b, &avoid);
1312 if (!bio->bi_status &&
1313 !bch2_btree_node_read_done(c, b, !IS_ERR_OR_NULL(rb->pick.ca)))
1315 } while (!IS_ERR_OR_NULL(rb->pick.ca));
1317 set_btree_node_read_error(b);
1319 if (!IS_ERR_OR_NULL(rb->pick.ca))
1320 percpu_ref_put(&rb->pick.ca->io_ref);
1322 bch2_time_stats_update(&c->btree_read_time, rb->start_time);
1324 clear_btree_node_read_in_flight(b);
1325 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
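/*
 * Read retries: on failure the device we just read from is added to the
 * avoid mask, another replica is picked with bch2_btree_pick_ptr(), and we
 * loop until a read passes bch2_btree_node_read_done() or no replicas
 * remain, at which point the node is marked with a read error.
 */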
1328 static void btree_node_read_endio(struct bio *bio)
1330 struct btree_read_bio *rb =
1331 container_of(bio, struct btree_read_bio, bio);
1333 bch2_latency_acct(rb->pick.ca, rb->start_time >> 10, READ);
1335 INIT_WORK(&rb->work, btree_node_read_work);
1336 schedule_work(&rb->work);
1339 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1342 struct extent_pick_ptr pick;
1343 struct btree_read_bio *rb;
1346 trace_btree_read(c, b);
1348 pick = bch2_btree_pick_ptr(c, b, NULL);
1349 if (bch2_fs_fatal_err_on(!pick.ca, c,
1350 "btree node read error: no device to read from")) {
1351 set_btree_node_read_error(b);
1355 bio = bio_alloc_bioset(GFP_NOIO, btree_pages(c), &c->btree_bio);
1356 rb = container_of(bio, struct btree_read_bio, bio);
1358 rb->start_time = local_clock();
1360 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1361 bio->bi_bdev = pick.ca->disk_sb.bdev;
1362 bio->bi_iter.bi_sector = pick.ptr.offset;
1363 bio->bi_iter.bi_size = btree_bytes(c);
1364 bch2_bio_map(bio, b->data);
1366 this_cpu_add(pick.ca->io_done->sectors[READ][BCH_DATA_BTREE],
1369 set_btree_node_read_in_flight(b);
1372 submit_bio_wait(bio);
1373 bio->bi_private = b;
1374 btree_node_read_work(&rb->work);
1376 bio->bi_end_io = btree_node_read_endio;
1377 bio->bi_private = b;
1382 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1383 const struct bkey_i *k, unsigned level)
1389 closure_init_stack(&cl);
1392 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1396 b = bch2_btree_node_mem_alloc(c);
1397 bch2_btree_cache_cannibalize_unlock(c);
1401 bkey_copy(&b->key, k);
1402 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1404 bch2_btree_node_read(c, b, true);
1406 if (btree_node_read_error(b)) {
1407 bch2_btree_node_hash_remove(&c->btree_cache, b);
1409 mutex_lock(&c->btree_cache.lock);
1410 list_move(&b->list, &c->btree_cache.freeable);
1411 mutex_unlock(&c->btree_cache.lock);
1417 bch2_btree_set_root_for_read(c, b);
1419 six_unlock_write(&b->lock);
1420 six_unlock_intent(&b->lock);
1425 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1426 struct btree_write *w)
1428 bch2_journal_pin_drop(&c->journal, &w->journal);
1429 closure_wake_up(&w->wait);
1432 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1434 struct btree_write *w = btree_prev_write(b);
1436 bch2_btree_complete_write(c, b, w);
1437 btree_node_io_unlock(b);
1440 static void bch2_btree_node_write_error(struct bch_fs *c,
1441 struct btree_write_bio *wbio)
1443 struct btree *b = wbio->wbio.bio.bi_private;
1444 struct closure *cl = wbio->cl;
1445 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1446 struct bkey_i_extent *new_key;
1447 struct bkey_s_extent e;
1448 struct bch_extent_ptr *ptr;
1449 struct btree_iter iter;
1452 __bch2_btree_iter_init(&iter, c, b->btree_id, b->key.k.p,
1456 ret = bch2_btree_iter_traverse(&iter);
1460 /* has node been freed? */
1461 if (iter.nodes[b->level] != b) {
1462 /* node has been freed: */
1463 if (!btree_node_dying(b))
1468 if (!btree_node_hashed(b))
1471 bkey_copy(&tmp.k, &b->key);
1473 new_key = bkey_i_to_extent(&tmp.k);
1474 e = extent_i_to_s(new_key);
1475 extent_for_each_ptr_backwards(e, ptr)
1476 if (bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev))
1477 bch2_extent_drop_ptr(e, ptr);
1479 if (!bch2_extent_nr_ptrs(e.c))
1482 ret = bch2_btree_node_update_key(c, &iter, b, new_key);
1488 bch2_btree_iter_unlock(&iter);
1489 bio_put(&wbio->wbio.bio);
1490 btree_node_write_done(c, b);
1495 set_btree_node_noevict(b);
1496 bch2_fs_fatal_error(c, "fatal error writing btree node");
1500 void bch2_btree_write_error_work(struct work_struct *work)
1502 struct bch_fs *c = container_of(work, struct bch_fs,
1503 btree_write_error_work);
1507 spin_lock_irq(&c->btree_write_error_lock);
1508 bio = bio_list_pop(&c->btree_write_error_list);
1509 spin_unlock_irq(&c->btree_write_error_lock);
1514 bch2_btree_node_write_error(c,
1515 container_of(bio, struct btree_write_bio, wbio.bio));
1519 static void btree_node_write_work(struct work_struct *work)
1521 struct btree_write_bio *wbio =
1522 container_of(work, struct btree_write_bio, work);
1523 struct closure *cl = wbio->cl;
1524 struct bch_fs *c = wbio->wbio.c;
1525 struct btree *b = wbio->wbio.bio.bi_private;
1527 btree_bounce_free(c,
1529 wbio->wbio.used_mempool,
1532 if (wbio->wbio.failed.nr) {
1533 unsigned long flags;
1535 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1536 bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
1537 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1539 queue_work(c->wq, &c->btree_write_error_work);
1543 bio_put(&wbio->wbio.bio);
1544 btree_node_write_done(c, b);
1549 static void btree_node_write_endio(struct bio *bio)
1551 struct bch_write_bio *wbio = to_wbio(bio);
1552 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
1553 struct bch_write_bio *orig = parent ?: wbio;
1554 struct bch_fs *c = wbio->c;
1555 struct bch_dev *ca = wbio->ca;
1556 unsigned long flags;
1558 bch2_latency_acct(ca, wbio->submit_time_us, WRITE);
1560 if (bio->bi_status == BLK_STS_REMOVED ||
1561 bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
1562 bch2_meta_write_fault("btree")) {
1563 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1564 bch2_dev_list_add_dev(&orig->failed, ca->dev_idx);
1565 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1568 if (wbio->have_io_ref)
1569 percpu_ref_put(&ca->io_ref);
1573 bio_endio(&parent->bio);
1575 struct btree_write_bio *wb =
1576 container_of(orig, struct btree_write_bio, wbio);
1578 INIT_WORK(&wb->work, btree_node_write_work);
1579 schedule_work(&wb->work);
1583 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1584 struct bset *i, unsigned sectors)
1586 const struct bch_extent_ptr *ptr;
1587 unsigned whiteout_u64s = 0;
1590 extent_for_each_ptr(bkey_i_to_s_c_extent(&b->key), ptr)
1593 ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
1595 bch2_inconsistent_error(c);
1600 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1601 struct closure *parent,
1602 enum six_lock_type lock_type_held)
1604 struct btree_write_bio *wbio;
1605 struct bset_tree *t;
1607 struct btree_node *bn = NULL;
1608 struct btree_node_entry *bne = NULL;
1610 struct bkey_s_extent e;
1611 struct bch_extent_ptr *ptr;
1612 struct sort_iter sort_iter;
1614 unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
1617 unsigned long old, new;
1620 if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
1624 * We may only have a read lock on the btree node - the dirty bit is our
1625 * "lock" against racing with other threads that may be trying to start
1626 * a write, we do a write iff we clear the dirty bit. Since setting the
1627 * dirty bit requires a write lock, we can't race with other threads
1631 old = new = READ_ONCE(b->flags);
1633 if (!(old & (1 << BTREE_NODE_dirty)))
1637 !btree_node_may_write(b))
1640 if (old & (1 << BTREE_NODE_write_in_flight)) {
1641 btree_node_wait_on_io(b);
1645 new &= ~(1 << BTREE_NODE_dirty);
1646 new &= ~(1 << BTREE_NODE_need_write);
1647 new |= (1 << BTREE_NODE_write_in_flight);
1648 new |= (1 << BTREE_NODE_just_written);
1649 new ^= (1 << BTREE_NODE_write_idx);
1650 } while (cmpxchg_acquire(&b->flags, old, new) != old);
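/*
 * The cmpxchg loop above is what makes "whoever clears the dirty bit does
 * the write" work: in one atomic transition we clear dirty/need_write, set
 * write_in_flight and just_written, and flip write_idx, so callers holding
 * only a read lock can race here safely - exactly one of them wins.
 */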
1652 BUG_ON(btree_node_fake(b));
1653 BUG_ON(!list_empty(&b->write_blocked));
1654 BUG_ON((b->will_make_reachable != NULL) != !b->written);
1656 BUG_ON(b->written >= c->opts.btree_node_size);
1657 BUG_ON(bset_written(b, btree_bset_last(b)));
1658 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1659 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1661 if (lock_type_held == SIX_LOCK_intent) {
1662 six_lock_write(&b->lock);
1663 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
1664 six_unlock_write(&b->lock);
1666 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
1669 BUG_ON(b->uncompacted_whiteout_u64s);
1671 sort_iter_init(&sort_iter, b);
1674 ? sizeof(struct btree_node)
1675 : sizeof(struct btree_node_entry);
1677 bytes += b->whiteout_u64s * sizeof(u64);
1679 for_each_bset(b, t) {
1682 if (bset_written(b, i))
1685 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1686 sort_iter_add(&sort_iter,
1687 btree_bkey_first(b, t),
1688 btree_bkey_last(b, t));
1689 seq = max(seq, le64_to_cpu(i->journal_seq));
1692 order = get_order(bytes);
1693 data = btree_bounce_alloc(c, order, &used_mempool);
1701 bne->keys = b->data->keys;
1705 i->journal_seq = cpu_to_le64(seq);
1708 if (!btree_node_is_extents(b)) {
1709 sort_iter_add(&sort_iter,
1710 unwritten_whiteouts_start(c, b),
1711 unwritten_whiteouts_end(c, b));
1712 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1714 memcpy_u64s(i->start,
1715 unwritten_whiteouts_start(c, b),
1717 i->u64s = cpu_to_le16(b->whiteout_u64s);
1718 SET_BSET_SEPARATE_WHITEOUTS(i, true);
1721 b->whiteout_u64s = 0;
1723 u64s = btree_node_is_extents(b)
1724 ? sort_extents(vstruct_last(i), &sort_iter, false)
1725 : sort_keys(i->start, &sort_iter, false);
1726 le16_add_cpu(&i->u64s, u64s);
1728 clear_needs_whiteout(i);
1730 /* do we have data to write? */
1731 if (b->written && !i->u64s)
1734 bytes_to_write = vstruct_end(i) - data;
1735 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1737 memset(data + bytes_to_write, 0,
1738 (sectors_to_write << 9) - bytes_to_write);
1740 BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
1741 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1742 BUG_ON(i->seq != b->data->keys.seq);
1744 i->version = cpu_to_le16(BCACHE_BSET_VERSION);
1745 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1747 /* if we're going to be encrypting, check metadata validity first: */
1748 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
1749 validate_bset_for_write(c, b, i, sectors_to_write))
1752 bset_encrypt(c, i, b->written << 9);
1754 nonce = btree_nonce(i, b->written << 9);
1757 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1759 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1761 /* if we're not encrypting, check metadata after checksumming: */
1762 if (!bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)) &&
1763 validate_bset_for_write(c, b, i, sectors_to_write))
1767 * We handle btree write errors by immediately halting the journal -
1768 * after we've done that, we can't issue any subsequent btree writes
1769 * because they might have pointers to new nodes that failed to write.
1771 * Furthermore, there's no point in doing any more btree writes because
1772 * with the journal stopped, we're never going to update the journal to
1773 * reflect that those writes were done and the data flushed from the
1776 * Make sure to update b->written so bch2_btree_init_next() doesn't
1779 if (bch2_journal_error(&c->journal) ||
1783 trace_btree_write(b, bytes_to_write, sectors_to_write);
1785 wbio = container_of(bio_alloc_bioset(GFP_NOIO, 1 << order, &c->btree_bio),
1786 struct btree_write_bio, wbio.bio);
1787 wbio_init(&wbio->wbio.bio);
1790 wbio->wbio.order = order;
1791 wbio->wbio.used_mempool = used_mempool;
1792 wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
1793 wbio->wbio.bio.bi_iter.bi_size = sectors_to_write << 9;
1794 wbio->wbio.bio.bi_end_io = btree_node_write_endio;
1795 wbio->wbio.bio.bi_private = b;
1798 closure_get(parent);
1800 bch2_bio_map(&wbio->wbio.bio, data);
1803 * If we're appending to a leaf node, we don't technically need FUA -
1804 * this write just needs to be persisted before the next journal write,
1805 * which will be marked FLUSH|FUA.
1807 * Similarly if we're writing a new btree root - the pointer is going to
1808 * be in the next journal entry.
1810 * But if we're writing a new btree node (that isn't a root) or
1811 * appending to a non leaf btree node, we need either FUA or a flush
1812 * when we write the parent with the new pointer. FUA is cheaper than a
1813 * flush, and writes appending to leaf nodes aren't blocking anything so
1814 * just make all btree node writes FUA to keep things sane.
1817 bkey_copy(&k.key, &b->key);
1818 e = bkey_i_to_s_extent(&k.key);
1820 extent_for_each_ptr(e, ptr)
1821 ptr->offset += b->written;
1823 b->written += sectors_to_write;
1825 bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
1828 set_btree_node_noevict(b);
1829 b->written += sectors_to_write;
1831 btree_bounce_free(c, order, used_mempool, data);
1832 btree_node_write_done(c, b);
1836 * Work that must be done with write lock held:
1838 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1840 bool invalidated_iter = false;
1841 struct btree_node_entry *bne;
1842 struct bset_tree *t;
1844 if (!btree_node_just_written(b))
1847 BUG_ON(b->whiteout_u64s);
1848 BUG_ON(b->uncompacted_whiteout_u64s);
1850 clear_btree_node_just_written(b);
1853 * Note: immediately after write, bset_unwritten()/bset_written() don't
1854 * work - the amount of data we had to write after compaction might have
1855 * been smaller than the offset of the last bset.
1857 * However, we know that all bsets have been written here, as long as
1858 * we're still holding the write lock:
1862 * XXX: decide if we really want to unconditionally sort down to a
1866 btree_node_sort(c, b, NULL, 0, b->nsets, true);
1867 invalidated_iter = true;
1869 invalidated_iter = bch2_drop_whiteouts(b);
1873 set_needs_whiteout(bset(b, t));
1875 bch2_btree_verify(c, b);
1878 * If later we don't unconditionally sort down to a single bset, we have
1879 * to ensure this is still true:
1881 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
1883 bne = want_new_bset(c, b);
1885 bch2_bset_init_next(b, &bne->keys);
1887 bch2_btree_build_aux_trees(b);
1889 return invalidated_iter;
1893 * Use this one if the node is intent locked:
1895 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1896 struct closure *parent,
1897 enum six_lock_type lock_type_held)
1899 BUG_ON(lock_type_held == SIX_LOCK_write);
1901 if (lock_type_held == SIX_LOCK_intent ||
1902 six_trylock_convert(&b->lock, SIX_LOCK_read,
1904 __bch2_btree_node_write(c, b, parent, SIX_LOCK_intent);
1906 /* don't cycle lock unnecessarily: */
1907 if (btree_node_just_written(b)) {
1908 six_lock_write(&b->lock);
1909 bch2_btree_post_write_cleanup(c, b);
1910 six_unlock_write(&b->lock);
1913 if (lock_type_held == SIX_LOCK_read)
1914 six_lock_downgrade(&b->lock);
1916 __bch2_btree_node_write(c, b, parent, SIX_LOCK_read);
1920 void bch2_btree_verify_flushed(struct bch_fs *c)
1922 struct bucket_table *tbl;
1923 struct rhash_head *pos;
1928 for_each_cached_btree(b, c, tbl, i, pos)
1929 BUG_ON(btree_node_dirty(b));