// SPDX-License-Identifier: GPL-2.0
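/*
 * btree_io.c - reading, writing, validating and compacting btree nodes.
 *
 * A btree node on disk is a sequence of bsets (sorted sets of keys), each
 * with its own checksummed header; reads merge all the bsets back into a
 * single sorted bset, writes append a new bset containing only the keys
 * not yet written.
 */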
#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_next(start);
	     k != end;
	     p = k, k = bkey_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
	}
#endif
}
static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = v;
}
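
/*
 * Bounce buffers are scratch space for sorting/compacting bsets: try a
 * plain allocation first, and only fall back to the preallocated mempool
 * when that fails, so sorts can't deadlock waiting on memory reclaim.
 */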
static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		vpfree(p, size);
}
static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > btree_bytes(c));

	*used_mempool = false;
	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
	}
	memalloc_nofs_restore(flags);
	return p;
}
static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					    ptrs[c],
					    ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}
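
/*
 * Sort the unwritten whiteouts in place: build an array of pointers to the
 * whiteouts at the end of a bounce buffer, heapsort the pointers, then copy
 * the whiteouts back out in sorted order.
 */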
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(c, b);
	     k != unwritten_whiteouts_end(c, b);
	     k = bkey_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_copy(k, *ptrs);
		k = bkey_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(c, b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}
static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (!bkey_deleted(k)) {
				bkey_copy(out, k);
				out = bkey_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}
bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}
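
/*
 * Merge the bsets in [start_idx, end_idx) into a single sorted bset at
 * start_idx. When sorting the entire node we sort into a node-sized bounce
 * buffer and then just swap buffers with the node, avoiding a big memcpy.
 */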
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_bytes(c)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		unsigned u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_bytes(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy():
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}
void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	if (btree_node_is_extents(src))
		nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);
	else
		nr = bch2_sort_repack(btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}
#define SORT_CRIT	(4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}
void bch2_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}
/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If we sorted (i.e. invalidated iterators), @iter is reinitialized before
 * returning.
 */
void bch2_btree_init_next(struct btree_trans *trans,
			  struct btree_iter *iter,
			  struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!(b->c.lock.state.seq & 1));
	EBUG_ON(iter && iter->l[b->c.level].b != b);
	BUG_ON(bset_written(b, bset(b, &b->set[1])));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b)) {
		unsigned log_u64s[] = {
			ilog2(bset_u64s(&b->set[0])),
			ilog2(bset_u64s(&b->set[1])),
			ilog2(bset_u64s(&b->set[2])),
		};

		if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
			bch2_btree_node_write(c, b, SIX_LOCK_write);
			reinit_iter = true;
		}
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	if (iter && reinit_iter)
		bch2_btree_iter_reinit_node(iter, b);
}
static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
			      struct btree *b)
{
	pr_buf(out, "%s level %u/%u\n  ",
	       bch2_btree_ids[b->c.btree_id],
	       b->c.level,
	       c->btree_roots[b->c.btree_id].level);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}
static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i,
			  unsigned offset, int write)
{
	pr_buf(out, "error validating btree node ");
	if (write)
		pr_buf(out, "before write ");
	if (ca)
		pr_buf(out, "on %s ", ca->name);
	pr_buf(out, "at btree ");
	btree_pos_to_text(out, c, b);

	pr_buf(out, "\n  node offset %u", b->written);
	if (i)
		pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
}
enum btree_err_type {
	BTREE_ERR_FIXABLE,
	BTREE_ERR_WANT_RETRY,
	BTREE_ERR_MUST_RETRY,
	BTREE_ERR_FATAL,
};

enum btree_validate_ret {
	BTREE_RETRY_READ = 64,
};
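
/*
 * btree_err()/btree_err_on() are statement expressions that depend on
 * context in the calling function: they expect `ret`, `write` and
 * `have_retry` to be in scope, and jump to a local `fsck_err` label on
 * unrecoverable errors. btree_err_on() evaluates to true iff the error
 * fired, e.g.:
 *
 *	btree_err_on(b->written && !i->u64s,
 *		     BTREE_ERR_FIXABLE, c, ca, b, i,
 *		     "empty bset");
 */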
#define btree_err(type, c, ca, b, i, msg, ...)				\
({									\
	__label__ out;							\
	char _buf[300];							\
	char *_buf2 = _buf;						\
	struct printbuf out = PBUF(_buf);				\
									\
	_buf2 = kmalloc(4096, GFP_ATOMIC);				\
	if (_buf2)							\
		out = _PBUF(_buf2, 4096);				\
									\
	btree_err_msg(&out, c, ca, b, i, b->written, write);		\
	pr_buf(&out, ": " msg, ##__VA_ARGS__);				\
									\
	if (type == BTREE_ERR_FIXABLE &&				\
	    write == READ &&						\
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {		\
		mustfix_fsck_err(c, "%s", _buf2);			\
		goto out;						\
	}								\
									\
	switch (write) {						\
	case READ:							\
		bch_err(c, "%s", _buf2);				\
									\
		switch (type) {						\
		case BTREE_ERR_FIXABLE:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		case BTREE_ERR_WANT_RETRY:				\
			if (have_retry) {				\
				ret = BTREE_RETRY_READ;			\
				goto fsck_err;				\
			}						\
			break;						\
		case BTREE_ERR_MUST_RETRY:				\
			ret = BTREE_RETRY_READ;				\
			goto fsck_err;					\
		case BTREE_ERR_FATAL:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write: %s", _buf2);	\
									\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
out:									\
	if (_buf2 != _buf)						\
		kfree(_buf2);						\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	struct bset_tree *t;
	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
			bch2_bset_set_no_aux_tree(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
			bch2_bset_set_no_aux_tree(b, t);
		}
	}

	bch2_btree_build_aux_trees(b);

	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
	}
}
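
/*
 * Validate a single bset header: version, size, and - for the first bset
 * in a node - the btree node header (id, level, min/max keys, key format),
 * which tells us whether we read the node we were looking for at all.
 */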
static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned sectors, int write, bool have_retry)
{
	unsigned version = le16_to_cpu(i->version);
	const char *err;
	char buf1[100];
	char buf2[100];
	int ret = 0;

	btree_err_on((version != BCH_BSET_VERSION_OLD &&
		      version < bcachefs_metadata_version_min) ||
		     version >= bcachefs_metadata_version_max,
		     BTREE_ERR_FATAL, c, ca, b, i,
		     "unsupported bset version");

	if (btree_err_on(version < c->sb.version_min,
			 BTREE_ERR_FIXABLE, c, NULL, b, i,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(version > c->sb.version,
			 BTREE_ERR_FIXABLE, c, NULL, b, i,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     BTREE_ERR_FATAL, c, ca, b, i,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
			 BTREE_ERR_FIXABLE, c, ca, b, i,
			 "bset past end of btree node")) {
		i->u64s = 0;
		return 0;
	}

	btree_err_on(b->written && !i->u64s,
		     BTREE_ERR_FIXABLE, c, ca, b, i,
		     "empty bset");

	if (!b->written) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			btree_err_on(bp->seq != bn->keys.seq,
				     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     BTREE_ERR_MUST_RETRY, c, ca, b, i,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     BTREE_ERR_MUST_RETRY, c, ca, b, i,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
				     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
				     "incorrect min_key: got %s should be %s",
				     (bch2_bpos_to_text(&PBUF(buf1), bn->min_key), buf1),
				     (bch2_bpos_to_text(&PBUF(buf2), bp->min_key), buf2));
		}

		btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
			     BTREE_ERR_MUST_RETRY, c, ca, b, i,
			     "incorrect max key %s",
			     (bch2_bpos_to_text(&PBUF(buf1), bn->max_key), buf1));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		err = bch2_bkey_format_validate(&bn->format);
		btree_err_on(err,
			     BTREE_ERR_FATAL, c, ca, b, i,
			     "invalid bkey format: %s", err);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
fsck_err:
	return ret;
}
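
/*
 * Validate the keys within a bset, dropping any that are malformed, out of
 * order, or fail bkey validation by memmoving the rest of the bset down
 * over them:
 */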
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			 struct bset *i, unsigned *whiteout_u64s,
			 int write, bool have_retry)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		const char *invalid;

		if (btree_err_on(bkey_next(k) > vstruct_last(i),
				 BTREE_ERR_FIXABLE, c, NULL, b, i,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 BTREE_ERR_FIXABLE, c, NULL, b, i,
				 "invalid bkey format %u", k->format)) {
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/* XXX: validate k->u64s */
		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
			(!updated_range ? bch2_bkey_in_btree_node(b, u.s_c) : NULL) ?:
			(write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
		if (invalid) {
			char buf[160];

			bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
				  "invalid bkey: %s\n%s", invalid, buf);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			char buf1[80];
			char buf2[80];
			struct bkey up = bkey_unpack_key(b, prev);

			bch2_bkey_to_text(&PBUF(buf1), &up);
			bch2_bkey_to_text(&PBUF(buf2), u.k);

			bch2_dump_bset(c, b, i, 0);

			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
				      "keys out of order: %s > %s",
				      buf1, buf2)) {
				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
				memmove_u64s_down(k, bkey_next(k),
						  (u64 *) vstruct_end(i) - (u64 *) k);
				continue;
			}
		}

		prev = k;
		k = bkey_next(k);
	}
fsck_err:
	return ret;
}
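
/*
 * Read path: verify the checksum of each bset, decrypt and validate it,
 * then merge all the (possibly overlapping) bsets into a single sorted
 * bset with bch2_key_sort_fix_overlapping(). Bsets with blacklisted
 * journal sequence numbers are skipped; see the comment below on where we
 * resume writing.
 */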
int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bch_extent_ptr *ptr;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned nonblacklisted_written = 0;
	int ret, retry_read = 0, write = READ;

	b->version_ondisk = U16_MAX;

	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
	sort_iter_init(iter, b);
	iter->size = (btree_blocks(c) + 1) * 2;

	if (bch2_meta_read_fault("btree"))
		btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
		     "bad magic");

	btree_err_on(!b->data->keys.seq,
		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
		     "bad btree header");

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		btree_err_on(b->data->keys.seq != bp->seq,
			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
			     "got wrong btree node (seq %llx want %llx)",
			     b->data->keys.seq, bp->seq);
	}

	while (b->written < c->opts.btree_node_size) {
		unsigned sectors, whiteout_u64s = 0;
		struct nonce nonce;
		struct bch_csum csum;
		bool first = !b->written;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "unknown checksum type %llu",
				     BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			btree_err_on(btree_node_is_extents(b) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     BTREE_ERR_FATAL, c, NULL, b, NULL,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "unknown checksum type %llu",
				     BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			btree_err_on(bch2_crc_cmp(csum, bne->csum),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, sectors,
				    READ, have_retry);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, &whiteout_u64s,
				    READ, have_retry);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		b->written += sectors;

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     BTREE_ERR_FIXABLE, c, ca, b, i,
			     "first btree node bset has blacklisted journal seq");
		if (blacklisted && !first)
			continue;

		sort_iter_add(iter, i->start,
			      vstruct_idx(i, whiteout_u64s));

		sort_iter_add(iter,
			      vstruct_idx(i, whiteout_u64s),
			      vstruct_last(i));

		nonblacklisted_written = b->written;
	}

	for (bne = write_block(b);
	     bset_byte_offset(b, bne) < btree_bytes(c);
	     bne = (void *) bne + block_bytes(c))
		btree_err_on(bne->keys.seq == b->data->keys.seq &&
			     !bch2_journal_seq_is_blacklisted(c,
							      le64_to_cpu(bne->keys.journal_seq),
							      true),
			     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
			     "found bset signature after last bset");

	/*
	 * Blacklisted bsets are those that were written after the most recent
	 * (flush) journal write. Since there wasn't a flush, they may not have
	 * made it to all devices - which means we shouldn't write new bsets
	 * after them, as that could leave a gap and then reads from that device
	 * wouldn't find all the bsets in that btree node - which means it's
	 * important that we start writing new bsets after the most recent _non_
	 * blacklisted bset:
	 */
	b->written = nonblacklisted_written;

	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
		const char *invalid = bch2_bkey_val_invalid(c, u.s_c);

		if (invalid ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			char buf[160];

			bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i,
				  "invalid bkey %s: %s", buf, invalid);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}
out:
	mempool_free(iter, &c->fill_iter);
	return retry_read;
fsck_err:
	if (ret == BTREE_RETRY_READ) {
		retry_read = 1;
	} else {
		bch2_inconsistent_error(c);
		set_btree_node_read_error(b);
	}
	goto out;
}
static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct btree *b		= rb->b;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	char buf[200];
	struct printbuf out;
	bool saw_error = false;
	bool can_retry;

	goto start;
	while (1) {
		bch_info(c, "retrying read");
		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
		bio_reset(bio);
		bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_bytes(c);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		out = PBUF(buf);
		btree_pos_to_text(&out, c, b);
		bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
				bkey_i_to_s_c(&b->key),
				&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry))
			break;

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);

	if (saw_error && !btree_node_read_error(b))
		bch2_btree_node_rewrite_async(c, b);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->io_complete_wq, &rb->work);
}
struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	int			err[BCH_REPLICAS_MAX];
};
static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < c->opts.btree_node_size) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}
static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < c->opts.btree_node_size) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}
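
/*
 * Completion for the read-all-replicas debug path: pick the replica with
 * the most data written, cross-check the other replicas against it, and
 * dump a map of each replica's bsets if they disagree.
 */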
static void btree_node_read_all_replicas_done(struct closure *cl)
{
	struct btree_node_read_all *ra =
		container_of(cl, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written, written2;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			char buf[200];
			struct printbuf out = PBUF(buf);
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			while (offset < c->opts.btree_node_size) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				pr_buf(&out, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
					pr_buf(&out, "*");
				offset += sectors;
			}

			while (offset < c->opts.btree_node_size) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						pr_buf(&out, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					pr_buf(&out, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						pr_buf(&out, "*");
					offset += sectors;
				} else {
					offset++;
				}
			}

			bch_err(c, "replica %u:%s", i, buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_bytes(c));
		ret = bch2_btree_node_read_done(c, NULL, b, false);
	} else {
		ret = -1;
	}

	if (ret)
		set_btree_node_read_error(b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}
static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}
/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
								  btree_bytes(c)),
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);
		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->io_complete_wq);
	}

	return 0;
}
void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
			  bool sync)
{
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	char buf[200];
	int ret;

	btree_pos_to_text(&PBUF(buf), c, b);
	trace_btree_read(c, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);
	if (bch2_fs_fatal_err_on(ret <= 0, c,
			"btree node read error: no device to read from\n"
			" at %s", buf)) {
		set_btree_node_read_error(b);
		return;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
						   btree_bytes(c)),
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_bytes(c));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);

			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->io_complete_wq, &rb->work);
	}
}
int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_cache_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	bch2_btree_node_read(c, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -EIO;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
			       struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}
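
/*
 * A write has completed: update the node's flags with a cmpxchg loop, and
 * if another write was requested while this one was in flight
 * (BTREE_NODE_need_write), start it immediately.
 */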
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new, v;

	bch2_btree_complete_write(c, b, w);

	v = READ_ONCE(b->flags);
	do {
		old = new = v;

		if (old & (1U << BTREE_NODE_need_write))
			goto do_write;

		new &= ~(1U << BTREE_NODE_write_in_flight);
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
	return;

do_write:
	six_lock_read(&b->c.lock, NULL, NULL);
	v = READ_ONCE(b->flags);
	do {
		old = new = v;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    btree_node_may_write(b)) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |=  (1U << BTREE_NODE_write_in_flight);
			new |=  (1U << BTREE_NODE_just_written);
			new ^=  (1U << BTREE_NODE_write_idx);
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
		}
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, true);

	six_unlock_read(&b->c.lock);
}
static void bch2_btree_node_write_error(struct bch_fs *c,
					struct btree_write_bio *wbio)
{
	struct btree *b		= wbio->wbio.bio.bi_private;
	struct bkey_buf k;
	struct bch_extent_ptr *ptr;
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_bkey_buf_init(&k);
	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
					BTREE_MAX_DEPTH, b->c.level, 0);
retry:
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto err;

	/* has node been freed? */
	if (iter->l[b->c.level].b != b) {
		/* node has been freed: */
		BUG_ON(!btree_node_dying(b));
		goto out;
	}

	BUG_ON(!btree_node_hashed(b));

	bch2_bkey_buf_copy(&k, c, &b->key);

	bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
			    bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
		goto err;

	ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
	if (ret == -EINTR)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_trans_iter_put(&trans, iter);
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&k, c);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_error(c, "fatal error writing btree node");
	goto out;
}
void bch2_btree_write_error_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					btree_write_error_work);
	struct bio *bio;

	while (1) {
		spin_lock_irq(&c->btree_write_error_lock);
		bio = bio_list_pop(&c->btree_write_error_list);
		spin_unlock_irq(&c->btree_write_error_lock);

		if (!bio)
			break;

		bch2_btree_node_write_error(c,
			container_of(bio, struct btree_write_bio, wbio.bio));
	}
}
static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;

	btree_bounce_free(c,
		wbio->bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	if (wbio->wbio.failed.nr) {
		unsigned long flags;

		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);

		queue_work(c->btree_error_wq, &c->btree_write_error_work);
		return;
	}

	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
}
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct bch_fs *c		= wbio->c;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
	} else {
		struct btree_write_bio *wb =
			container_of(orig, struct btree_write_bio, wbio);

		INIT_WORK(&wb->work, btree_node_write_work);
		queue_work(c->io_complete_wq, &wb->work);
	}
}
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	unsigned whiteout_u64s = 0;
	int ret;

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_btree))
		return -1;

	ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?:
		validate_bset(c, NULL, b, i, sectors, WRITE, false);
	if (ret)
		bch2_inconsistent_error(c);

	return ret;
}
static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
}
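
/*
 * Write path: atomically claim the write via the node's flag bits, sort
 * the unwritten bsets (plus pending whiteouts) into a bounce buffer,
 * checksum/encrypt it, and submit one contiguous write of the new bset
 * appended at b->written sectors into the node.
 */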
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
{
	struct btree_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct bch_extent_ptr *ptr;
	struct sort_iter sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	void *data;

	if (already_started)
		goto do_write;

	if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
		return;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * that are trying to start a write:
	 */
	old = new = READ_ONCE(b->flags);
	do {
		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if (!btree_node_may_write(b))
			return;

		if (old & (1 << BTREE_NODE_never_write))
			return;

		BUG_ON(old & (1 << BTREE_NODE_write_in_flight));

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	atomic_dec(&c->btree_cache.dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= c->opts.btree_node_size);
	BUG_ON(b->written & (c->opts.block_size - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	sort_iter_add(&sort_iter,
		      unwritten_whiteouts_start(c, b),
		      unwritten_whiteouts_end(c, b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	b->whiteout_u64s = 0;

	u64s = bch2_sort_keys(i->start, &sort_iter, false);
	le16_add_cpu(&i->u64s, u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le16(BCH_BSET_VERSION_OLD)
		: cpu_to_le16(c->sb.version);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	bset_encrypt(c, i, b->written << 9);

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_btree_write(b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(GFP_NOIO,
				buf_pages(data, sectors_to_write << 9),
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->bytes			= bytes;
	wbio->wbio.c			= c;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.bio.bi_opf		= REQ_OP_WRITE|REQ_META;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&wbio->key, &b->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&wbio->key)), ptr)
		ptr->offset += b->written;

	b->written += sectors_to_write;

	atomic64_inc(&c->btree_writes_nr);
	atomic64_add(sectors_to_write, &c->btree_writes_sectors);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->io_complete_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	btree_node_write_done(c, b);
}
/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}
/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, false);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, false);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}
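
/*
 * Wait for all in-flight reads or writes (per @flag) on cached btree nodes
 * to complete; restarts the rhashtable walk after each wait, since we have
 * to drop the RCU read lock to sleep.
 */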
static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			goto restart;
		}
	rcu_read_unlock();
}

void bch2_btree_flush_all_reads(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

void bch2_btree_flush_all_writes(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}
void bch2_dirty_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos) {
		unsigned long flags = READ_ONCE(b->flags);

		if (!(flags & (1 << BTREE_NODE_dirty)))
			continue;

		pr_buf(out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
		       b,
		       (flags & (1 << BTREE_NODE_dirty)) != 0,
		       (flags & (1 << BTREE_NODE_need_write)) != 0,
		       b->c.level,
		       b->written,
		       !list_empty_careful(&b->write_blocked),
		       b->will_make_reachable != 0,
		       b->will_make_reachable & 1);
	}
	rcu_read_unlock();
}