// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

void bch2_btree_node_io_unlock(struct btree *b)
{
	EBUG_ON(!btree_node_write_in_flight(b));

	clear_btree_node_write_in_flight_inner(b);
	clear_btree_node_write_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

void bch2_btree_node_io_lock(struct btree *b)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
			    TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_read(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void __bch2_btree_node_wait_on_write(struct btree *b)
{
	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_read(struct btree *b)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
		       TASK_UNINTERRUPTIBLE);
}

void bch2_btree_node_wait_on_write(struct btree *b)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
		       TASK_UNINTERRUPTIBLE);
}
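
/*
 * Illustrative sketch of the in-flight bit protocol used above (not a code
 * path taken verbatim from this file): a writer claims the node, does IO,
 * then clears the flag and wakes waiters:
 *
 *	set_btree_node_write_in_flight(b);
 *	...submit the write and wait for completion...
 *	bch2_btree_node_io_unlock(b);	(clears the bit, wake_up_bit())
 *
 * The variants that BUG_ON(lock_class_is_held()) assert the caller holds no
 * btree node lock, since sleeping on IO while holding one could deadlock.
 */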

static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_next(start);
	     k != end;
	     p = k, k = bkey_next(k)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
		k->needs_whiteout = v;
}

static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		vpfree(p, size);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > btree_bytes(c));

	*used_mempool = false;
	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
	}
	memalloc_nofs_restore(flags);
	return p;
}
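
/*
 * Usage sketch (illustrative): callers pair btree_bounce_alloc() with
 * btree_bounce_free() around a temporary sort/compaction buffer:
 *
 *	bool used_mempool = false;
 *	void *buf = btree_bounce_alloc(c, bytes, &used_mempool);
 *
 *	...use buf as scratch space...
 *
 *	btree_bounce_free(c, bytes, used_mempool, buf);
 *
 * The GFP_NOWAIT vpmalloc() attempt keeps the fast path cheap; falling back
 * to the mempool guarantees forward progress under memory pressure, at the
 * cost of possibly blocking.
 */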

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					    ptrs[c],
					    ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
		}
	}
}

static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(c, b);
	     k != unwritten_whiteouts_end(c, b);
	     k = bkey_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_copy(k, *ptrs);
		k = bkey_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));

	memcpy_u64s(unwritten_whiteouts_start(c, b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}
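
/*
 * Note on the approach above: whiteouts are variable-size bkeys, so they're
 * sorted indirectly - an array of pointers is built at the end of the bounce
 * buffer, sort_bkey_ptrs() sorts the pointers, and only then are the keys
 * copied out in order and memcpy'd back over the unwritten whiteouts. This
 * avoids moving variable-size keys around during the sort itself.
 */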

static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start	= btree_bkey_first(b, t);
		end	= btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next(k);

			if (!bkey_deleted(k)) {
				bkey_copy(out, k);
				out = bkey_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return bch2_drop_whiteouts(b, mode);
}

static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_bytes(c)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		unsigned u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_bytes(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy():
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i]	= b->nr.bset_u64s[i + shift];
		b->set[i]		= b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	nr = bch2_sort_repack(btree_bset_first(dst),
			src, &src_iter,
			&dst->format,
			true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s	+= nr.live_u64s;
	dst->nr.bset_u64s[0]	+= nr.bset_u64s[0];
	dst->nr.packed_keys	+= nr.packed_keys;
	dst->nr.unpacked_keys	+= nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

#define SORT_CRIT	(4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there are
 * currently too many bsets, sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}
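
/*
 * Worked example (sketch): with b->nsets == 3 where set[0] has been written
 * and set[1] and set[2] have not, unwritten_idx is 1; the first branch sorts
 * the unwritten bsets [1, 3) together, and the second branch is skipped
 * since only one written bset precedes them. Written and unwritten bsets
 * are compacted as separate ranges.
 */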

void bch2_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * Returns true if we sorted (i.e. invalidated iterators)
 */
void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_node_entry *bne;
	bool reinit_iter = false;

	EBUG_ON(!(b->c.lock.state.seq & 1));
	BUG_ON(bset_written(b, bset(b, &b->set[1])));

	if (b->nsets == MAX_BSETS &&
	    !btree_node_write_in_flight(b)) {
		unsigned log_u64s[] = {
			ilog2(bset_u64s(&b->set[0])),
			ilog2(bset_u64s(&b->set[1])),
			ilog2(bset_u64s(&b->set[2])),
		};

		if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
			bch2_btree_node_write(c, b, SIX_LOCK_write, 0);
			reinit_iter = true;
		}
	}

	if (b->nsets == MAX_BSETS &&
	    btree_node_compact(c, b))
		reinit_iter = true;

	BUG_ON(b->nsets >= MAX_BSETS);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	if (reinit_iter)
		bch2_trans_node_reinit_iter(trans, b);
}
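
/*
 * Worked example for the log_u64s heuristic above (assumed interpretation):
 * with bset sizes of 4096, 512 and 16 u64s, log_u64s is { 12, 9, 4 } and
 * 9 >= (12 + 4) / 2 holds, so the node is written out - the middle bset is
 * large relative to the geometric mean of its neighbours, and compacting it
 * in memory would be expensive compared to just writing.
 */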

static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
			      struct btree *b)
{
	prt_printf(out, "%s level %u/%u\n  ",
	       bch2_btree_ids[b->c.btree_id],
	       b->c.level,
	       c->btree_roots[b->c.btree_id].level);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct bch_dev *ca,
			  struct btree *b, struct bset *i,
			  unsigned offset, int write)
{
	prt_printf(out, "error validating btree node ");
	if (write)
		prt_printf(out, "before write ");
	if (ca)
		prt_printf(out, "on %s ", ca->name);
	prt_printf(out, "at btree ");
	btree_pos_to_text(out, c, b);

	prt_printf(out, "\n  node offset %u", b->written);
	if (i)
		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
}

enum btree_err_type {
	BTREE_ERR_FIXABLE,
	BTREE_ERR_WANT_RETRY,
	BTREE_ERR_MUST_RETRY,
	BTREE_ERR_FATAL,
};

enum btree_validate_ret {
	BTREE_RETRY_READ = 64,
};

#define btree_err(type, c, ca, b, i, msg, ...)				\
({									\
	__label__ out;							\
	struct printbuf out = PRINTBUF;					\
									\
	btree_err_msg(&out, c, ca, b, i, b->written, write);		\
	prt_printf(&out, ": " msg, ##__VA_ARGS__);			\
									\
	if (type == BTREE_ERR_FIXABLE &&				\
	    write == READ &&						\
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) {		\
		mustfix_fsck_err(c, "%s", out.buf);			\
		goto out;						\
	}								\
									\
	switch (write) {						\
	case READ:							\
		bch_err(c, "%s", out.buf);				\
									\
		switch (type) {						\
		case BTREE_ERR_FIXABLE:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		case BTREE_ERR_WANT_RETRY:				\
			if (have_retry) {				\
				ret = BTREE_RETRY_READ;			\
				goto fsck_err;				\
			}						\
			break;						\
		case BTREE_ERR_MUST_RETRY:				\
			ret = BTREE_RETRY_READ;				\
			goto fsck_err;					\
		case BTREE_ERR_FATAL:					\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write: %s", out.buf);\
									\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
out:									\
	printbuf_exit(&out);						\
	true;								\
})

#define btree_err_on(cond, ...)	((cond) ? btree_err(__VA_ARGS__) : false)
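
/*
 * Usage sketch: validation code writes checks like
 *
 *	btree_err_on(!b->data->keys.seq,
 *		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
 *		     "bad btree header: seq 0");
 *
 * which logs and/or schedules repair according to the error type, and
 * evaluates to true when the condition fired so fixable errors can be
 * repaired inline. Note that these macros rely on `ret', `write' and an
 * fsck_err label existing in the calling function.
 */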

/*
 * When btree topology repair changes the start or end of a node, that might
 * mean we have to drop keys that are no longer inside the node:
 */
void bch2_btree_node_drop_keys_outside_node(struct btree *b)
{
	struct bset_tree *t;
	struct bkey_s_c k;
	struct bkey unpacked;
	struct btree_node_iter iter;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k;

		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
				break;

		if (k != i->start) {
			unsigned shift = (u64 *) k - (u64 *) i->start;

			memmove_u64s_down(i->start, k,
					  (u64 *) vstruct_end(i) - (u64 *) k);
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
			set_btree_bset_end(b, t);
			bch2_bset_set_no_aux_tree(b, t);
		}

		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
				break;

		if (k != vstruct_last(i)) {
			i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
			set_btree_bset_end(b, t);
			bch2_bset_set_no_aux_tree(b, t);
		}
	}

	bch2_btree_build_aux_trees(b);

	for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
		BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
		BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
	}
}

static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
			 struct btree *b, struct bset *i,
			 unsigned offset, unsigned sectors,
			 int write, bool have_retry)
{
	unsigned version = le16_to_cpu(i->version);
	const char *err;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	btree_err_on((version != BCH_BSET_VERSION_OLD &&
		      version < bcachefs_metadata_version_min) ||
		     version >= bcachefs_metadata_version_max,
		     BTREE_ERR_FATAL, c, ca, b, i,
		     "unsupported bset version");

	if (btree_err_on(version < c->sb.version_min,
			 BTREE_ERR_FIXABLE, c, NULL, b, i,
			 "bset version %u older than superblock version_min %u",
			 version, c->sb.version_min)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version_min = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	if (btree_err_on(version > c->sb.version,
			 BTREE_ERR_FIXABLE, c, NULL, b, i,
			 "bset version %u newer than superblock version %u",
			 version, c->sb.version)) {
		mutex_lock(&c->sb_lock);
		c->disk_sb.sb->version = cpu_to_le16(version);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	}

	btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
		     BTREE_ERR_FATAL, c, ca, b, i,
		     "BSET_SEPARATE_WHITEOUTS no longer supported");

	if (btree_err_on(offset + sectors > btree_sectors(c),
			 BTREE_ERR_FIXABLE, c, ca, b, i,
			 "bset past end of btree node")) {
		i->u64s = 0;
		ret = 0;
		goto out;
	}

	btree_err_on(offset && !i->u64s,
		     BTREE_ERR_FIXABLE, c, ca, b, i,
		     "empty bset");

	btree_err_on(BSET_OFFSET(i) &&
		     BSET_OFFSET(i) != offset,
		     BTREE_ERR_WANT_RETRY, c, ca, b, i,
		     "bset at wrong sector offset");

	if (!offset) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			btree_err_on(bp->seq != bn->keys.seq,
				     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     BTREE_ERR_MUST_RETRY, c, ca, b, i,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     BTREE_ERR_MUST_RETRY, c, ca, b, i,
			     "incorrect level");

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
				     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
				     "incorrect min_key: got %s should be %s",
				     (printbuf_reset(&buf1),
				      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
				     (printbuf_reset(&buf2),
				      bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
		}

		btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
			     BTREE_ERR_MUST_RETRY, c, ca, b, i,
			     "incorrect max key %s",
			     (printbuf_reset(&buf1),
			      bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		err = bch2_bkey_format_validate(&bn->format);
		btree_err_on(err,
			     BTREE_ERR_FATAL, c, ca, b, i,
			     "invalid bkey format: %s", err);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
out:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int bset_key_invalid(struct bch_fs *c, struct btree *b,
			    struct bkey_s_c k,
			    bool updated_range, int rw,
			    struct printbuf *err)
{
	return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
		(!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
		(rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
}
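
/*
 * The gcc ?: extension chains these checks: `a ?: b' evaluates to a when a
 * is nonzero, else b - so the first failing validation short-circuits and
 * its error is returned, and 0 means every applicable check passed.
 */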

static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			 struct bset *i, unsigned *whiteout_u64s,
			 int write, bool have_retry)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	struct printbuf buf = PRINTBUF;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	int ret = 0;

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;

		if (btree_err_on(bkey_next(k) > vstruct_last(i),
				 BTREE_ERR_FIXABLE, c, NULL, b, i,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 BTREE_ERR_FIXABLE, c, NULL, b, i,
				 "invalid bkey format %u", k->format)) {
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/* XXX: validate k->u64s */
		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);
		if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
			printbuf_reset(&buf);
			prt_printf(&buf, "invalid bkey: ");
			bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
			prt_printf(&buf, "\n  ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
				    BSET_BIG_ENDIAN(i), write,
				    &b->format, k);

		if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			struct bkey up = bkey_unpack_key(b, prev);

			printbuf_reset(&buf);
			prt_printf(&buf, "keys out of order: ");
			bch2_bkey_to_text(&buf, &up);
			prt_printf(&buf, " > ");
			bch2_bkey_to_text(&buf, u.k);

			bch2_dump_bset(c, b, i, 0);

			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
				memmove_u64s_down(k, bkey_next(k),
						  (u64 *) vstruct_end(i) - (u64 *) k);
				continue;
			}
		}

		prev = k;
		k = bkey_next(k);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
			      struct btree *b, bool have_retry)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bch_extent_ptr *ptr;
	struct bset *i;
	bool used_mempool, blacklisted;
	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
	unsigned u64s;
	unsigned blacklisted_written, nonblacklisted_written = 0;
	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
	struct printbuf buf = PRINTBUF;
	int ret, retry_read = 0, write = READ;

	b->version_ondisk = U16_MAX;
	/* We might get called multiple times on read retry: */
	b->written = 0;

	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
	sort_iter_init(iter, b);
	iter->size = (btree_blocks(c) + 1) * 2;

	if (bch2_meta_read_fault("btree"))
		btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
		     "bad magic: want %llx, got %llx",
		     bset_magic(c), le64_to_cpu(b->data->magic));

	btree_err_on(!b->data->keys.seq,
		     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
		     "bad btree header: seq 0");

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		btree_err_on(b->data->keys.seq != bp->seq,
			     BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
			     "got wrong btree node (seq %llx want %llx)",
			     b->data->keys.seq, bp->seq);
	}

	while (b->written < (ptr_written ?: btree_sectors(c))) {
		unsigned sectors, whiteout_u64s = 0;
		struct nonce nonce;
		struct bch_csum csum;
		bool first = !b->written;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "unknown checksum type %llu",
				     BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "invalid checksum");

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"error decrypting btree node: %i", ret))
				goto fsck_err;

			btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
				     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
				     BTREE_ERR_FATAL, c, NULL, b, NULL,
				     "btree node does not have NEW_EXTENT_OVERWRITE set");

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "unknown checksum type %llu",
				     BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			btree_err_on(bch2_crc_cmp(csum, bne->csum),
				     BTREE_ERR_WANT_RETRY, c, ca, b, i,
				     "invalid checksum");

			ret = bset_encrypt(c, i, b->written << 9);
			if (bch2_fs_fatal_err_on(ret, c,
					"error decrypting btree node: %i\n", ret))
				goto fsck_err;

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		b->version_ondisk = min(b->version_ondisk,
					le16_to_cpu(i->version));

		ret = validate_bset(c, ca, b, i, b->written, sectors,
				    READ, have_retry);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, &whiteout_u64s,
					 READ, have_retry);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     BTREE_ERR_FIXABLE, c, ca, b, i,
			     "first btree node bset has blacklisted journal seq (%llu)",
			     le64_to_cpu(i->journal_seq));

		btree_err_on(blacklisted && ptr_written,
			     BTREE_ERR_FIXABLE, c, ca, b, i,
			     "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
			     le64_to_cpu(i->journal_seq),
			     b->written, b->written + sectors, ptr_written);

		b->written += sectors;

		if (blacklisted && !first)
			continue;

		sort_iter_add(iter, i->start,
			      vstruct_idx(i, whiteout_u64s));

		sort_iter_add(iter,
			      vstruct_idx(i, whiteout_u64s),
			      vstruct_last(i));

		nonblacklisted_written = b->written;
	}

	if (ptr_written) {
		btree_err_on(b->written < ptr_written,
			     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
			     "btree node data missing: expected %u sectors, found %u",
			     ptr_written, b->written);
	} else {
		for (bne = write_block(b);
		     bset_byte_offset(b, bne) < btree_bytes(c);
		     bne = (void *) bne + block_bytes(c))
			btree_err_on(bne->keys.seq == b->data->keys.seq &&
				     !bch2_journal_seq_is_blacklisted(c,
						le64_to_cpu(bne->keys.journal_seq),
						true),
				     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
				     "found bset signature after last bset");

		/*
		 * Blacklisted bsets are those that were written after the most recent
		 * (flush) journal write. Since there wasn't a flush, they may not have
		 * made it to all devices - which means we shouldn't write new bsets
		 * after them, as that could leave a gap and then reads from that device
		 * wouldn't find all the bsets in that btree node - which means it's
		 * important that we start writing new bsets after the most recent _non_
		 * blacklisted bset:
		 */
		blacklisted_written = b->written;
		b->written = nonblacklisted_written;
	}
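
	/*
	 * Illustrative example of the above: if bsets were found at sectors
	 * 0-8, 8-12 and 12-16, with the bset at 12-16 blacklisted, then
	 * nonblacklisted_written is 12 - so the next bset written to this
	 * node starts at sector 12, overwriting the blacklisted bset rather
	 * than leaving a gap after it.
	 */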

	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);

	if (updated_range)
		bch2_btree_node_drop_keys_outside_node(b);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);

		printbuf_reset(&buf);

		if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			printbuf_reset(&buf);

			prt_printf(&buf, "invalid bkey: ");
			bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
			prt_printf(&buf, "\n  ");
			bch2_bkey_val_to_text(&buf, c, u.s_c);

			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_next(k);
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			set_btree_node_need_rewrite(b);
	}

	if (!ptr_written)
		set_btree_node_need_rewrite(b);
out:
	mempool_free(iter, &c->fill_iter);
	printbuf_exit(&buf);
	return retry_read;
fsck_err:
	if (ret == BTREE_RETRY_READ) {
		retry_read = 1;
	} else {
		bch2_inconsistent_error(c);
		set_btree_node_read_error(b);
	}
	goto out;
}

static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c	= rb->c;
	struct btree *b		= rb->b;
	struct bch_dev *ca	= bch_dev_bkey_exists(c, rb->pick.ptr.dev);
	struct bio *bio		= &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	struct printbuf buf = PRINTBUF;
	bool saw_error = false;
	bool retry = false;
	bool can_retry;

	goto start;
	while (1) {
		retry = true;
		bch_info(c, "retrying read");
		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
		bio->bi_iter.bi_size	= btree_bytes(c);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		printbuf_reset(&buf);
		btree_pos_to_text(&buf, c, b);
		bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf.buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
				bkey_i_to_s_c(&b->key),
				&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, ca, b, can_retry)) {
			if (retry)
				bch_info(c, "retry success");
			break;
		}

		saw_error = true;

		if (!can_retry) {
			set_btree_node_read_error(b);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);
	printbuf_exit(&buf);

	if (saw_error && !btree_node_read_error(b))
		bch2_btree_node_rewrite_async(c, b);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(c->io_complete_wq, &rb->work);
}

struct btree_node_read_all {
	struct closure		cl;
	struct bch_fs		*c;
	struct btree		*b;
	unsigned		nr;
	void			*buf[BCH_REPLICAS_MAX];
	struct bio		*bio[BCH_REPLICAS_MAX];
	int			err[BCH_REPLICAS_MAX];
};

static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;
	unsigned offset = 0;

	if (le64_to_cpu(bn->magic) != bset_magic(c))
		return 0;

	while (offset < btree_sectors(c)) {
		if (!offset) {
			offset += vstruct_sectors(bn, c->block_bits);
		} else {
			bne = data + (offset << 9);
			if (bne->keys.seq != bn->keys.seq)
				break;
			offset += vstruct_sectors(bne, c->block_bits);
		}
	}

	return offset;
}
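
/*
 * On-disk layout being walked above (sketch): a btree node is a struct
 * btree_node (header plus first bset) followed by struct btree_node_entries,
 * each starting on a block boundary and carrying the same keys.seq:
 *
 *	sector 0:	btree_node	 { magic, csum, keys { seq, u64s, ... } }
 *	sector N:	btree_node_entry { csum, keys { seq, u64s, ... } }
 *	...
 *
 * The walk stops at the first entry whose seq doesn't match (or the end of
 * the node) and returns how many sectors were actually written.
 */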

static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
{
	struct btree_node *bn = data;
	struct btree_node_entry *bne;

	if (!offset)
		return false;

	while (offset < btree_sectors(c)) {
		bne = data + (offset << 9);
		if (bne->keys.seq == bn->keys.seq)
			return true;
		offset++;
	}

	return false;
}

static void btree_node_read_all_replicas_done(struct closure *cl)
{
	struct btree_node_read_all *ra =
		container_of(cl, struct btree_node_read_all, cl);
	struct bch_fs *c = ra->c;
	struct btree *b = ra->b;
	struct printbuf buf = PRINTBUF;
	bool dump_bset_maps = false;
	bool have_retry = false;
	int ret = 0, best = -1, write = READ;
	unsigned i, written = 0, written2 = 0;
	__le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
		? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;

	for (i = 0; i < ra->nr; i++) {
		struct btree_node *bn = ra->buf[i];

		if (ra->err[i])
			continue;

		if (le64_to_cpu(bn->magic) != bset_magic(c) ||
		    (seq && seq != bn->keys.seq))
			continue;

		if (best < 0) {
			best = i;
			written = btree_node_sectors_written(c, bn);
			continue;
		}

		written2 = btree_node_sectors_written(c, ra->buf[i]);
		if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
				 "btree node sectors written mismatch: %u != %u",
				 written, written2) ||
		    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
				 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
				 "found bset signature after last bset") ||
		    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
				 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
				 "btree node replicas content mismatch"))
			dump_bset_maps = true;

		if (written2 > written) {
			written = written2;
			best = i;
		}
	}
fsck_err:
	if (dump_bset_maps) {
		for (i = 0; i < ra->nr; i++) {
			struct btree_node *bn = ra->buf[i];
			struct btree_node_entry *bne = NULL;
			unsigned offset = 0, sectors;
			bool gap = false;

			if (ra->err[i])
				continue;

			printbuf_reset(&buf);

			while (offset < btree_sectors(c)) {
				if (!offset) {
					sectors = vstruct_sectors(bn, c->block_bits);
				} else {
					bne = ra->buf[i] + (offset << 9);
					if (bne->keys.seq != bn->keys.seq)
						break;
					sectors = vstruct_sectors(bne, c->block_bits);
				}

				prt_printf(&buf, " %u-%u", offset, offset + sectors);
				if (bne && bch2_journal_seq_is_blacklisted(c,
						le64_to_cpu(bne->keys.journal_seq), false))
					prt_printf(&buf, "*");
				offset += sectors;
			}

			while (offset < btree_sectors(c)) {
				bne = ra->buf[i] + (offset << 9);
				if (bne->keys.seq == bn->keys.seq) {
					if (!gap)
						prt_printf(&buf, " GAP");
					gap = true;

					sectors = vstruct_sectors(bne, c->block_bits);
					prt_printf(&buf, " %u-%u", offset, offset + sectors);
					if (bch2_journal_seq_is_blacklisted(c,
							le64_to_cpu(bne->keys.journal_seq), false))
						prt_printf(&buf, "*");
				}
				offset++;
			}

			bch_err(c, "replica %u:%s", i, buf.buf);
		}
	}

	if (best >= 0) {
		memcpy(b->data, ra->buf[best], btree_bytes(c));
		ret = bch2_btree_node_read_done(c, NULL, b, false);
	} else {
		ret = -1;
	}

	if (ret)
		set_btree_node_read_error(b);

	for (i = 0; i < ra->nr; i++) {
		mempool_free(ra->buf[i], &c->btree_bounce_pool);
		bio_put(ra->bio[i]);
	}

	closure_debug_destroy(&ra->cl);
	kfree(ra);
	printbuf_exit(&buf);

	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

static void btree_node_read_all_replicas_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c	= rb->c;
	struct btree_node_read_all *ra = rb->ra;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);

		bch2_latency_acct(ca, rb->start_time, READ);
	}

	ra->err[rb->idx] = bio->bi_status;
	closure_put(&ra->cl);
}

/*
 * XXX This allocates multiple times from the same mempools, and can deadlock
 * under sufficient memory pressure (but is only a debug path)
 */
static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
{
	struct bkey_s_c k = bkey_i_to_s_c(&b->key);
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded pick;
	struct btree_node_read_all *ra;
	unsigned i;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	closure_init(&ra->cl, NULL);
	ra->c	= c;
	ra->b	= b;
	ra->nr	= bch2_bkey_nr_ptrs(k);

	for (i = 0; i < ra->nr; i++) {
		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
		ra->bio[i] = bio_alloc_bioset(NULL,
					      buf_pages(ra->buf[i], btree_bytes(c)),
					      REQ_OP_READ|REQ_SYNC|REQ_META,
					      GFP_NOFS,
					      &c->btree_bio);
	}

	i = 0;
	bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
		struct btree_read_bio *rb =
			container_of(ra->bio[i], struct btree_read_bio, bio);

		rb->c			= c;
		rb->b			= b;
		rb->ra			= ra;
		rb->start_time		= local_clock();
		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
		rb->idx			= i;
		rb->pick		= pick;
		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));

		if (rb->have_ioref) {
			this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
				     bio_sectors(&rb->bio));
			bio_set_dev(&rb->bio, ca->disk_sb.bdev);

			closure_get(&ra->cl);
			submit_bio(&rb->bio);
		} else {
			ra->err[i] = BLK_STS_REMOVED;
		}

		i++;
	}

	if (sync) {
		closure_sync(&ra->cl);
		btree_node_read_all_replicas_done(&ra->cl);
	} else {
		continue_at(&ra->cl, btree_node_read_all_replicas_done,
			    c->io_complete_wq);
	}

	return 0;
}

void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
			  bool sync)
{
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_btree_read(c, b);

	if (bch2_verify_all_btree_replicas &&
	    !btree_node_read_all_replicas(c, b, sync))
		return;

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);

	if (ret <= 0) {
		struct printbuf buf = PRINTBUF;

		prt_str(&buf, "btree node read error: no device to read from\n at ");
		btree_pos_to_text(&buf, c, b);
		bch_err(c, "%s", buf.buf);

		if (test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags))
			bch2_fatal_error(c);

		set_btree_node_read_error(b);
		clear_btree_node_read_in_flight(b);
		wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
		printbuf_exit(&buf);
		return;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	bio = bio_alloc_bioset(NULL,
			       buf_pages(b->data, btree_bytes(c)),
			       REQ_OP_READ|REQ_SYNC|REQ_META,
			       GFP_NOIO,
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c			= c;
	rb->b			= b;
	rb->ra			= NULL;
	rb->start_time		= local_clock();
	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
	rb->pick		= pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_iter.bi_sector	= pick.ptr.offset;
	bio->bi_end_io		= btree_node_read_endio;
	bch2_bio_map(bio, b->data, btree_bytes(c));

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);

			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(c->io_complete_wq, &rb->work);
	}
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c, level != 0);
	bch2_btree_cache_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	set_btree_node_read_in_flight(b);

	bch2_btree_node_read(c, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -EIO;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
			       struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}

static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);
	unsigned long old, new, v;

	bch2_btree_complete_write(c, b, w);

	v = READ_ONCE(b->flags);
	do {
		old = new = v;

		if ((old & (1U << BTREE_NODE_dirty)) &&
		    (old & (1U << BTREE_NODE_need_write)) &&
		    !(old & (1U << BTREE_NODE_never_write)) &&
		    !(old & (1U << BTREE_NODE_write_blocked)) &&
		    !(old & (1U << BTREE_NODE_will_make_reachable))) {
			new &= ~(1U << BTREE_NODE_dirty);
			new &= ~(1U << BTREE_NODE_need_write);
			new |=  (1U << BTREE_NODE_write_in_flight);
			new |=  (1U << BTREE_NODE_write_in_flight_inner);
			new |=  (1U << BTREE_NODE_just_written);
			new ^=  (1U << BTREE_NODE_write_idx);
		} else {
			new &= ~(1U << BTREE_NODE_write_in_flight);
			new &= ~(1U << BTREE_NODE_write_in_flight_inner);
		}
	} while ((v = cmpxchg(&b->flags, old, new)) != old);

	if (new & (1U << BTREE_NODE_write_in_flight))
		__bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED);
	else
		wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
}

static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	six_lock_read(&b->c.lock, NULL, NULL);
	__btree_node_write_done(c, b);
	six_unlock_read(&b->c.lock);
}

static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c	= wbio->wbio.c;
	struct btree *b		= wbio->wbio.bio.bi_private;
	struct bch_extent_ptr *ptr;
	int ret;

	btree_bounce_free(c,
		wbio->data_bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
		goto err;

	if (wbio->wbio.first_btree_write) {
		if (wbio->wbio.failed.nr) {

		}
	} else {
		ret = bch2_trans_do(c, NULL, NULL, 0,
			bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
							    !wbio->wbio.failed.nr));
		if (ret)
			goto err;
	}
out:
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_error(c, "fatal error writing btree node");
	goto out;
}

static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio	= to_wbio(bio);
	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig	= parent ?: wbio;
	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
	struct bch_fs *c		= wbio->c;
	struct btree *b			= wbio->bio.bi_private;
	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
		return;
	}

	clear_btree_node_write_in_flight_inner(b);
	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
	INIT_WORK(&wb->work, btree_node_write_work);
	queue_work(c->btree_io_complete_wq, &wb->work);
}

static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	unsigned whiteout_u64s = 0;
	struct printbuf buf = PRINTBUF;
	int ret;

	ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
				BKEY_TYPE_btree, WRITE, &buf);

	if (ret)
		bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
	printbuf_exit(&buf);
	if (ret)
		return ret;

	ret = validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false) ?:
		validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

static void btree_write_submit(struct work_struct *work)
{
	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
	struct bch_extent_ptr *ptr;
	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;

	bkey_copy(&tmp.k, &wbio->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
		ptr->offset += wbio->sector_offset;

	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
}

void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
{
	struct btree_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct sort_iter sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	void *data;
	int ret;

	if (flags & BTREE_WRITE_ALREADY_STARTED)
		goto do_write;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * redirtying it:
	 */
	old = new = READ_ONCE(b->flags);
	do {
		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
		    !(old & (1 << BTREE_NODE_need_write)))
			return;

		if (old &
		    ((1 << BTREE_NODE_never_write)|
		     (1 << BTREE_NODE_write_blocked)))
			return;

		if (b->written &&
		    (old & (1 << BTREE_NODE_will_make_reachable)))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight))
			return;

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |=  (1 << BTREE_NODE_write_in_flight);
		new |=  (1 << BTREE_NODE_write_in_flight_inner);
		new |=  (1 << BTREE_NODE_just_written);
		new ^=  (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	if (new & (1U << BTREE_NODE_need_write))
		return;
do_write:
	atomic_dec(&c->btree_cache.dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= btree_sectors(c));
	BUG_ON(b->written & (block_sectors(c) - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	/* buffer must be a multiple of the block size */
	bytes = round_up(bytes, block_bytes(c));

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq	= cpu_to_le64(seq);
	i->u64s		= 0;

	sort_iter_add(&sort_iter,
		      unwritten_whiteouts_start(c, b),
		      unwritten_whiteouts_end(c, b));
	SET_BSET_SEPARATE_WHITEOUTS(i, false);

	b->whiteout_u64s = 0;

	u64s = bch2_sort_keys(i->start, &sort_iter, false);
	le16_add_cpu(&i->u64s, u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write	 = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > btree_sectors(c));
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = c->sb.version < bcachefs_metadata_version_bkey_renumber
		? cpu_to_le16(BCH_BSET_VERSION_OLD)
		: cpu_to_le16(c->sb.version);
	SET_BSET_OFFSET(i, b->written);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	ret = bset_encrypt(c, i, b->written << 9);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting btree node: %i\n", ret))
		goto err;

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_btree_write(b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(NULL,
				buf_pages(data, sectors_to_write << 9),
				REQ_OP_WRITE|REQ_META,
				GFP_NOIO,
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data			= data;
	wbio->data_bytes		= bytes;
	wbio->sector_offset		= b->written;
	wbio->wbio.c			= c;
	wbio->wbio.used_mempool		= used_mempool;
	wbio->wbio.first_btree_write	= !b->written;
	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
	wbio->wbio.bio.bi_private	= b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	bkey_copy(&wbio->key, &b->key);

	b->written += sectors_to_write;

	if (wbio->wbio.first_btree_write &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
			cpu_to_le16(b->written);

	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
			cpu_to_le16(b->written);

	atomic64_inc(&c->btree_writes_nr);
	atomic64_add(sectors_to_write, &c->btree_writes_sectors);

	INIT_WORK(&wbio->work, btree_write_submit);
	queue_work(c->io_complete_wq, &wbio->work);
	return;
err:
	set_btree_node_noevict(b);
	if (!b->written &&
	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
		bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
			cpu_to_le16(sectors_to_write);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	__btree_node_write_done(c, b);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held,
			   unsigned flags)
{
	if (lock_type_held == SIX_LOCK_intent ||
	    (lock_type_held == SIX_LOCK_read &&
	     six_lock_tryupgrade(&b->c.lock))) {
		__bch2_btree_node_write(c, b, flags);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, flags);
		if (lock_type_held == SIX_LOCK_write &&
		    btree_node_just_written(b))
			bch2_btree_post_write_cleanup(c, b);
	}
}
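
/*
 * Usage sketch: callers pass whichever lock type they already hold, e.g.
 *
 *	bch2_btree_node_write(c, b, SIX_LOCK_intent, 0);
 *
 * With a read lock the helper first tries to upgrade, so that the post-write
 * cleanup (which needs the write lock) can run right away; if the upgrade or
 * trylock fails, cleanup is deferred until a write lock is taken later.
 */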

static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	bool ret = false;
	unsigned i;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			ret = true;
			goto restart;
		}
	rcu_read_unlock();

	return ret;
}

bool bch2_btree_flush_all_reads(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

bool bch2_btree_flush_all_writes(struct bch_fs *c)
{
	return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}