1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
23 #include <linux/sched/mm.h>
25 void bch2_btree_node_io_unlock(struct btree *b)
27 EBUG_ON(!btree_node_write_in_flight(b));
29 clear_btree_node_write_in_flight_inner(b);
30 clear_btree_node_write_in_flight(b);
31 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
34 void bch2_btree_node_io_lock(struct btree *b)
36 bch2_assert_btree_nodes_not_locked();
38 wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
39 TASK_UNINTERRUPTIBLE);
42 void __bch2_btree_node_wait_on_read(struct btree *b)
44 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
45 TASK_UNINTERRUPTIBLE);
48 void __bch2_btree_node_wait_on_write(struct btree *b)
50 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
51 TASK_UNINTERRUPTIBLE);
54 void bch2_btree_node_wait_on_read(struct btree *b)
56 bch2_assert_btree_nodes_not_locked();
58 wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
59 TASK_UNINTERRUPTIBLE);
62 void bch2_btree_node_wait_on_write(struct btree *b)
64 bch2_assert_btree_nodes_not_locked();
66 wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
67 TASK_UNINTERRUPTIBLE);
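/*
 * The __ variants above may be called with btree node locks held; the
 * plain variants assert (via bch2_assert_btree_nodes_not_locked()) that no
 * btree node locks are held, since sleeping on node IO while holding btree
 * locks risks deadlock.
 */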
70 static void verify_no_dups(struct btree *b,
71 struct bkey_packed *start,
72 struct bkey_packed *end)
74 #ifdef CONFIG_BCACHEFS_DEBUG
75 struct bkey_packed *k, *p;
80 for (p = start, k = bkey_p_next(start);
82 p = k, k = bkey_p_next(k)) {
83 struct bkey l = bkey_unpack_key(b, p);
84 struct bkey r = bkey_unpack_key(b, k);
86 BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
91 static void set_needs_whiteout(struct bset *i, int v)
93 struct bkey_packed *k;
95 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
96 k->needs_whiteout = v;
99 static void btree_bounce_free(struct bch_fs *c, size_t size,
100 bool used_mempool, void *p)
103 mempool_free(p, &c->btree_bounce_pool);
108 static void *btree_bounce_alloc_noprof(struct bch_fs *c, size_t size,
111 unsigned flags = memalloc_nofs_save();
114 BUG_ON(size > btree_bytes(c));
116 *used_mempool = false;
117 p = vpmalloc_noprof(size, __GFP_NOWARN|GFP_NOWAIT);
119 *used_mempool = true;
120 p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
122 memalloc_nofs_restore(flags);
125 #define btree_bounce_alloc(_c, _size, _used_mempool) \
126 alloc_hooks(btree_bounce_alloc_noprof(_c, _size, _used_mempool))
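/*
 * Bounce buffers are scratch space, up to a full btree node in size
 * (btree_bytes(c)). We first try a regular allocation with GFP_NOWAIT so we
 * never block on reclaim here (memalloc_nofs_save() additionally keeps
 * reclaim out of the filesystem); if that fails we fall back to the
 * preallocated btree_bounce_pool, and record which path was taken in
 * *used_mempool so btree_bounce_free() can undo it. Typical usage, for
 * illustration only:
 *
 *	bool used_mempool;
 *	void *buf = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
 *	...
 *	btree_bounce_free(c, btree_bytes(c), used_mempool, buf);
 */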
128 static void sort_bkey_ptrs(const struct btree *bt,
129 struct bkey_packed **ptrs, unsigned nr)
131 unsigned n = nr, a = nr / 2, b, c, d;
136 /* Heap sort: see lib/sort.c: */
141 swap(ptrs[0], ptrs[n]);
145 for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
146 b = bch2_bkey_cmp_packed(bt,
148 ptrs[d]) >= 0 ? c : d;
153 bch2_bkey_cmp_packed(bt,
160 swap(ptrs[b], ptrs[c]);
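/*
 * Note that this sorts an array of pointers to packed keys, not the keys
 * themselves: packed keys are variable size, so it's much cheaper to
 * heapsort the pointer array and copy the keys out in order afterwards.
 */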
165 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
167 struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
168 bool used_mempool = false;
169 size_t bytes = b->whiteout_u64s * sizeof(u64);
171 if (!b->whiteout_u64s)
174 new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
176 ptrs = ptrs_end = ((void *) new_whiteouts + bytes);
178 for (k = unwritten_whiteouts_start(c, b);
179 k != unwritten_whiteouts_end(c, b);
183 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
187 while (ptrs != ptrs_end) {
193 verify_no_dups(b, new_whiteouts,
194 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s));
196 memcpy_u64s(unwritten_whiteouts_start(c, b),
197 new_whiteouts, b->whiteout_u64s);
199 btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
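/*
 * Unwritten whiteouts accumulate in insertion order; the write path feeds
 * them into the same sort_iter as the bsets (see __bch2_btree_node_write()),
 * which requires every input to already be sorted - hence the sort (and the
 * verify_no_dups() check) here.
 */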
202 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
203 bool compacting, enum compact_mode mode)
205 if (!bset_dead_u64s(b, t))
210 return should_compact_bset_lazy(b, t) ||
211 (compacting && !bset_written(b, bset(b, t)));
219 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
224 for_each_bset(b, t) {
225 struct bset *i = bset(b, t);
226 struct bkey_packed *k, *n, *out, *start, *end;
227 struct btree_node_entry *src = NULL, *dst = NULL;
229 if (t != b->set && !bset_written(b, i)) {
230 src = container_of(i, struct btree_node_entry, keys);
231 dst = max(write_block(b),
232 (void *) btree_bkey_last(b, t - 1));
238 if (!should_compact_bset(b, t, ret, mode)) {
240 memmove(dst, src, sizeof(*src) +
241 le16_to_cpu(src->keys.u64s) *
244 set_btree_bset(b, t, i);
249 start = btree_bkey_first(b, t);
250 end = btree_bkey_last(b, t);
253 memmove(dst, src, sizeof(*src));
255 set_btree_bset(b, t, i);
260 for (k = start; k != end; k = n) {
263 if (!bkey_deleted(k)) {
265 out = bkey_p_next(out);
267 BUG_ON(k->needs_whiteout);
271 i->u64s = cpu_to_le16((u64 *) out - i->_data);
272 set_btree_bset_end(b, t);
273 bch2_bset_set_no_aux_tree(b, t);
277 bch2_verify_btree_nr_keys(b);
279 bch2_btree_build_aux_trees(b);
284 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
285 enum compact_mode mode)
287 return bch2_drop_whiteouts(b, mode);
290 static void btree_node_sort(struct bch_fs *c, struct btree *b,
293 bool filter_whiteouts)
295 struct btree_node *out;
296 struct sort_iter sort_iter;
298 struct bset *start_bset = bset(b, &b->set[start_idx]);
299 bool used_mempool = false;
300 u64 start_time, seq = 0;
301 unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
302 bool sorting_entire_node = start_idx == 0 &&
305 sort_iter_init(&sort_iter, b);
307 for (t = b->set + start_idx;
308 t < b->set + end_idx;
310 u64s += le16_to_cpu(bset(b, t)->u64s);
311 sort_iter_add(&sort_iter,
312 btree_bkey_first(b, t),
313 btree_bkey_last(b, t));
316 bytes = sorting_entire_node
318 : __vstruct_bytes(struct btree_node, u64s);
320 out = btree_bounce_alloc(c, bytes, &used_mempool);
322 start_time = local_clock();
324 u64s = bch2_sort_keys(out->keys.start, &sort_iter, filter_whiteouts);
326 out->keys.u64s = cpu_to_le16(u64s);
328 BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);
330 if (sorting_entire_node)
331 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
334 /* Make sure we preserve bset journal_seq: */
335 for (t = b->set + start_idx; t < b->set + end_idx; t++)
336 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
337 start_bset->journal_seq = cpu_to_le64(seq);
339 if (sorting_entire_node) {
340 unsigned u64s = le16_to_cpu(out->keys.u64s);
342 BUG_ON(bytes != btree_bytes(c));
345 * Our temporary buffer is the same size as the btree node's
346	 * buffer, we can just swap buffers instead of doing a big memcpy()
350 out->keys.u64s = cpu_to_le16(u64s);
352 set_btree_bset(b, b->set, &b->data->keys);
354 start_bset->u64s = out->keys.u64s;
355 memcpy_u64s(start_bset->start,
357 le16_to_cpu(out->keys.u64s));
360 for (i = start_idx + 1; i < end_idx; i++)
361 b->nr.bset_u64s[start_idx] +=
366 for (i = start_idx + 1; i < b->nsets; i++) {
367 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
368 b->set[i] = b->set[i + shift];
371 for (i = b->nsets; i < MAX_BSETS; i++)
372 b->nr.bset_u64s[i] = 0;
374 set_btree_bset_end(b, &b->set[start_idx]);
375 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
377 btree_bounce_free(c, bytes, used_mempool, out);
379 bch2_verify_btree_nr_keys(b);
382 void bch2_btree_sort_into(struct bch_fs *c,
386 struct btree_nr_keys nr;
387 struct btree_node_iter src_iter;
388 u64 start_time = local_clock();
390 BUG_ON(dst->nsets != 1);
392 bch2_bset_set_no_aux_tree(dst, dst->set);
394 bch2_btree_node_iter_init_from_start(&src_iter, src);
396 nr = bch2_sort_repack(btree_bset_first(dst),
401 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
404 set_btree_bset_end(dst, dst->set);
406 dst->nr.live_u64s += nr.live_u64s;
407 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
408 dst->nr.packed_keys += nr.packed_keys;
409 dst->nr.unpacked_keys += nr.unpacked_keys;
411 bch2_verify_btree_nr_keys(dst);
414 #define SORT_CRIT (4096 / sizeof(u64))
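/* i.e. 4096 bytes / sizeof(u64) == 512 u64s worth of keys */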
417	 * We're about to add another bset to the btree node, so if there are currently
418 * too many bsets - sort some of them together:
420 static bool btree_node_compact(struct bch_fs *c, struct btree *b)
422 unsigned unwritten_idx;
425 for (unwritten_idx = 0;
426 unwritten_idx < b->nsets;
428 if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
431 if (b->nsets - unwritten_idx > 1) {
432 btree_node_sort(c, b, unwritten_idx,
437 if (unwritten_idx > 1) {
438 btree_node_sort(c, b, 0, unwritten_idx, false);
445 void bch2_btree_build_aux_trees(struct btree *b)
450 bch2_bset_build_aux_tree(b, t,
451 !bset_written(b, bset(b, t)) &&
452 t == bset_tree_last(b));
456 * If we have MAX_BSETS (3) bsets, should we sort them all down to just one?
458 * The first bset is going to be of similar order to the size of the node, the
459 * last bset is bounded by btree_write_set_buffer(), which is set to keep the
460 * memmove on insert from being too expensive: the middle bset should, ideally,
461 * be the geometric mean of the first and the last.
463 * Returns true if the middle bset is greater than that geometric mean:
465 static inline bool should_compact_all(struct bch_fs *c, struct btree *b)
467 unsigned mid_u64s_bits =
468 (ilog2(btree_max_u64s(c)) + BTREE_WRITE_SET_U64s_BITS) / 2;
470 return bset_u64s(&b->set[1]) > 1U << mid_u64s_bits;
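/*
 * Worked example with illustrative numbers (not the actual constants): if
 * ilog2(btree_max_u64s(c)) were 15 and BTREE_WRITE_SET_U64s_BITS were 9,
 * mid_u64s_bits would be (15 + 9) / 2 = 12, so we'd compact the whole node
 * once the middle bset grew past 2^12 = 4096 u64s - the geometric mean of
 * 2^15 and 2^9.
 */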
474	 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be
477 * Safe to call if there already is an unwritten bset - will only add a new bset
478 * if @b doesn't already have one.
480	 * If we had to sort, node iterators pointing into @b were invalidated;
 * bch2_trans_node_reinit_iter() is called to fix them up.
482 void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
484 struct bch_fs *c = trans->c;
485 struct btree_node_entry *bne;
486 bool reinit_iter = false;
488 EBUG_ON(!six_lock_counts(&b->c.lock).n[SIX_LOCK_write]);
489 BUG_ON(bset_written(b, bset(b, &b->set[1])));
490 BUG_ON(btree_node_just_written(b));
492 if (b->nsets == MAX_BSETS &&
493 !btree_node_write_in_flight(b) &&
494 should_compact_all(c, b)) {
495 bch2_btree_node_write(c, b, SIX_LOCK_write,
496 BTREE_WRITE_init_next_bset);
500 if (b->nsets == MAX_BSETS &&
501 btree_node_compact(c, b))
504 BUG_ON(b->nsets >= MAX_BSETS);
506 bne = want_new_bset(c, b);
508 bch2_bset_init_next(c, b, bne);
510 bch2_btree_build_aux_trees(b);
513 bch2_trans_node_reinit_iter(trans, b);
516 static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
519 prt_printf(out, "%s level %u/%u\n ",
520 bch2_btree_ids[b->c.btree_id],
522 bch2_btree_id_root(c, b->c.btree_id)->level);
523 bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
526 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
528 struct btree *b, struct bset *i,
529 unsigned offset, int write)
531 prt_printf(out, bch2_log_msg(c, "%s"),
533 ? "error validating btree node "
534 : "corrupt btree node before write ");
536 prt_printf(out, "on %s ", ca->name);
537 prt_printf(out, "at btree ");
538 btree_pos_to_text(out, c, b);
540 prt_printf(out, "\n node offset %u", b->written);
542 prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
546 enum btree_err_type {
548 * We can repair this locally, and we're after the checksum check so
549 * there's no need to try another replica:
553	 * We can repair this if we have to, but we should try reading another
 * replica if we can:
556 BTREE_ERR_WANT_RETRY,
558	 * Read another replica if we have one, otherwise consider the whole
 * node bad:
561 BTREE_ERR_MUST_RETRY,
563 BTREE_ERR_INCOMPATIBLE,
566 enum btree_validate_ret {
567 BTREE_RETRY_READ = 64,
570 static int __btree_err(enum btree_err_type type,
577 const char *fmt, ...)
579 struct printbuf out = PRINTBUF;
581 int ret = -BCH_ERR_fsck_fix;
583 btree_err_msg(&out, c, ca, b, i, b->written, write);
586 prt_vprintf(&out, fmt, args);
589 if (write == WRITE) {
590 bch2_print_string_as_lines(KERN_ERR, out.buf);
591 ret = c->opts.errors == BCH_ON_ERROR_continue
593 : -BCH_ERR_fsck_errors_not_fixed;
597 if (!have_retry && type == BTREE_ERR_WANT_RETRY)
598 type = BTREE_ERR_FIXABLE;
599 if (!have_retry && type == BTREE_ERR_MUST_RETRY)
600 type = BTREE_ERR_BAD_NODE;
603 case BTREE_ERR_FIXABLE:
604 mustfix_fsck_err(c, "%s", out.buf);
605 ret = -BCH_ERR_fsck_fix;
607 case BTREE_ERR_WANT_RETRY:
608 case BTREE_ERR_MUST_RETRY:
609 bch2_print_string_as_lines(KERN_ERR, out.buf);
610 ret = BTREE_RETRY_READ;
612 case BTREE_ERR_BAD_NODE:
613 bch2_print_string_as_lines(KERN_ERR, out.buf);
614 bch2_topology_error(c);
615 ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
617 case BTREE_ERR_INCOMPATIBLE:
618 bch2_print_string_as_lines(KERN_ERR, out.buf);
619 ret = -BCH_ERR_fsck_errors_not_fixed;
630 #define btree_err(type, c, ca, b, i, msg, ...) \
632 int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
634 if (_ret != -BCH_ERR_fsck_fix) \
639 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
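/*
 * btree_err() expands to a statement expression: __btree_err() prints the
 * message and maps the error type (together with read vs. write and whether
 * a retry is possible) to a return code; any result other than
 * -BCH_ERR_fsck_fix makes the enclosing function jump to its fsck_err label
 * with that code. btree_err_on() is the conditional form, evaluating to
 * true when the error fired and was fixed up locally.
 */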
642 * When btree topology repair changes the start or end of a node, that might
643 * mean we have to drop keys that are no longer inside the node:
646 void bch2_btree_node_drop_keys_outside_node(struct btree *b)
650 struct bkey unpacked;
651 struct btree_node_iter iter;
653 for_each_bset(b, t) {
654 struct bset *i = bset(b, t);
655 struct bkey_packed *k;
657 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
658 if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
662 unsigned shift = (u64 *) k - (u64 *) i->start;
664 memmove_u64s_down(i->start, k,
665 (u64 *) vstruct_end(i) - (u64 *) k);
666 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - shift);
667 set_btree_bset_end(b, t);
670 for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
671 if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
674 if (k != vstruct_last(i)) {
675 i->u64s = cpu_to_le16((u64 *) k - (u64 *) i->start);
676 set_btree_bset_end(b, t);
681 * Always rebuild search trees: eytzinger search tree nodes directly
682 * depend on the values of min/max key:
684 bch2_bset_set_no_aux_tree(b, b->set);
685 bch2_btree_build_aux_trees(b);
687 for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
688 BUG_ON(bpos_lt(k.k->p, b->data->min_key));
689 BUG_ON(bpos_gt(k.k->p, b->data->max_key));
693 static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
694 struct btree *b, struct bset *i,
695 unsigned offset, unsigned sectors,
696 int write, bool have_retry, bool *saw_error)
698 unsigned version = le16_to_cpu(i->version);
700 struct printbuf buf1 = PRINTBUF;
701 struct printbuf buf2 = PRINTBUF;
704 btree_err_on(!bch2_version_compatible(version),
705 BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
706 "unsupported bset version %u.%u",
707 BCH_VERSION_MAJOR(version),
708 BCH_VERSION_MINOR(version));
710 if (btree_err_on(version < c->sb.version_min,
711 BTREE_ERR_FIXABLE, c, NULL, b, i,
712 "bset version %u older than superblock version_min %u",
713 version, c->sb.version_min)) {
714 mutex_lock(&c->sb_lock);
715 c->disk_sb.sb->version_min = cpu_to_le16(version);
717 mutex_unlock(&c->sb_lock);
720 if (btree_err_on(BCH_VERSION_MAJOR(version) >
721 BCH_VERSION_MAJOR(c->sb.version),
722 BTREE_ERR_FIXABLE, c, NULL, b, i,
723 "bset version %u newer than superblock version %u",
724 version, c->sb.version)) {
725 mutex_lock(&c->sb_lock);
726 c->disk_sb.sb->version = cpu_to_le16(version);
728 mutex_unlock(&c->sb_lock);
731 btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
732 BTREE_ERR_INCOMPATIBLE, c, ca, b, i,
733 "BSET_SEPARATE_WHITEOUTS no longer supported");
735 if (btree_err_on(offset + sectors > btree_sectors(c),
736 BTREE_ERR_FIXABLE, c, ca, b, i,
737 "bset past end of btree node")) {
743 btree_err_on(offset && !i->u64s,
744 BTREE_ERR_FIXABLE, c, ca, b, i,
747 btree_err_on(BSET_OFFSET(i) &&
748 BSET_OFFSET(i) != offset,
749 BTREE_ERR_WANT_RETRY, c, ca, b, i,
750 "bset at wrong sector offset");
753 struct btree_node *bn =
754 container_of(i, struct btree_node, keys);
755 /* These indicate that we read the wrong btree node: */
757 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
758 struct bch_btree_ptr_v2 *bp =
759 &bkey_i_to_btree_ptr_v2(&b->key)->v;
762 btree_err_on(bp->seq != bn->keys.seq,
763 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
764 "incorrect sequence number (wrong btree node)");
767 btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
768 BTREE_ERR_MUST_RETRY, c, ca, b, i,
769 "incorrect btree id");
771 btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
772 BTREE_ERR_MUST_RETRY, c, ca, b, i,
776 compat_btree_node(b->c.level, b->c.btree_id, version,
777 BSET_BIG_ENDIAN(i), write, bn);
779 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
780 struct bch_btree_ptr_v2 *bp =
781 &bkey_i_to_btree_ptr_v2(&b->key)->v;
783 if (BTREE_PTR_RANGE_UPDATED(bp)) {
784 b->data->min_key = bp->min_key;
785 b->data->max_key = b->key.k.p;
788 btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
789 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
790 "incorrect min_key: got %s should be %s",
791 (printbuf_reset(&buf1),
792 bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
793 (printbuf_reset(&buf2),
794 bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
797 btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
798 BTREE_ERR_MUST_RETRY, c, ca, b, i,
799 "incorrect max key %s",
800 (printbuf_reset(&buf1),
801 bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
804 compat_btree_node(b->c.level, b->c.btree_id, version,
805 BSET_BIG_ENDIAN(i), write, bn);
807 err = bch2_bkey_format_validate(&bn->format);
809 BTREE_ERR_BAD_NODE, c, ca, b, i,
810 "invalid bkey format: %s", err);
812 compat_bformat(b->c.level, b->c.btree_id, version,
813 BSET_BIG_ENDIAN(i), write,
818 printbuf_exit(&buf2);
819 printbuf_exit(&buf1);
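/*
 * bset_key_invalid() chains its checks with ?: so the first failure wins:
 * on the read side we only validate the key itself and that it lands inside
 * the node; full value validation is deferred to
 * bch2_btree_node_read_done(), after the node has been sorted.
 */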
823 static int bset_key_invalid(struct bch_fs *c, struct btree *b,
825 bool updated_range, int rw,
826 struct printbuf *err)
828 return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
829 (!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
830 (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
833 static int validate_bset_keys(struct bch_fs *c, struct btree *b,
834 struct bset *i, int write,
835 bool have_retry, bool *saw_error)
837 unsigned version = le16_to_cpu(i->version);
838 struct bkey_packed *k, *prev = NULL;
839 struct printbuf buf = PRINTBUF;
840 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
841 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
845 k != vstruct_last(i);) {
849 if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
850 BTREE_ERR_FIXABLE, c, NULL, b, i,
851 "key extends past end of bset")) {
852 i->u64s = cpu_to_le16((u64 *) k - i->_data);
856 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
857 BTREE_ERR_FIXABLE, c, NULL, b, i,
858 "invalid bkey format %u", k->format)) {
859 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
860 memmove_u64s_down(k, bkey_p_next(k),
861 (u64 *) vstruct_end(i) - (u64 *) k);
865 /* XXX: validate k->u64s */
867 bch2_bkey_compat(b->c.level, b->c.btree_id, version,
868 BSET_BIG_ENDIAN(i), write,
871 u = __bkey_disassemble(b, k, &tmp);
873 printbuf_reset(&buf);
874 if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
875 printbuf_reset(&buf);
876 prt_printf(&buf, "invalid bkey: ");
877 bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
878 prt_printf(&buf, "\n ");
879 bch2_bkey_val_to_text(&buf, c, u.s_c);
881 btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
883 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
884 memmove_u64s_down(k, bkey_p_next(k),
885 (u64 *) vstruct_end(i) - (u64 *) k);
890 bch2_bkey_compat(b->c.level, b->c.btree_id, version,
891 BSET_BIG_ENDIAN(i), write,
894 if (prev && bkey_iter_cmp(b, prev, k) > 0) {
895 struct bkey up = bkey_unpack_key(b, prev);
897 printbuf_reset(&buf);
898 prt_printf(&buf, "keys out of order: ");
899 bch2_bkey_to_text(&buf, &up);
900 prt_printf(&buf, " > ");
901 bch2_bkey_to_text(&buf, u.k);
903 bch2_dump_bset(c, b, i, 0);
905 if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
906 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
907 memmove_u64s_down(k, bkey_p_next(k),
908 (u64 *) vstruct_end(i) - (u64 *) k);
921 int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
922 struct btree *b, bool have_retry, bool *saw_error)
924 struct btree_node_entry *bne;
925 struct sort_iter *iter;
926 struct btree_node *sorted;
927 struct bkey_packed *k;
928 struct bch_extent_ptr *ptr;
930 bool used_mempool, blacklisted;
931 bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
932 BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
934 unsigned blacklisted_written, nonblacklisted_written = 0;
935 unsigned ptr_written = btree_ptr_sectors_written(&b->key);
936 struct printbuf buf = PRINTBUF;
937 int ret = 0, retry_read = 0, write = READ;
939 b->version_ondisk = U16_MAX;
940 /* We might get called multiple times on read retry: */
943 iter = mempool_alloc(&c->fill_iter, GFP_NOFS);
944 sort_iter_init(iter, b);
945 iter->size = (btree_blocks(c) + 1) * 2;
947 if (bch2_meta_read_fault("btree"))
948 btree_err(BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
951 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
952 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
953 "bad magic: want %llx, got %llx",
954 bset_magic(c), le64_to_cpu(b->data->magic));
956 btree_err_on(!b->data->keys.seq,
957 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
958 "bad btree header: seq 0");
960 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
961 struct bch_btree_ptr_v2 *bp =
962 &bkey_i_to_btree_ptr_v2(&b->key)->v;
964 btree_err_on(b->data->keys.seq != bp->seq,
965 BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
966 "got wrong btree node (seq %llx want %llx)",
967 b->data->keys.seq, bp->seq);
970 while (b->written < (ptr_written ?: btree_sectors(c))) {
973 struct bch_csum csum;
974 bool first = !b->written;
979 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
980 BTREE_ERR_WANT_RETRY, c, ca, b, i,
981 "unknown checksum type %llu",
984 nonce = btree_nonce(i, b->written << 9);
985 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
987 btree_err_on(bch2_crc_cmp(csum, b->data->csum),
988 BTREE_ERR_WANT_RETRY, c, ca, b, i,
991 ret = bset_encrypt(c, i, b->written << 9);
992 if (bch2_fs_fatal_err_on(ret, c,
993 "error decrypting btree node: %i", ret))
996 btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
997 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
998 BTREE_ERR_INCOMPATIBLE, c, NULL, b, NULL,
999 "btree node does not have NEW_EXTENT_OVERWRITE set");
1001 sectors = vstruct_sectors(b->data, c->block_bits);
1003 bne = write_block(b);
1006 if (i->seq != b->data->keys.seq)
1009 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
1010 BTREE_ERR_WANT_RETRY, c, ca, b, i,
1011 "unknown checksum type %llu",
1014 nonce = btree_nonce(i, b->written << 9);
1015 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1017 btree_err_on(bch2_crc_cmp(csum, bne->csum),
1018 BTREE_ERR_WANT_RETRY, c, ca, b, i,
1019 "invalid checksum");
1021 ret = bset_encrypt(c, i, b->written << 9);
1022 if (bch2_fs_fatal_err_on(ret, c,
1023 "error decrypting btree node: %i\n", ret))
1026 sectors = vstruct_sectors(bne, c->block_bits);
1029 b->version_ondisk = min(b->version_ondisk,
1030 le16_to_cpu(i->version));
1032 ret = validate_bset(c, ca, b, i, b->written, sectors,
1033 READ, have_retry, saw_error);
1038 btree_node_set_format(b, b->data->format);
1040 ret = validate_bset_keys(c, b, i, READ, have_retry, saw_error);
1044 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
1046 blacklisted = bch2_journal_seq_is_blacklisted(c,
1047 le64_to_cpu(i->journal_seq),
1050 btree_err_on(blacklisted && first,
1051 BTREE_ERR_FIXABLE, c, ca, b, i,
1052 "first btree node bset has blacklisted journal seq (%llu)",
1053 le64_to_cpu(i->journal_seq));
1055 btree_err_on(blacklisted && ptr_written,
1056 BTREE_ERR_FIXABLE, c, ca, b, i,
1057 "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
1058 le64_to_cpu(i->journal_seq),
1059 b->written, b->written + sectors, ptr_written);
1061 b->written += sectors;
1063 if (blacklisted && !first)
1070 nonblacklisted_written = b->written;
1074 btree_err_on(b->written < ptr_written,
1075 BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
1076 "btree node data missing: expected %u sectors, found %u",
1077 ptr_written, b->written);
1079 for (bne = write_block(b);
1080 bset_byte_offset(b, bne) < btree_bytes(c);
1081 bne = (void *) bne + block_bytes(c))
1082 btree_err_on(bne->keys.seq == b->data->keys.seq &&
1083 !bch2_journal_seq_is_blacklisted(c,
1084 le64_to_cpu(bne->keys.journal_seq),
1086 BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
1087 "found bset signature after last bset");
1090 * Blacklisted bsets are those that were written after the most recent
1091 * (flush) journal write. Since there wasn't a flush, they may not have
1092 * made it to all devices - which means we shouldn't write new bsets
1093 * after them, as that could leave a gap and then reads from that device
1094 * wouldn't find all the bsets in that btree node - which means it's
1095	 * important that we start writing new bsets after the most recent _non_
 * blacklisted bset:
1098 blacklisted_written = b->written;
1099 b->written = nonblacklisted_written;
1102 sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
1103 sorted->keys.u64s = 0;
1105 set_btree_bset(b, b->set, &b->data->keys);
1107 b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
1109 u64s = le16_to_cpu(sorted->keys.u64s);
1111 sorted->keys.u64s = cpu_to_le16(u64s);
1112 swap(sorted, b->data);
1113 set_btree_bset(b, b->set, &b->data->keys);
1116 BUG_ON(b->nr.live_u64s != u64s);
1118 btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);
1121 bch2_btree_node_drop_keys_outside_node(b);
1124 for (k = i->start; k != vstruct_last(i);) {
1126 struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1128 printbuf_reset(&buf);
1130 if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
1131 (bch2_inject_invalid_keys &&
1132 !bversion_cmp(u.k->version, MAX_VERSION))) {
1133 printbuf_reset(&buf);
1135 prt_printf(&buf, "invalid bkey: ");
1136 bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
1137 prt_printf(&buf, "\n ");
1138 bch2_bkey_val_to_text(&buf, c, u.s_c);
1140 btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
1142 btree_keys_account_key_drop(&b->nr, 0, k);
1144 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1145 memmove_u64s_down(k, bkey_p_next(k),
1146 (u64 *) vstruct_end(i) - (u64 *) k);
1147 set_btree_bset_end(b, b->set);
1151 if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1152 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1160 bch2_bset_build_aux_tree(b, b->set, false);
1162 set_needs_whiteout(btree_bset_first(b), true);
1164 btree_node_reset_sib_u64s(b);
1166 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
1167 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1169 if (ca->mi.state != BCH_MEMBER_STATE_rw)
1170 set_btree_node_need_rewrite(b);
1174 set_btree_node_need_rewrite(b);
1176 mempool_free(iter, &c->fill_iter);
1177 printbuf_exit(&buf);
1180 if (ret == BTREE_RETRY_READ)
1183 set_btree_node_read_error(b);
1187 static void btree_node_read_work(struct work_struct *work)
1189 struct btree_read_bio *rb =
1190 container_of(work, struct btree_read_bio, work);
1191 struct bch_fs *c = rb->c;
1192 struct btree *b = rb->b;
1193 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1194 struct bio *bio = &rb->bio;
1195 struct bch_io_failures failed = { .nr = 0 };
1196 struct printbuf buf = PRINTBUF;
1197 bool saw_error = false;
1204 bch_info(c, "retrying read");
1205 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1206 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1207 bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
1208 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
1209 bio->bi_iter.bi_size = btree_bytes(c);
1211 if (rb->have_ioref) {
1212 bio_set_dev(bio, ca->disk_sb.bdev);
1213 submit_bio_wait(bio);
1215 bio->bi_status = BLK_STS_REMOVED;
1218 printbuf_reset(&buf);
1219 btree_pos_to_text(&buf, c, b);
1220 bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
1221 bch2_blk_status_to_str(bio->bi_status), buf.buf);
1223 percpu_ref_put(&ca->io_ref);
1224 rb->have_ioref = false;
1226 bch2_mark_io_failure(&failed, &rb->pick);
1228 can_retry = bch2_bkey_pick_read_device(c,
1229 bkey_i_to_s_c(&b->key),
1230 &failed, &rb->pick) > 0;
1232 if (!bio->bi_status &&
1233 !bch2_btree_node_read_done(c, ca, b, can_retry, &saw_error)) {
1235 bch_info(c, "retry success");
1242 set_btree_node_read_error(b);
1247 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1250 printbuf_exit(&buf);
1252 if (saw_error && !btree_node_read_error(b)) {
1253 struct printbuf buf = PRINTBUF;
1255 bch2_bpos_to_text(&buf, b->key.k.p);
1256 bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
1257 __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
1258 printbuf_exit(&buf);
1260 bch2_btree_node_rewrite_async(c, b);
1263 clear_btree_node_read_in_flight(b);
1264 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1267 static void btree_node_read_endio(struct bio *bio)
1269 struct btree_read_bio *rb =
1270 container_of(bio, struct btree_read_bio, bio);
1271 struct bch_fs *c = rb->c;
1273 if (rb->have_ioref) {
1274 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1276 bch2_latency_acct(ca, rb->start_time, READ);
1279 queue_work(c->io_complete_wq, &rb->work);
1282 struct btree_node_read_all {
1287 void *buf[BCH_REPLICAS_MAX];
1288 struct bio *bio[BCH_REPLICAS_MAX];
1289 blk_status_t err[BCH_REPLICAS_MAX];
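/*
 * btree_node_sectors_written() walks the bsets in a raw node buffer,
 * stepping from one entry to the next via vstruct_sectors() and stopping at
 * the first entry whose seq doesn't match the node header: the result is
 * how many sectors of the buffer contain valid bsets.
 */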
1292 static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
1294 struct btree_node *bn = data;
1295 struct btree_node_entry *bne;
1296 unsigned offset = 0;
1298 if (le64_to_cpu(bn->magic) != bset_magic(c))
1301 while (offset < btree_sectors(c)) {
1303 offset += vstruct_sectors(bn, c->block_bits);
1305 bne = data + (offset << 9);
1306 if (bne->keys.seq != bn->keys.seq)
1308 offset += vstruct_sectors(bne, c->block_bits);
1315 static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *data)
1317 struct btree_node *bn = data;
1318 struct btree_node_entry *bne;
1323 while (offset < btree_sectors(c)) {
1324 bne = data + (offset << 9);
1325 if (bne->keys.seq == bn->keys.seq)
1334 static void btree_node_read_all_replicas_done(struct closure *cl)
1336 struct btree_node_read_all *ra =
1337 container_of(cl, struct btree_node_read_all, cl);
1338 struct bch_fs *c = ra->c;
1339 struct btree *b = ra->b;
1340 struct printbuf buf = PRINTBUF;
1341 bool dump_bset_maps = false;
1342 bool have_retry = false;
1343 int ret = 0, best = -1, write = READ;
1344 unsigned i, written = 0, written2 = 0;
1345 __le64 seq = b->key.k.type == KEY_TYPE_btree_ptr_v2
1346 ? bkey_i_to_btree_ptr_v2(&b->key)->v.seq : 0;
1347 bool _saw_error = false, *saw_error = &_saw_error;
1349 for (i = 0; i < ra->nr; i++) {
1350 struct btree_node *bn = ra->buf[i];
1355 if (le64_to_cpu(bn->magic) != bset_magic(c) ||
1356 (seq && seq != bn->keys.seq))
1361 written = btree_node_sectors_written(c, bn);
1365 written2 = btree_node_sectors_written(c, ra->buf[i]);
1366 if (btree_err_on(written2 != written, BTREE_ERR_FIXABLE, c, NULL, b, NULL,
1367 "btree node sectors written mismatch: %u != %u",
1368 written, written2) ||
1369 btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
1370 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
1371 "found bset signature after last bset") ||
1372 btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
1373 BTREE_ERR_FIXABLE, c, NULL, b, NULL,
1374 "btree node replicas content mismatch"))
1375 dump_bset_maps = true;
1377 if (written2 > written) {
1383 if (dump_bset_maps) {
1384 for (i = 0; i < ra->nr; i++) {
1385 struct btree_node *bn = ra->buf[i];
1386 struct btree_node_entry *bne = NULL;
1387 unsigned offset = 0, sectors;
1393 printbuf_reset(&buf);
1395 while (offset < btree_sectors(c)) {
1397 sectors = vstruct_sectors(bn, c->block_bits);
1399 bne = ra->buf[i] + (offset << 9);
1400 if (bne->keys.seq != bn->keys.seq)
1402 sectors = vstruct_sectors(bne, c->block_bits);
1405 prt_printf(&buf, " %u-%u", offset, offset + sectors);
1406 if (bne && bch2_journal_seq_is_blacklisted(c,
1407 le64_to_cpu(bne->keys.journal_seq), false))
1408 prt_printf(&buf, "*");
1412 while (offset < btree_sectors(c)) {
1413 bne = ra->buf[i] + (offset << 9);
1414 if (bne->keys.seq == bn->keys.seq) {
1416 prt_printf(&buf, " GAP");
1419 sectors = vstruct_sectors(bne, c->block_bits);
1420 prt_printf(&buf, " %u-%u", offset, offset + sectors);
1421 if (bch2_journal_seq_is_blacklisted(c,
1422 le64_to_cpu(bne->keys.journal_seq), false))
1423 prt_printf(&buf, "*");
1428 bch_err(c, "replica %u:%s", i, buf.buf);
1433 memcpy(b->data, ra->buf[best], btree_bytes(c));
1434 ret = bch2_btree_node_read_done(c, NULL, b, false, saw_error);
1440 set_btree_node_read_error(b);
1441 else if (*saw_error)
1442 bch2_btree_node_rewrite_async(c, b);
1444 for (i = 0; i < ra->nr; i++) {
1445 mempool_free(ra->buf[i], &c->btree_bounce_pool);
1446 bio_put(ra->bio[i]);
1449 closure_debug_destroy(&ra->cl);
1451 printbuf_exit(&buf);
1453 clear_btree_node_read_in_flight(b);
1454 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1457 static void btree_node_read_all_replicas_endio(struct bio *bio)
1459 struct btree_read_bio *rb =
1460 container_of(bio, struct btree_read_bio, bio);
1461 struct bch_fs *c = rb->c;
1462 struct btree_node_read_all *ra = rb->ra;
1464 if (rb->have_ioref) {
1465 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1467 bch2_latency_acct(ca, rb->start_time, READ);
1470 ra->err[rb->idx] = bio->bi_status;
1471 closure_put(&ra->cl);
1475 * XXX This allocates multiple times from the same mempools, and can deadlock
1476 * under sufficient memory pressure (but is only a debug path)
1478 static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool sync)
1480 struct bkey_s_c k = bkey_i_to_s_c(&b->key);
1481 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1482 const union bch_extent_entry *entry;
1483 struct extent_ptr_decoded pick;
1484 struct btree_node_read_all *ra;
1487 ra = kzalloc(sizeof(*ra), GFP_NOFS);
1489 return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
1491 closure_init(&ra->cl, NULL);
1494 ra->nr = bch2_bkey_nr_ptrs(k);
1496 for (i = 0; i < ra->nr; i++) {
1497 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
1498 ra->bio[i] = bio_alloc_bioset(NULL,
1499 buf_pages(ra->buf[i], btree_bytes(c)),
1500 REQ_OP_READ|REQ_SYNC|REQ_META,
1506 bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
1507 struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1508 struct btree_read_bio *rb =
1509 container_of(ra->bio[i], struct btree_read_bio, bio);
1513 rb->start_time = local_clock();
1514 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1517 rb->bio.bi_iter.bi_sector = pick.ptr.offset;
1518 rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
1519 bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
1521 if (rb->have_ioref) {
1522 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1523 bio_sectors(&rb->bio));
1524 bio_set_dev(&rb->bio, ca->disk_sb.bdev);
1526 closure_get(&ra->cl);
1527 submit_bio(&rb->bio);
1529 ra->err[i] = BLK_STS_REMOVED;
1536 closure_sync(&ra->cl);
1537 btree_node_read_all_replicas_done(&ra->cl);
1539 continue_at(&ra->cl, btree_node_read_all_replicas_done,
1546 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1549 struct extent_ptr_decoded pick;
1550 struct btree_read_bio *rb;
1555 trace_and_count(c, btree_node_read, c, b);
1557 if (bch2_verify_all_btree_replicas &&
1558 !btree_node_read_all_replicas(c, b, sync))
1561 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1565 struct printbuf buf = PRINTBUF;
1567 prt_str(&buf, "btree node read error: no device to read from\n at ");
1568 btree_pos_to_text(&buf, c, b);
1569 bch_err(c, "%s", buf.buf);
1571 if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
1572 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
1573 bch2_fatal_error(c);
1575 set_btree_node_read_error(b);
1576 clear_btree_node_read_in_flight(b);
1577 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1578 printbuf_exit(&buf);
1582 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1584 bio = bio_alloc_bioset(NULL,
1585 buf_pages(b->data, btree_bytes(c)),
1586 REQ_OP_READ|REQ_SYNC|REQ_META,
1589 rb = container_of(bio, struct btree_read_bio, bio);
1593 rb->start_time = local_clock();
1594 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1596 INIT_WORK(&rb->work, btree_node_read_work);
1597 bio->bi_iter.bi_sector = pick.ptr.offset;
1598 bio->bi_end_io = btree_node_read_endio;
1599 bch2_bio_map(bio, b->data, btree_bytes(c));
1601 if (rb->have_ioref) {
1602 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
1604 bio_set_dev(bio, ca->disk_sb.bdev);
1607 submit_bio_wait(bio);
1609 btree_node_read_work(&rb->work);
1614 bio->bi_status = BLK_STS_REMOVED;
1617 btree_node_read_work(&rb->work);
1619 queue_work(c->io_complete_wq, &rb->work);
1623 static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
1624 const struct bkey_i *k, unsigned level)
1626 struct bch_fs *c = trans->c;
1631 closure_init_stack(&cl);
1634 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1638 b = bch2_btree_node_mem_alloc(trans, level != 0);
1639 bch2_btree_cache_cannibalize_unlock(c);
1643 bkey_copy(&b->key, k);
1644 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1646 set_btree_node_read_in_flight(b);
1648 bch2_btree_node_read(c, b, true);
1650 if (btree_node_read_error(b)) {
1651 bch2_btree_node_hash_remove(&c->btree_cache, b);
1653 mutex_lock(&c->btree_cache.lock);
1654 list_move(&b->list, &c->btree_cache.freeable);
1655 mutex_unlock(&c->btree_cache.lock);
1661 bch2_btree_set_root_for_read(c, b);
1663 six_unlock_write(&b->c.lock);
1664 six_unlock_intent(&b->c.lock);
1669 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1670 const struct bkey_i *k, unsigned level)
1672 return bch2_trans_run(c, __bch2_btree_root_read(&trans, id, k, level));
1676 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1677 struct btree_write *w)
1679 unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1687 } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1690 closure_put(&((struct btree_update *) new)->cl);
1692 bch2_journal_pin_drop(&c->journal, &w->journal);
1695 static void __btree_node_write_done(struct bch_fs *c, struct btree *b)
1697 struct btree_write *w = btree_prev_write(b);
1698 unsigned long old, new, v;
1701 bch2_btree_complete_write(c, b, w);
1703 v = READ_ONCE(b->flags);
1707 if ((old & (1U << BTREE_NODE_dirty)) &&
1708 (old & (1U << BTREE_NODE_need_write)) &&
1709 !(old & (1U << BTREE_NODE_never_write)) &&
1710 !(old & (1U << BTREE_NODE_write_blocked)) &&
1711 !(old & (1U << BTREE_NODE_will_make_reachable))) {
1712 new &= ~(1U << BTREE_NODE_dirty);
1713 new &= ~(1U << BTREE_NODE_need_write);
1714 new |= (1U << BTREE_NODE_write_in_flight);
1715 new |= (1U << BTREE_NODE_write_in_flight_inner);
1716 new |= (1U << BTREE_NODE_just_written);
1717 new ^= (1U << BTREE_NODE_write_idx);
1719 type = new & BTREE_WRITE_TYPE_MASK;
1720 new &= ~BTREE_WRITE_TYPE_MASK;
1722 new &= ~(1U << BTREE_NODE_write_in_flight);
1723 new &= ~(1U << BTREE_NODE_write_in_flight_inner);
1725 } while ((v = cmpxchg(&b->flags, old, new)) != old);
1727 if (new & (1U << BTREE_NODE_write_in_flight))
1728 __bch2_btree_node_write(c, b, BTREE_WRITE_ALREADY_STARTED|type);
1730 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
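/*
 * Note the flag dance above: if the node was redirtied and marked
 * need_write while this write was in flight (and nothing currently blocks a
 * write), we atomically claim the next write - write_in_flight stays set
 * and we immediately restart __bch2_btree_node_write() with
 * BTREE_WRITE_ALREADY_STARTED. Otherwise write_in_flight is cleared and
 * waiters are woken.
 */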
1733 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1735 struct btree_trans trans;
1737 bch2_trans_init(&trans, c, 0, 0);
1739 btree_node_lock_nopath_nofail(&trans, &b->c, SIX_LOCK_read);
1740 __btree_node_write_done(c, b);
1741 six_unlock_read(&b->c.lock);
1743 bch2_trans_exit(&trans);
1746 static void btree_node_write_work(struct work_struct *work)
1748 struct btree_write_bio *wbio =
1749 container_of(work, struct btree_write_bio, work);
1750 struct bch_fs *c = wbio->wbio.c;
1751 struct btree *b = wbio->wbio.bio.bi_private;
1752 struct bch_extent_ptr *ptr;
1755 btree_bounce_free(c,
1757 wbio->wbio.used_mempool,
1760 bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
1761 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1763 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
1766 if (wbio->wbio.first_btree_write) {
1767 if (wbio->wbio.failed.nr) {
1771 ret = bch2_trans_do(c, NULL, NULL, 0,
1772 bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
1773 BCH_WATERMARK_reclaim|
1774 BTREE_INSERT_JOURNAL_RECLAIM|
1775 BTREE_INSERT_NOFAIL|
1776 BTREE_INSERT_NOCHECK_RW,
1777 !wbio->wbio.failed.nr));
1782 bio_put(&wbio->wbio.bio);
1783 btree_node_write_done(c, b);
1786 set_btree_node_noevict(b);
1787 if (!bch2_err_matches(ret, EROFS))
1788 bch2_fs_fatal_error(c, "fatal error writing btree node: %s", bch2_err_str(ret));
1792 static void btree_node_write_endio(struct bio *bio)
1794 struct bch_write_bio *wbio = to_wbio(bio);
1795 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
1796 struct bch_write_bio *orig = parent ?: wbio;
1797 struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
1798 struct bch_fs *c = wbio->c;
1799 struct btree *b = wbio->bio.bi_private;
1800 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
1801 unsigned long flags;
1803 if (wbio->have_ioref)
1804 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1806 if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
1807 bch2_blk_status_to_str(bio->bi_status)) ||
1808 bch2_meta_write_fault("btree")) {
1809 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1810 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1811 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1814 if (wbio->have_ioref)
1815 percpu_ref_put(&ca->io_ref);
1819 bio_endio(&parent->bio);
1823 clear_btree_node_write_in_flight_inner(b);
1824 wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
1825 INIT_WORK(&wb->work, btree_node_write_work);
1826 queue_work(c->btree_io_complete_wq, &wb->work);
1829 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1830 struct bset *i, unsigned sectors)
1832 struct printbuf buf = PRINTBUF;
1836 ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
1837 BKEY_TYPE_btree, WRITE, &buf);
1840 bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
1841 printbuf_exit(&buf);
1845 ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
1846 validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
1848 bch2_inconsistent_error(c);
1855 static void btree_write_submit(struct work_struct *work)
1857 struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
1858 struct bch_extent_ptr *ptr;
1859 BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1861 bkey_copy(&tmp.k, &wbio->key);
1863 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
1864 ptr->offset += wbio->sector_offset;
1866 bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
1870 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
1872 struct btree_write_bio *wbio;
1873 struct bset_tree *t;
1875 struct btree_node *bn = NULL;
1876 struct btree_node_entry *bne = NULL;
1877 struct sort_iter sort_iter;
1879 unsigned bytes_to_write, sectors_to_write, bytes, u64s;
1882 unsigned long old, new;
1883 bool validate_before_checksum = false;
1884 enum btree_write_type type = flags & BTREE_WRITE_TYPE_MASK;
1888 if (flags & BTREE_WRITE_ALREADY_STARTED)
1892 * We may only have a read lock on the btree node - the dirty bit is our
1893 * "lock" against racing with other threads that may be trying to start
1894 * a write, we do a write iff we clear the dirty bit. Since setting the
1895	 * dirty bit requires a write lock, we can't race with other threads
 * redirtying it:
1899 old = new = READ_ONCE(b->flags);
1901 if (!(old & (1 << BTREE_NODE_dirty)))
1904 if ((flags & BTREE_WRITE_ONLY_IF_NEED) &&
1905 !(old & (1 << BTREE_NODE_need_write)))
1909 ((1 << BTREE_NODE_never_write)|
1910 (1 << BTREE_NODE_write_blocked)))
1914 (old & (1 << BTREE_NODE_will_make_reachable)))
1917 if (old & (1 << BTREE_NODE_write_in_flight))
1920 if (flags & BTREE_WRITE_ONLY_IF_NEED)
1921 type = new & BTREE_WRITE_TYPE_MASK;
1922 new &= ~BTREE_WRITE_TYPE_MASK;
1924 new &= ~(1 << BTREE_NODE_dirty);
1925 new &= ~(1 << BTREE_NODE_need_write);
1926 new |= (1 << BTREE_NODE_write_in_flight);
1927 new |= (1 << BTREE_NODE_write_in_flight_inner);
1928 new |= (1 << BTREE_NODE_just_written);
1929 new ^= (1 << BTREE_NODE_write_idx);
1930 } while (cmpxchg_acquire(&b->flags, old, new) != old);
1932 if (new & (1U << BTREE_NODE_need_write))
1935 BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
1937 atomic_dec(&c->btree_cache.dirty);
1939 BUG_ON(btree_node_fake(b));
1940 BUG_ON((b->will_make_reachable != 0) != !b->written);
1942 BUG_ON(b->written >= btree_sectors(c));
1943 BUG_ON(b->written & (block_sectors(c) - 1));
1944 BUG_ON(bset_written(b, btree_bset_last(b)));
1945 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1946 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1948 bch2_sort_whiteouts(c, b);
1950 sort_iter_init(&sort_iter, b);
1953 ? sizeof(struct btree_node)
1954 : sizeof(struct btree_node_entry);
1956 bytes += b->whiteout_u64s * sizeof(u64);
1958 for_each_bset(b, t) {
1961 if (bset_written(b, i))
1964 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1965 sort_iter_add(&sort_iter,
1966 btree_bkey_first(b, t),
1967 btree_bkey_last(b, t));
1968 seq = max(seq, le64_to_cpu(i->journal_seq));
1971 BUG_ON(b->written && !seq);
1973 /* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
1976 /* buffer must be a multiple of the block size */
1977 bytes = round_up(bytes, block_bytes(c));
1979 data = btree_bounce_alloc(c, bytes, &used_mempool);
1987 bne->keys = b->data->keys;
1991 i->journal_seq = cpu_to_le64(seq);
1994 sort_iter_add(&sort_iter,
1995 unwritten_whiteouts_start(c, b),
1996 unwritten_whiteouts_end(c, b));
1997 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1999 b->whiteout_u64s = 0;
2001 u64s = bch2_sort_keys(i->start, &sort_iter, false);
2002 le16_add_cpu(&i->u64s, u64s);
2004 BUG_ON(!b->written && i->u64s != b->data->keys.u64s);
2006 set_needs_whiteout(i, false);
2008 /* do we have data to write? */
2009 if (b->written && !i->u64s)
2012 bytes_to_write = vstruct_end(i) - data;
2013 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
2016 b->key.k.type == KEY_TYPE_btree_ptr_v2)
2017 BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
2019 memset(data + bytes_to_write, 0,
2020 (sectors_to_write << 9) - bytes_to_write);
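	/*
	 * We always write out whole blocks: zero the tail of the last block
	 * so we never write uninitialized memory to disk.
	 */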
2022 BUG_ON(b->written + sectors_to_write > btree_sectors(c));
2023 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
2024 BUG_ON(i->seq != b->data->keys.seq);
2026 i->version = cpu_to_le16(c->sb.version);
2027 SET_BSET_OFFSET(i, b->written);
2028 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
2030 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
2031 validate_before_checksum = true;
2033 /* validate_bset will be modifying: */
2034 if (le16_to_cpu(i->version) < bcachefs_metadata_version_current)
2035 validate_before_checksum = true;
2037 /* if we're going to be encrypting, check metadata validity first: */
2038 if (validate_before_checksum &&
2039 validate_bset_for_write(c, b, i, sectors_to_write))
2042 ret = bset_encrypt(c, i, b->written << 9);
2043 if (bch2_fs_fatal_err_on(ret, c,
2044 "error encrypting btree node: %i\n", ret))
2047 nonce = btree_nonce(i, b->written << 9);
2050 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
2052 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
2054 /* if we're not encrypting, check metadata after checksumming: */
2055 if (!validate_before_checksum &&
2056 validate_bset_for_write(c, b, i, sectors_to_write))
2060 * We handle btree write errors by immediately halting the journal -
2061 * after we've done that, we can't issue any subsequent btree writes
2062 * because they might have pointers to new nodes that failed to write.
2064 * Furthermore, there's no point in doing any more btree writes because
2065 * with the journal stopped, we're never going to update the journal to
2066	 * reflect that those writes were done and the data flushed from the
 * journal:
2069 * Also on journal error, the pending write may have updates that were
2070 * never journalled (interior nodes, see btree_update_nodes_written()) -
2071 * it's critical that we don't do the write in that case otherwise we
2072 * will have updates visible that weren't in the journal:
2074	 * Make sure to update b->written so bch2_btree_init_next() doesn't
 * break:
2077 if (bch2_journal_error(&c->journal) ||
2081 trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
2083 wbio = container_of(bio_alloc_bioset(NULL,
2084 buf_pages(data, sectors_to_write << 9),
2085 REQ_OP_WRITE|REQ_META,
2088 struct btree_write_bio, wbio.bio);
2089 wbio_init(&wbio->wbio.bio);
2091 wbio->data_bytes = bytes;
2092 wbio->sector_offset = b->written;
2094 wbio->wbio.used_mempool = used_mempool;
2095 wbio->wbio.first_btree_write = !b->written;
2096 wbio->wbio.bio.bi_end_io = btree_node_write_endio;
2097 wbio->wbio.bio.bi_private = b;
2099 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
2101 bkey_copy(&wbio->key, &b->key);
2103 b->written += sectors_to_write;
2105 if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
2106 bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
2107 cpu_to_le16(b->written);
2109 atomic64_inc(&c->btree_write_stats[type].nr);
2110 atomic64_add(bytes_to_write, &c->btree_write_stats[type].bytes);
2112 INIT_WORK(&wbio->work, btree_write_submit);
2113 queue_work(c->io_complete_wq, &wbio->work);
2116 set_btree_node_noevict(b);
2117 b->written += sectors_to_write;
2119 btree_bounce_free(c, bytes, used_mempool, data);
2120 __btree_node_write_done(c, b);
2124 * Work that must be done with write lock held:
2126 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
2128 bool invalidated_iter = false;
2129 struct btree_node_entry *bne;
2130 struct bset_tree *t;
2132 if (!btree_node_just_written(b))
2135 BUG_ON(b->whiteout_u64s);
2137 clear_btree_node_just_written(b);
2140 * Note: immediately after write, bset_written() doesn't work - the
2141 * amount of data we had to write after compaction might have been
2142 * smaller than the offset of the last bset.
2144 * However, we know that all bsets have been written here, as long as
2145 * we're still holding the write lock:
2149	 * XXX: decide if we really want to unconditionally sort down to a
 * single bset:
2153 btree_node_sort(c, b, 0, b->nsets, true);
2154 invalidated_iter = true;
2156 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
2160 set_needs_whiteout(bset(b, t), true);
2162 bch2_btree_verify(c, b);
2165 * If later we don't unconditionally sort down to a single bset, we have
2166 * to ensure this is still true:
2168 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
2170 bne = want_new_bset(c, b);
2172 bch2_bset_init_next(c, b, bne);
2174 bch2_btree_build_aux_trees(b);
2176 return invalidated_iter;
2180 * Use this one if the node is intent locked:
2182 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
2183 enum six_lock_type lock_type_held,
2186 if (lock_type_held == SIX_LOCK_intent ||
2187 (lock_type_held == SIX_LOCK_read &&
2188 six_lock_tryupgrade(&b->c.lock))) {
2189 __bch2_btree_node_write(c, b, flags);
2191 /* don't cycle lock unnecessarily: */
2192 if (btree_node_just_written(b) &&
2193 six_trylock_write(&b->c.lock)) {
2194 bch2_btree_post_write_cleanup(c, b);
2195 six_unlock_write(&b->c.lock);
2198 if (lock_type_held == SIX_LOCK_read)
2199 six_lock_downgrade(&b->c.lock);
2201 __bch2_btree_node_write(c, b, flags);
2202 if (lock_type_held == SIX_LOCK_write &&
2203 btree_node_just_written(b))
2204 bch2_btree_post_write_cleanup(c, b);
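/*
 * In short: post-write cleanup (sorting down to one bset, opening the next
 * bset) requires the write lock, so we only do it when we already hold the
 * write lock, or hold an intent lock (or can upgrade from read) and can
 * then trylock write; with just a read lock we only issue the write and
 * leave cleanup for a later writer.
 */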
2208 static bool __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
2210 struct bucket_table *tbl;
2211 struct rhash_head *pos;
2217 for_each_cached_btree(b, c, tbl, i, pos)
2218 if (test_bit(flag, &b->flags)) {
2220 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
2229 bool bch2_btree_flush_all_reads(struct bch_fs *c)
2231 return __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
2234 bool bch2_btree_flush_all_writes(struct bch_fs *c)
2236 return __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
2239 static const char * const bch2_btree_write_types[] = {
2240 #define x(t, n) [n] = #t,
2241 BCH_BTREE_WRITE_TYPES()
2245 void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
2247 printbuf_tabstop_push(out, 20);
2248 printbuf_tabstop_push(out, 10);
2253 prt_str(out, "size");
2256 for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
2257 u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
2258 u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);
2260 prt_printf(out, "%s:", bch2_btree_write_types[i]);
2264 prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);