// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "bkey_sort.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

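/*
 * Debug assertion that a range of packed bkeys is sorted and free of
 * duplicates: for extent nodes consecutive keys may not overlap, otherwise
 * they must be strictly increasing.
 */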
static void verify_no_dups(struct btree *b,
			   struct bkey_packed *start,
			   struct bkey_packed *end,
			   bool extents)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bkey_packed *k, *p;

	if (start == end)
		return;

	for (p = start, k = bkey_next_skip_noops(start, end);
	     k != end;
	     p = k, k = bkey_next_skip_noops(k, end)) {
		struct bkey l = bkey_unpack_key(b, p);
		struct bkey r = bkey_unpack_key(b, k);

		BUG_ON(extents
		       ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
		       : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
		//BUG_ON(bch2_bkey_cmp_packed(&b->format, p, k) >= 0);
	}
#endif
}

static void set_needs_whiteout(struct bset *i, int v)
{
	struct bkey_packed *k;

	for (k = i->start;
	     k != vstruct_last(i);
	     k = bkey_next_skip_noops(k, vstruct_last(i)))
		k->needs_whiteout = v;
}

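/*
 * Bounce buffers for sorting/compacting btree node contents: prefer a plain
 * vmalloc allocation, falling back to a preallocated mempool so we can
 * always make forward progress.
 */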
static void btree_bounce_free(struct bch_fs *c, size_t size,
			      bool used_mempool, void *p)
{
	if (used_mempool)
		mempool_free(p, &c->btree_bounce_pool);
	else
		vpfree(p, size);
}

static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
				bool *used_mempool)
{
	unsigned flags = memalloc_nofs_save();
	void *p;

	BUG_ON(size > btree_bytes(c));

	*used_mempool = false;
	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
	}
	memalloc_nofs_restore(flags);
	return p;
}

static void sort_bkey_ptrs(const struct btree *bt,
			   struct bkey_packed **ptrs, unsigned nr)
{
	unsigned n = nr, a = nr / 2, b, c, d;

	if (!a)
		return;

	/* Heap sort: see lib/sort.c: */
	while (1) {
		if (a)
			a--;
		else if (--n)
			swap(ptrs[0], ptrs[n]);
		else
			break;

		for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
			b = bch2_bkey_cmp_packed(bt,
					ptrs[c],
					ptrs[d]) >= 0 ? c : d;
		if (d == n)
			b = c;

		while (b != a &&
		       bch2_bkey_cmp_packed(bt,
				       ptrs[a],
				       ptrs[b]) >= 0)
			b = (b - 1) / 2;
		c = b;
		while (b != a) {
			b = (b - 1) / 2;
			swap(ptrs[b], ptrs[c]);
			c = b;
		}
	}
}

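/*
 * Sort a node's unwritten whiteouts: build an array of pointers at the end
 * of a bounce buffer, heap sort it, then copy the whiteouts back out in
 * sorted order.
 */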
static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
{
	struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
	bool used_mempool = false;
	size_t bytes = b->whiteout_u64s * sizeof(u64);

	if (!b->whiteout_u64s)
		return;

	new_whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);

	ptrs = ptrs_end = ((void *) new_whiteouts + bytes);

	for (k = unwritten_whiteouts_start(c, b);
	     k != unwritten_whiteouts_end(c, b);
	     k = bkey_next(k))
		*--ptrs = k;

	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);

	k = new_whiteouts;

	while (ptrs != ptrs_end) {
		bkey_copy(k, *ptrs);
		k = bkey_next(k);
		ptrs++;
	}

	verify_no_dups(b, new_whiteouts,
		       (void *) ((u64 *) new_whiteouts + b->whiteout_u64s),
		       btree_node_old_extent_overwrite(b));

	memcpy_u64s(unwritten_whiteouts_start(c, b),
		    new_whiteouts, b->whiteout_u64s);

	btree_bounce_free(c, bytes, used_mempool, new_whiteouts);
}

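/*
 * A bset is only worth compacting if it has dead u64s to reclaim; in
 * COMPACT_LAZY mode we additionally require enough garbage to be worth the
 * work, unless we're already compacting and the bset hasn't been written.
 */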
static bool should_compact_bset(struct btree *b, struct bset_tree *t,
				bool compacting, enum compact_mode mode)
{
	if (!bset_dead_u64s(b, t))
		return false;

	switch (mode) {
	case COMPACT_LAZY:
		return should_compact_bset_lazy(b, t) ||
			(compacting && !bset_written(b, bset(b, t)));
	case COMPACT_ALL:
		return true;
	default:
		BUG();
	}
}

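/*
 * Compaction for old-style extent nodes, where whiteouts live separately
 * from live keys: drop keys that are no longer needed, collect the
 * whiteouts we still need, and stash them - sorted - after the last bset
 * for the next write.
 */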
static bool bch2_compact_extent_whiteouts(struct bch_fs *c,
					  struct btree *b,
					  enum compact_mode mode)
{
	const struct bkey_format *f = &b->format;
	struct bset_tree *t;
	struct bkey_packed *whiteouts = NULL;
	struct bkey_packed *u_start, *u_pos;
	struct sort_iter sort_iter;
	unsigned bytes, whiteout_u64s = 0, u64s;
	bool used_mempool, compacting = false;

	BUG_ON(!btree_node_is_extents(b));

	for_each_bset(b, t)
		if (should_compact_bset(b, t, whiteout_u64s != 0, mode))
			whiteout_u64s += bset_dead_u64s(b, t);

	if (!whiteout_u64s)
		return false;

	bch2_sort_whiteouts(c, b);

	sort_iter_init(&sort_iter, b);

	whiteout_u64s += b->whiteout_u64s;
	bytes = whiteout_u64s * sizeof(u64);

	whiteouts = btree_bounce_alloc(c, bytes, &used_mempool);
	u_start = u_pos = whiteouts;

	memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
		    b->whiteout_u64s);
	u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);

	sort_iter_add(&sort_iter, u_start, u_pos);

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (!should_compact_bset(b, t, compacting, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		compacting = true;
		u_start = u_pos;
		start = i->start;
		end = vstruct_last(i);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next_skip_noops(k, end);

			if (bkey_deleted(k))
				continue;

			BUG_ON(bkey_whiteout(k) &&
			       k->needs_whiteout &&
			       bkey_written(b, k));

			if (bkey_whiteout(k) && !k->needs_whiteout)
				continue;

			if (bkey_whiteout(k)) {
				memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
				set_bkeyp_val_u64s(f, u_pos, 0);
				u_pos = bkey_next(u_pos);
			} else {
				bkey_copy(out, k);
				out = bkey_next(out);
			}
		}

		sort_iter_add(&sort_iter, u_start, u_pos);

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
	}

	b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;

	BUG_ON((void *) unwritten_whiteouts_start(c, b) <
	       (void *) btree_bkey_last(b, bset_tree_last(b)));

	u64s = bch2_sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
					  &sort_iter);

	BUG_ON(u64s > b->whiteout_u64s);
	BUG_ON(u_pos != whiteouts && !u64s);

	if (u64s != b->whiteout_u64s) {
		void *src = unwritten_whiteouts_start(c, b);

		b->whiteout_u64s = u64s;
		memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
	}

	verify_no_dups(b,
		       unwritten_whiteouts_start(c, b),
		       unwritten_whiteouts_end(c, b),
		       true);

	btree_bounce_free(c, bytes, used_mempool, whiteouts);

	bch2_btree_build_aux_trees(b);

	bch_btree_keys_u64s_remaining(c, b);
	bch2_verify_btree_nr_keys(b);

	return true;
}

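/*
 * Whiteout compaction for non-extent nodes is much simpler: whiteouts in
 * unwritten bsets can simply be dropped.
 */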
static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
{
	struct bset_tree *t;
	bool ret = false;

	for_each_bset(b, t) {
		struct bset *i = bset(b, t);
		struct bkey_packed *k, *n, *out, *start, *end;
		struct btree_node_entry *src = NULL, *dst = NULL;

		if (t != b->set && !bset_written(b, i)) {
			src = container_of(i, struct btree_node_entry, keys);
			dst = max(write_block(b),
				  (void *) btree_bkey_last(b, t - 1));
		}

		if (src != dst)
			ret = true;

		if (!should_compact_bset(b, t, ret, mode)) {
			if (src != dst) {
				memmove(dst, src, sizeof(*src) +
					le16_to_cpu(src->keys.u64s) *
					sizeof(u64));
				i = &dst->keys;
				set_btree_bset(b, t, i);
			}
			continue;
		}

		start = btree_bkey_first(b, t);
		end = btree_bkey_last(b, t);

		if (src != dst) {
			memmove(dst, src, sizeof(*src));
			i = &dst->keys;
			set_btree_bset(b, t, i);
		}

		out = i->start;

		for (k = start; k != end; k = n) {
			n = bkey_next_skip_noops(k, end);

			if (!bkey_whiteout(k)) {
				bkey_copy(out, k);
				out = bkey_next(out);
			} else {
				BUG_ON(k->needs_whiteout);
			}
		}

		i->u64s = cpu_to_le16((u64 *) out - i->_data);
		set_btree_bset_end(b, t);
		bch2_bset_set_no_aux_tree(b, t);
		ret = true;
	}

	bch2_verify_btree_nr_keys(b);

	bch2_btree_build_aux_trees(b);

	return ret;
}

bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
			    enum compact_mode mode)
{
	return !btree_node_old_extent_overwrite(b)
		? bch2_drop_whiteouts(b, mode)
		: bch2_compact_extent_whiteouts(c, b, mode);
}

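/*
 * Sort bsets [start_idx, end_idx) of a node together into a single bset.
 * When sorting the entire node the result is built in a full-size bounce
 * buffer and swapped with the node's buffer rather than copied back.
 */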
static void btree_node_sort(struct bch_fs *c, struct btree *b,
			    struct btree_iter *iter,
			    unsigned start_idx,
			    unsigned end_idx,
			    bool filter_whiteouts)
{
	struct btree_node *out;
	struct sort_iter sort_iter;
	struct bset_tree *t;
	struct bset *start_bset = bset(b, &b->set[start_idx]);
	bool used_mempool = false;
	u64 start_time, seq = 0;
	unsigned i, u64s = 0, bytes, shift = end_idx - start_idx - 1;
	bool sorting_entire_node = start_idx == 0 &&
		end_idx == b->nsets;

	sort_iter_init(&sort_iter, b);

	for (t = b->set + start_idx;
	     t < b->set + end_idx;
	     t++) {
		u64s += le16_to_cpu(bset(b, t)->u64s);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
	}

	bytes = sorting_entire_node
		? btree_bytes(c)
		: __vstruct_bytes(struct btree_node, u64s);

	out = btree_bounce_alloc(c, bytes, &used_mempool);

	start_time = local_clock();

	if (btree_node_old_extent_overwrite(b))
		filter_whiteouts = bset_written(b, start_bset);

	u64s = (btree_node_old_extent_overwrite(b)
		? bch2_sort_extents
		: bch2_sort_keys)(out->keys.start,
				  &sort_iter,
				  filter_whiteouts);

	out->keys.u64s = cpu_to_le16(u64s);

	BUG_ON(vstruct_end(&out->keys) > (void *) out + bytes);

	if (sorting_entire_node)
		bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
				       start_time);

	/* Make sure we preserve bset journal_seq: */
	for (t = b->set + start_idx; t < b->set + end_idx; t++)
		seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
	start_bset->journal_seq = cpu_to_le64(seq);

	if (sorting_entire_node) {
		unsigned u64s = le16_to_cpu(out->keys.u64s);

		BUG_ON(bytes != btree_bytes(c));

		/*
		 * Our temporary buffer is the same size as the btree node's
		 * buffer, we can just swap buffers instead of doing a big
		 * memcpy()
		 */
		*out = *b->data;
		out->keys.u64s = cpu_to_le16(u64s);
		swap(out, b->data);
		set_btree_bset(b, b->set, &b->data->keys);
	} else {
		start_bset->u64s = out->keys.u64s;
		memcpy_u64s(start_bset->start,
			    out->keys.start,
			    le16_to_cpu(out->keys.u64s));
	}

	for (i = start_idx + 1; i < end_idx; i++)
		b->nr.bset_u64s[start_idx] +=
			b->nr.bset_u64s[i];

	b->nsets -= shift;

	for (i = start_idx + 1; i < b->nsets; i++) {
		b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
		b->set[i] = b->set[i + shift];
	}

	for (i = b->nsets; i < MAX_BSETS; i++)
		b->nr.bset_u64s[i] = 0;

	set_btree_bset_end(b, &b->set[start_idx]);
	bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);

	btree_bounce_free(c, bytes, used_mempool, out);

	bch2_verify_btree_nr_keys(b);
}

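/*
 * Sort and repack all keys from @src into @dst, which must contain a single
 * empty bset - e.g. when rewriting a node's contents with a new key format.
 */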
void bch2_btree_sort_into(struct bch_fs *c,
			  struct btree *dst,
			  struct btree *src)
{
	struct btree_nr_keys nr;
	struct btree_node_iter src_iter;
	u64 start_time = local_clock();

	BUG_ON(dst->nsets != 1);

	bch2_bset_set_no_aux_tree(dst, dst->set);

	bch2_btree_node_iter_init_from_start(&src_iter, src);

	if (btree_node_is_extents(src))
		nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);
	else
		nr = bch2_sort_repack(btree_bset_first(dst),
				src, &src_iter,
				&dst->format,
				true);

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

	set_btree_bset_end(dst, dst->set);

	dst->nr.live_u64s += nr.live_u64s;
	dst->nr.bset_u64s[0] += nr.bset_u64s[0];
	dst->nr.packed_keys += nr.packed_keys;
	dst->nr.unpacked_keys += nr.unpacked_keys;

	bch2_verify_btree_nr_keys(dst);
}

#define SORT_CRIT	(4096 / sizeof(u64))

/*
 * We're about to add another bset to the btree node, so if there's currently
 * too many bsets - sort some of them together:
 */
static bool btree_node_compact(struct bch_fs *c, struct btree *b,
			       struct btree_iter *iter)
{
	unsigned unwritten_idx;
	bool ret = false;

	for (unwritten_idx = 0;
	     unwritten_idx < b->nsets;
	     unwritten_idx++)
		if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
			break;

	if (b->nsets - unwritten_idx > 1) {
		btree_node_sort(c, b, iter, unwritten_idx,
				b->nsets, false);
		ret = true;
	}

	if (unwritten_idx > 1) {
		btree_node_sort(c, b, iter, 0, unwritten_idx, false);
		ret = true;
	}

	return ret;
}

void bch2_btree_build_aux_trees(struct btree *b)
{
	struct bset_tree *t;

	for_each_bset(b, t)
		bch2_bset_build_aux_tree(b, t,
				!bset_written(b, bset(b, t)) &&
				t == bset_tree_last(b));
}

/*
 * @bch2_btree_init_next - initialize a new (unwritten) bset that can then be
 * inserted into
 *
 * Safe to call if there already is an unwritten bset - will only add a new bset
 * if @b doesn't already have one.
 *
 * If we sorted (i.e. invalidated iterators), @iter is re-initialized at the
 * end.
 */
void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
			  struct btree_iter *iter)
{
	struct btree_node_entry *bne;
	bool did_sort;

	EBUG_ON(!(b->c.lock.state.seq & 1));
	EBUG_ON(iter && iter->l[b->c.level].b != b);

	did_sort = btree_node_compact(c, b, iter);

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	if (iter && did_sort)
		bch2_btree_iter_reinit_node(iter, b);
}

static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
			      struct btree *b)
{
	pr_buf(out, "%s level %u/%u\n  ",
	       bch2_btree_ids[b->c.btree_id],
	       b->c.level,
	       c->btree_roots[b->c.btree_id].level);
	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}

static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
			  struct btree *b, struct bset *i,
			  unsigned offset, int write)
{
	pr_buf(out, "error validating btree node %sat btree ",
	       write ? "before write " : "");
	btree_pos_to_text(out, c, b);

	pr_buf(out, "\n node offset %u", b->written);
	if (i)
		pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
}

enum btree_err_type {
	BTREE_ERR_FIXABLE,
	BTREE_ERR_WANT_RETRY,
	BTREE_ERR_MUST_RETRY,
	BTREE_ERR_FATAL,
};

enum btree_validate_ret {
	BTREE_RETRY_READ = 64,
};

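/*
 * btree_err() reports a validation error: it logs a message describing the
 * problem, and depending on @type and on whether we're validating a read or
 * a write it may fix things up, signal that the read should be retried, or
 * bail out to the caller's fsck_err label.
 */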
#define btree_err(type, c, b, i, msg, ...) \
({ \
	__label__ out; \
	char _buf[300]; \
	char *buf2 = _buf; \
	struct printbuf out = PBUF(_buf); \
 \
	buf2 = kmalloc(4096, GFP_ATOMIC); \
	if (buf2) \
		out = _PBUF(buf2, 4096); \
 \
	btree_err_msg(&out, c, b, i, b->written, write); \
	pr_buf(&out, ": " msg, ##__VA_ARGS__); \
 \
	if (type == BTREE_ERR_FIXABLE && \
	    write == READ && \
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
		mustfix_fsck_err(c, "%s", buf2); \
		goto out; \
	} \
 \
	switch (write) { \
	case READ: \
		bch_err(c, "%s", buf2); \
 \
		switch (type) { \
		case BTREE_ERR_FIXABLE: \
			ret = BCH_FSCK_ERRORS_NOT_FIXED; \
			goto fsck_err; \
		case BTREE_ERR_WANT_RETRY: \
			if (have_retry) { \
				ret = BTREE_RETRY_READ; \
				goto fsck_err; \
			} \
			break; \
		case BTREE_ERR_MUST_RETRY: \
			ret = BTREE_RETRY_READ; \
			goto fsck_err; \
		case BTREE_ERR_FATAL: \
			ret = BCH_FSCK_ERRORS_NOT_FIXED; \
			goto fsck_err; \
		} \
		break; \
	case WRITE: \
		bch_err(c, "corrupt metadata before write: %s", buf2); \
 \
		if (bch2_fs_inconsistent(c)) { \
			ret = BCH_FSCK_ERRORS_NOT_FIXED; \
			goto fsck_err; \
		} \
		break; \
	} \
out: \
	if (buf2 != _buf) \
		kfree(buf2); \
	true; \
})

#define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)

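/*
 * Validate a bset's header against the btree node it belongs to: version,
 * size, sequence number, btree id and level, min/max keys and key format.
 */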
static int validate_bset(struct bch_fs *c, struct btree *b,
			 struct bset *i, unsigned sectors,
			 int write, bool have_retry)
{
	unsigned version = le16_to_cpu(i->version);
	const char *err;
	int ret = 0;

	btree_err_on((version != BCH_BSET_VERSION_OLD &&
		      version < bcachefs_metadata_version_min) ||
		     version >= bcachefs_metadata_version_max,
		     BTREE_ERR_FATAL, c, b, i,
		     "unsupported bset version");

	if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
			 BTREE_ERR_FIXABLE, c, b, i,
			 "bset past end of btree node")) {
		i->u64s = 0;
		return 0;
	}

	btree_err_on(b->written && !i->u64s,
		     BTREE_ERR_FIXABLE, c, b, i,
		     "empty bset");

	if (!b->written) {
		struct btree_node *bn =
			container_of(i, struct btree_node, keys);
		/* These indicate that we read the wrong btree node: */

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			/* XXX endianness */
			btree_err_on(bp->seq != bn->keys.seq,
				     BTREE_ERR_MUST_RETRY, c, b, NULL,
				     "incorrect sequence number (wrong btree node)");
		}

		btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect btree id");

		btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect level");

		if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
			u64 *p = (u64 *) &bn->ptr;

			*p = swab64(*p);
		}

		if (!write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
			struct bch_btree_ptr_v2 *bp =
				&bkey_i_to_btree_ptr_v2(&b->key)->v;

			if (BTREE_PTR_RANGE_UPDATED(bp)) {
				b->data->min_key = bp->min_key;
				b->data->max_key = b->key.k.p;
			}

			btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
				     BTREE_ERR_MUST_RETRY, c, b, NULL,
				     "incorrect min_key: got %llu:%llu should be %llu:%llu",
				     b->data->min_key.inode,
				     b->data->min_key.offset,
				     bp->min_key.inode,
				     bp->min_key.offset);
		}

		btree_err_on(bkey_cmp(bn->max_key, b->key.k.p),
			     BTREE_ERR_MUST_RETRY, c, b, i,
			     "incorrect max key %llu:%llu",
			     bn->max_key.inode,
			     bn->max_key.offset);

		if (write)
			compat_btree_node(b->c.level, b->c.btree_id, version,
					  BSET_BIG_ENDIAN(i), write, bn);

		/* XXX: ideally we would be validating min_key too */
#if 0
		/*
		 * not correct anymore, due to btree node write error
		 * handling
		 *
		 * need to add bn->seq to btree keys and verify
		 * against that
		 */
		btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
						  bn->ptr),
			     BTREE_ERR_FATAL, c, b, i,
			     "incorrect backpointer");
#endif
		err = bch2_bkey_format_validate(&bn->format);
		btree_err_on(err,
			     BTREE_ERR_FATAL, c, b, i,
			     "invalid bkey format: %s", err);

		compat_bformat(b->c.level, b->c.btree_id, version,
			       BSET_BIG_ENDIAN(i), write,
			       &bn->format);
	}
fsck_err:
	return ret;
}

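/*
 * Validate the keys within a bset: keys with a bad format or invalid values
 * are dropped; out of order keys are reported but not (yet) repaired. Also
 * computes the size of the separate whiteouts region, which ends at the
 * first non-whiteout key.
 */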
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
			      struct bset *i, unsigned *whiteout_u64s,
			      int write, bool have_retry)
{
	unsigned version = le16_to_cpu(i->version);
	struct bkey_packed *k, *prev = NULL;
	bool seen_non_whiteout = false;
	int ret = 0;

	if (!BSET_SEPARATE_WHITEOUTS(i)) {
		seen_non_whiteout = true;
		*whiteout_u64s = 0;
	}

	for (k = i->start;
	     k != vstruct_last(i);) {
		struct bkey_s u;
		struct bkey tmp;
		const char *invalid;

		if (btree_err_on(bkey_next(k) > vstruct_last(i),
				 BTREE_ERR_FIXABLE, c, b, i,
				 "key extends past end of bset")) {
			i->u64s = cpu_to_le16((u64 *) k - i->_data);
			break;
		}

		if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
				 BTREE_ERR_FIXABLE, c, b, i,
				 "invalid bkey format %u", k->format)) {
			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		/* XXX: validate k->u64s */
		if (!write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		u = __bkey_disassemble(b, k, &tmp);

		invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
			bch2_bkey_in_btree_node(b, u.s_c) ?:
			(write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
		if (invalid) {
			char buf[160];

			bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
			btree_err(BTREE_ERR_FIXABLE, c, b, i,
				  "invalid bkey: %s\n%s", invalid, buf);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			continue;
		}

		if (write)
			bch2_bkey_compat(b->c.level, b->c.btree_id, version,
					 BSET_BIG_ENDIAN(i), write,
					 &b->format, k);

		/*
		 * with the separate whiteouts thing (used for extents), the
		 * second set of keys actually can have whiteouts too, so we
		 * can't solely go off bkey_whiteout()...
		 */

		if (!seen_non_whiteout &&
		    (!bkey_whiteout(k) ||
		     (prev && bkey_iter_cmp(b, prev, k) > 0))) {
			*whiteout_u64s = k->_data - i->_data;
			seen_non_whiteout = true;
		} else if (prev && bkey_iter_cmp(b, prev, k) > 0) {
			char buf1[80];
			char buf2[80];
			struct bkey up = bkey_unpack_key(b, prev);

			bch2_bkey_to_text(&PBUF(buf1), &up);
			bch2_bkey_to_text(&PBUF(buf2), u.k);

			bch2_dump_bset(c, b, i, 0);
			btree_err(BTREE_ERR_FATAL, c, b, i,
				  "keys out of order: %s > %s",
				  buf1, buf2);
			/* XXX: repair this */
		}

		prev = k;
		k = bkey_next_skip_noops(k, vstruct_last(i));
	}
fsck_err:
	return ret;
}

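/*
 * Parse and validate a btree node that was just read: walk the bsets,
 * verifying checksums and decrypting, sort them all together into a single
 * bset, then validate the individual keys, dropping any that are invalid.
 */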
int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
{
	struct btree_node_entry *bne;
	struct sort_iter *iter;
	struct btree_node *sorted;
	struct bkey_packed *k;
	struct bch_extent_ptr *ptr;
	struct bset *i;
	bool used_mempool, blacklisted;
	unsigned u64s;
	int ret, retry_read = 0, write = READ;

	iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
	sort_iter_init(iter, b);
	iter->size = (btree_blocks(c) + 1) * 2;

	if (bch2_meta_read_fault("btree"))
		btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
			  "dynamic fault");

	btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
		     BTREE_ERR_MUST_RETRY, c, b, NULL,
		     "bad magic");

	btree_err_on(!b->data->keys.seq,
		     BTREE_ERR_MUST_RETRY, c, b, NULL,
		     "bad btree header");

	if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
		struct bch_btree_ptr_v2 *bp =
			&bkey_i_to_btree_ptr_v2(&b->key)->v;

		btree_err_on(b->data->keys.seq != bp->seq,
			     BTREE_ERR_MUST_RETRY, c, b, NULL,
			     "got wrong btree node (seq %llx want %llx)",
			     b->data->keys.seq, bp->seq);
	}

	while (b->written < c->opts.btree_node_size) {
		unsigned sectors, whiteout_u64s = 0;
		struct nonce nonce;
		struct bch_csum csum;
		bool first = !b->written;

		if (!b->written) {
			i = &b->data->keys;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "unknown checksum type %llu",
				     BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);

			btree_err_on(bch2_crc_cmp(csum, b->data->csum),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			if (btree_node_is_extents(b) &&
			    !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data)) {
				set_btree_node_old_extent_overwrite(b);
				set_btree_node_need_rewrite(b);
			}

			sectors = vstruct_sectors(b->data, c->block_bits);
		} else {
			bne = write_block(b);
			i = &bne->keys;

			if (i->seq != b->data->keys.seq)
				break;

			btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "unknown checksum type %llu",
				     BSET_CSUM_TYPE(i));

			nonce = btree_nonce(i, b->written << 9);
			csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

			btree_err_on(bch2_crc_cmp(csum, bne->csum),
				     BTREE_ERR_WANT_RETRY, c, b, i,
				     "invalid checksum");

			bset_encrypt(c, i, b->written << 9);

			sectors = vstruct_sectors(bne, c->block_bits);
		}

		ret = validate_bset(c, b, i, sectors,
				    READ, have_retry);
		if (ret)
			goto fsck_err;

		if (!b->written)
			btree_node_set_format(b, b->data->format);

		ret = validate_bset_keys(c, b, i, &whiteout_u64s,
					 READ, have_retry);
		if (ret)
			goto fsck_err;

		SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

		b->written += sectors;

		blacklisted = bch2_journal_seq_is_blacklisted(c,
					le64_to_cpu(i->journal_seq),
					true);

		btree_err_on(blacklisted && first,
			     BTREE_ERR_FIXABLE, c, b, i,
			     "first btree node bset has blacklisted journal seq");
		if (blacklisted && !first)
			continue;

		sort_iter_add(iter, i->start,
			      vstruct_idx(i, whiteout_u64s));

		sort_iter_add(iter,
			      vstruct_idx(i, whiteout_u64s),
			      vstruct_last(i));
	}

	for (bne = write_block(b);
	     bset_byte_offset(b, bne) < btree_bytes(c);
	     bne = (void *) bne + block_bytes(c))
		btree_err_on(bne->keys.seq == b->data->keys.seq,
			     BTREE_ERR_WANT_RETRY, c, b, NULL,
			     "found bset signature after last bset");

	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
	sorted->keys.u64s = 0;

	set_btree_bset(b, b->set, &b->data->keys);

	b->nr = (btree_node_old_extent_overwrite(b)
		 ? bch2_extent_sort_fix_overlapping
		 : bch2_key_sort_fix_overlapping)(c, &sorted->keys, iter);

	u64s = le16_to_cpu(sorted->keys.u64s);
	*sorted = *b->data;
	sorted->keys.u64s = cpu_to_le16(u64s);
	swap(sorted, b->data);
	set_btree_bset(b, b->set, &b->data->keys);
	b->nsets = 1;

	BUG_ON(b->nr.live_u64s != u64s);

	btree_bounce_free(c, btree_bytes(c), used_mempool, sorted);

	i = &b->data->keys;
	for (k = i->start; k != vstruct_last(i);) {
		struct bkey tmp;
		struct bkey_s u = __bkey_disassemble(b, k, &tmp);
		const char *invalid = bch2_bkey_val_invalid(c, u.s_c);

		if (invalid ||
		    (bch2_inject_invalid_keys &&
		     !bversion_cmp(u.k->version, MAX_VERSION))) {
			char buf[160];

			bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
			btree_err(BTREE_ERR_FIXABLE, c, b, i,
				  "invalid bkey %s: %s", buf, invalid);

			btree_keys_account_key_drop(&b->nr, 0, k);

			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
			memmove_u64s_down(k, bkey_next(k),
					  (u64 *) vstruct_end(i) - (u64 *) k);
			set_btree_bset_end(b, b->set);
			continue;
		}

		if (u.k->type == KEY_TYPE_btree_ptr_v2) {
			struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);

			bp.v->mem_ptr = 0;
		}

		k = bkey_next_skip_noops(k, vstruct_last(i));
	}

	bch2_bset_build_aux_tree(b, b->set, false);

	set_needs_whiteout(btree_bset_first(b), true);

	btree_node_reset_sib_u64s(b);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.state != BCH_MEMBER_STATE_RW)
			set_btree_node_need_rewrite(b);
	}
out:
	mempool_free(iter, &c->fill_iter);
	return retry_read;
fsck_err:
	if (ret == BTREE_RETRY_READ) {
		retry_read = 1;
	} else {
		bch2_inconsistent_error(c);
		set_btree_node_read_error(b);
	}
	goto out;
}

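/*
 * Read completion work: on checksum error, I/O error or invalid contents,
 * retry the read from a different replica for as long as
 * bch2_bkey_pick_read_device() can find one.
 */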
static void btree_node_read_work(struct work_struct *work)
{
	struct btree_read_bio *rb =
		container_of(work, struct btree_read_bio, work);
	struct bch_fs *c = rb->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
	struct btree *b = rb->bio.bi_private;
	struct bio *bio = &rb->bio;
	struct bch_io_failures failed = { .nr = 0 };
	char buf[200];
	struct printbuf out;
	bool can_retry;

	goto start;
	while (1) {
		bch_info(c, "retrying read");
		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		rb->have_ioref = bch2_dev_get_ioref(ca, READ);
		bio_reset(bio);
		bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
		bio->bi_iter.bi_sector = rb->pick.ptr.offset;
		bio->bi_iter.bi_size = btree_bytes(c);

		if (rb->have_ioref) {
			bio_set_dev(bio, ca->disk_sb.bdev);
			submit_bio_wait(bio);
		} else {
			bio->bi_status = BLK_STS_REMOVED;
		}
start:
		out = PBUF(buf);
		btree_pos_to_text(&out, c, b);
		bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
				   bch2_blk_status_to_str(bio->bi_status), buf);
		if (rb->have_ioref)
			percpu_ref_put(&ca->io_ref);
		rb->have_ioref = false;

		bch2_mark_io_failure(&failed, &rb->pick);

		can_retry = bch2_bkey_pick_read_device(c,
					bkey_i_to_s_c(&b->key),
					&failed, &rb->pick) > 0;

		if (!bio->bi_status &&
		    !bch2_btree_node_read_done(c, b, can_retry))
			break;

		if (!can_retry) {
			set_btree_node_read_error(b);
			break;
		}
	}

	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
			       rb->start_time);
	bio_put(&rb->bio);
	clear_btree_node_read_in_flight(b);
	wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
}

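/*
 * Read completion: account device latency, then punt the heavy lifting to a
 * workqueue.
 */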
static void btree_node_read_endio(struct bio *bio)
{
	struct btree_read_bio *rb =
		container_of(bio, struct btree_read_bio, bio);
	struct bch_fs *c = rb->c;

	if (rb->have_ioref) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
		bch2_latency_acct(ca, rb->start_time, READ);
	}

	queue_work(system_unbound_wq, &rb->work);
}

void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
			  bool sync)
{
	struct extent_ptr_decoded pick;
	struct btree_read_bio *rb;
	struct bch_dev *ca;
	struct bio *bio;
	int ret;

	trace_btree_read(c, b);

	ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
					 NULL, &pick);
	if (bch2_fs_fatal_err_on(ret <= 0, c,
			"btree node read error: no device to read from")) {
		set_btree_node_read_error(b);
		return;
	}

	ca = bch_dev_bkey_exists(c, pick.ptr.dev);

	bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
						   btree_bytes(c)),
			       &c->btree_bio);
	rb = container_of(bio, struct btree_read_bio, bio);
	rb->c = c;
	rb->start_time = local_clock();
	rb->have_ioref = bch2_dev_get_ioref(ca, READ);
	rb->pick = pick;
	INIT_WORK(&rb->work, btree_node_read_work);
	bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
	bio->bi_iter.bi_sector = pick.ptr.offset;
	bio->bi_end_io = btree_node_read_endio;
	bio->bi_private = b;
	bch2_bio_map(bio, b->data, btree_bytes(c));

	set_btree_node_read_in_flight(b);

	if (rb->have_ioref) {
		this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_btree],
			     bio_sectors(bio));
		bio_set_dev(bio, ca->disk_sb.bdev);

		if (sync) {
			submit_bio_wait(bio);

			bio->bi_private = b;
			btree_node_read_work(&rb->work);
		} else {
			submit_bio(bio);
		}
	} else {
		bio->bi_status = BLK_STS_REMOVED;

		if (sync)
			btree_node_read_work(&rb->work);
		else
			queue_work(system_unbound_wq, &rb->work);
	}
}

int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
			 const struct bkey_i *k, unsigned level)
{
	struct closure cl;
	struct btree *b;
	int ret;

	closure_init_stack(&cl);

	do {
		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
		closure_sync(&cl);
	} while (ret);

	b = bch2_btree_node_mem_alloc(c);
	bch2_btree_cache_cannibalize_unlock(c);

	BUG_ON(IS_ERR(b));

	bkey_copy(&b->key, k);
	BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));

	bch2_btree_node_read(c, b, true);

	if (btree_node_read_error(b)) {
		bch2_btree_node_hash_remove(&c->btree_cache, b);

		mutex_lock(&c->btree_cache.lock);
		list_move(&b->list, &c->btree_cache.freeable);
		mutex_unlock(&c->btree_cache.lock);

		ret = -EIO;
		goto err;
	}

	bch2_btree_set_root_for_read(c, b);
err:
	six_unlock_write(&b->c.lock);
	six_unlock_intent(&b->c.lock);

	return ret;
}

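/*
 * A write is no longer outstanding: drop the reference on the btree_update
 * waiting on it (signalled via the low bit of will_make_reachable), and
 * release the journal pin for this write.
 */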
void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
			       struct btree_write *w)
{
	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);

	do {
		old = new = v;
		if (!(old & 1))
			break;

		new &= ~1UL;
	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);

	if (old & 1)
		closure_put(&((struct btree_update *) new)->cl);

	bch2_journal_pin_drop(&c->journal, &w->journal);
}

static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
	struct btree_write *w = btree_prev_write(b);

	bch2_btree_complete_write(c, b, w);
	btree_node_io_unlock(b);
}

static void bch2_btree_node_write_error(struct bch_fs *c,
					struct btree_write_bio *wbio)
{
	struct btree *b = wbio->wbio.bio.bi_private;
	struct bkey_buf k;
	struct bch_extent_ptr *ptr;
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_bkey_buf_init(&k);
	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
					BTREE_MAX_DEPTH, b->c.level, 0);
retry:
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		goto err;

	/* has node been freed? */
	if (iter->l[b->c.level].b != b) {
		/* node has been freed: */
		BUG_ON(!btree_node_dying(b));
		goto out;
	}

	BUG_ON(!btree_node_hashed(b));

	bch2_bkey_buf_copy(&k, c, &b->key);

	bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
			    bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
		goto err;

	ret = bch2_btree_node_update_key(c, iter, b, k.k);
	if (ret == -EINTR)
		goto retry;
	if (ret)
		goto err;
out:
	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&k, c);
	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
	return;
err:
	set_btree_node_noevict(b);
	bch2_fs_fatal_error(c, "fatal error writing btree node");
	goto out;
}

void bch2_btree_write_error_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs,
					btree_write_error_work);
	struct bio *bio;

	while (1) {
		spin_lock_irq(&c->btree_write_error_lock);
		bio = bio_list_pop(&c->btree_write_error_list);
		spin_unlock_irq(&c->btree_write_error_lock);

		if (!bio)
			break;

		bch2_btree_node_write_error(c,
			container_of(bio, struct btree_write_bio, wbio.bio));
	}
}

static void btree_node_write_work(struct work_struct *work)
{
	struct btree_write_bio *wbio =
		container_of(work, struct btree_write_bio, work);
	struct bch_fs *c = wbio->wbio.c;
	struct btree *b = wbio->wbio.bio.bi_private;

	btree_bounce_free(c,
		wbio->bytes,
		wbio->wbio.used_mempool,
		wbio->data);

	if (wbio->wbio.failed.nr) {
		unsigned long flags;

		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);

		queue_work(c->wq, &c->btree_write_error_work);
		return;
	}

	bio_put(&wbio->wbio.bio);
	btree_node_write_done(c, b);
}

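/*
 * Write completion: record any device this write failed on so the error
 * path can drop its pointer, complete the parent bio if this was a split,
 * and punt the final cleanup of the original bio to a workqueue.
 */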
static void btree_node_write_endio(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);
	struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
	struct bch_write_bio *orig = parent ?: wbio;
	struct bch_fs *c = wbio->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
	unsigned long flags;

	if (wbio->have_ioref)
		bch2_latency_acct(ca, wbio->submit_time, WRITE);

	if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("btree")) {
		spin_lock_irqsave(&c->btree_write_error_lock, flags);
		bch2_dev_list_add_dev(&orig->failed, wbio->dev);
		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
	}

	if (wbio->have_ioref)
		percpu_ref_put(&ca->io_ref);

	if (parent) {
		bio_put(bio);
		bio_endio(&parent->bio);
	} else {
		struct btree_write_bio *wb =
			container_of(orig, struct btree_write_bio, wbio);

		INIT_WORK(&wb->work, btree_node_write_work);
		queue_work(system_unbound_wq, &wb->work);
	}
}

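/*
 * Sanity check a bset we're about to write - failures here indicate a bug
 * elsewhere, and mark the filesystem inconsistent.
 */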
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
				   struct bset *i, unsigned sectors)
{
	unsigned whiteout_u64s = 0;
	int ret;

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
		return -1;

	ret = validate_bset(c, b, i, sectors, WRITE, false) ?:
		validate_bset_keys(c, b, i, &whiteout_u64s, WRITE, false);
	if (ret) {
		bch2_inconsistent_error(c);
		dump_stack();
	}

	return ret;
}

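/*
 * Do a btree node write: collect the unwritten bsets and whiteouts, sort
 * them into a bounce buffer, checksum (and optionally encrypt), then submit
 * the write to each replica. The dirty bit acts as our lock against racing
 * writers - see the comment in the function body.
 */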
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			     enum six_lock_type lock_type_held)
{
	struct btree_write_bio *wbio;
	struct bset_tree *t;
	struct bset *i;
	struct btree_node *bn = NULL;
	struct btree_node_entry *bne = NULL;
	struct bkey_buf k;
	struct bch_extent_ptr *ptr;
	struct sort_iter sort_iter;
	struct nonce nonce;
	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
	u64 seq = 0;
	bool used_mempool;
	unsigned long old, new;
	bool validate_before_checksum = false;
	void *data;

	bch2_bkey_buf_init(&k);

	if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
		return;

	/*
	 * We may only have a read lock on the btree node - the dirty bit is our
	 * "lock" against racing with other threads that may be trying to start
	 * a write, we do a write iff we clear the dirty bit. Since setting the
	 * dirty bit requires a write lock, we can't race with other threads
	 * trying to start a write:
	 */
	do {
		old = new = READ_ONCE(b->flags);

		if (!(old & (1 << BTREE_NODE_dirty)))
			return;

		if (!btree_node_may_write(b))
			return;

		if (old & (1 << BTREE_NODE_never_write))
			return;

		if (old & (1 << BTREE_NODE_write_in_flight)) {
			btree_node_wait_on_io(b);
			continue;
		}

		new &= ~(1 << BTREE_NODE_dirty);
		new &= ~(1 << BTREE_NODE_need_write);
		new |= (1 << BTREE_NODE_write_in_flight);
		new |= (1 << BTREE_NODE_just_written);
		new ^= (1 << BTREE_NODE_write_idx);
	} while (cmpxchg_acquire(&b->flags, old, new) != old);

	atomic_dec(&c->btree_cache.dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

	BUG_ON(b->written >= c->opts.btree_node_size);
	BUG_ON(b->written & (c->opts.block_size - 1));
	BUG_ON(bset_written(b, btree_bset_last(b)));
	BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
	BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));

	bch2_sort_whiteouts(c, b);

	sort_iter_init(&sort_iter, b);

	bytes = !b->written
		? sizeof(struct btree_node)
		: sizeof(struct btree_node_entry);

	bytes += b->whiteout_u64s * sizeof(u64);

	for_each_bset(b, t) {
		i = bset(b, t);

		if (bset_written(b, i))
			continue;

		bytes += le16_to_cpu(i->u64s) * sizeof(u64);
		sort_iter_add(&sort_iter,
			      btree_bkey_first(b, t),
			      btree_bkey_last(b, t));
		seq = max(seq, le64_to_cpu(i->journal_seq));
	}

	BUG_ON(b->written && !seq);

	/* bch2_varint_decode may read up to 7 bytes past the end of the buffer: */
	bytes += 8;

	data = btree_bounce_alloc(c, bytes, &used_mempool);

	if (!b->written) {
		bn = data;
		*bn = *b->data;
		i = &bn->keys;
	} else {
		bne = data;
		bne->keys = b->data->keys;
		i = &bne->keys;
	}

	i->journal_seq = cpu_to_le64(seq);
	i->u64s = 0;

	if (!btree_node_old_extent_overwrite(b)) {
		sort_iter_add(&sort_iter,
			      unwritten_whiteouts_start(c, b),
			      unwritten_whiteouts_end(c, b));
		SET_BSET_SEPARATE_WHITEOUTS(i, false);
	} else {
		memcpy_u64s(i->start,
			    unwritten_whiteouts_start(c, b),
			    b->whiteout_u64s);
		i->u64s = cpu_to_le16(b->whiteout_u64s);
		SET_BSET_SEPARATE_WHITEOUTS(i, true);
	}

	b->whiteout_u64s = 0;

	u64s = btree_node_old_extent_overwrite(b)
		? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
		: bch2_sort_keys(i->start, &sort_iter, false);
	le16_add_cpu(&i->u64s, u64s);

	set_needs_whiteout(i, false);

	/* do we have data to write? */
	if (b->written && !i->u64s)
		goto nowrite;

	bytes_to_write = vstruct_end(i) - data;
	sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;

	memset(data + bytes_to_write, 0,
	       (sectors_to_write << 9) - bytes_to_write);

	BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
	BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
	BUG_ON(i->seq != b->data->keys.seq);

	i->version = c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le16(BCH_BSET_VERSION_OLD)
		: cpu_to_le16(c->sb.version);
	SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
		validate_before_checksum = true;

	/* validate_bset will be modifying: */
	if (le16_to_cpu(i->version) <= bcachefs_metadata_version_inode_btree_change)
		validate_before_checksum = true;

	/* if we're going to be encrypting, check metadata validity first: */
	if (validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	bset_encrypt(c, i, b->written << 9);

	nonce = btree_nonce(i, b->written << 9);

	if (bn)
		bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
	else
		bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);

	/* if we're not encrypting, check metadata after checksumming: */
	if (!validate_before_checksum &&
	    validate_bset_for_write(c, b, i, sectors_to_write))
		goto err;

	/*
	 * We handle btree write errors by immediately halting the journal -
	 * after we've done that, we can't issue any subsequent btree writes
	 * because they might have pointers to new nodes that failed to write.
	 *
	 * Furthermore, there's no point in doing any more btree writes because
	 * with the journal stopped, we're never going to update the journal to
	 * reflect that those writes were done and the data flushed from the
	 * journal:
	 *
	 * Also on journal error, the pending write may have updates that were
	 * never journalled (interior nodes, see btree_update_nodes_written()) -
	 * it's critical that we don't do the write in that case otherwise we
	 * will have updates visible that weren't in the journal:
	 *
	 * Make sure to update b->written so bch2_btree_init_next() doesn't
	 * break:
	 */
	if (bch2_journal_error(&c->journal) ||
	    c->opts.nochanges)
		goto err;

	trace_btree_write(b, bytes_to_write, sectors_to_write);

	wbio = container_of(bio_alloc_bioset(GFP_NOIO,
				buf_pages(data, sectors_to_write << 9),
				&c->btree_bio),
			    struct btree_write_bio, wbio.bio);
	wbio_init(&wbio->wbio.bio);
	wbio->data = data;
	wbio->bytes = bytes;
	wbio->wbio.used_mempool = used_mempool;
	wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
	wbio->wbio.bio.bi_end_io = btree_node_write_endio;
	wbio->wbio.bio.bi_private = b;

	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bch2_bkey_buf_copy(&k, c, &b->key);

	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(k.k)), ptr)
		ptr->offset += b->written;

	b->written += sectors_to_write;

	/* XXX: submitting IO with btree locks held: */
	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
	bch2_bkey_buf_exit(&k, c);
	return;
err:
	set_btree_node_noevict(b);
	b->written += sectors_to_write;
nowrite:
	btree_bounce_free(c, bytes, used_mempool, data);
	btree_node_write_done(c, b);
}

/*
 * Work that must be done with write lock held:
 */
bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
{
	bool invalidated_iter = false;
	struct btree_node_entry *bne;
	struct bset_tree *t;

	if (!btree_node_just_written(b))
		return false;

	BUG_ON(b->whiteout_u64s);

	clear_btree_node_just_written(b);

	/*
	 * Note: immediately after write, bset_written() doesn't work - the
	 * amount of data we had to write after compaction might have been
	 * smaller than the offset of the last bset.
	 *
	 * However, we know that all bsets have been written here, as long as
	 * we're still holding the write lock:
	 */

	/*
	 * XXX: decide if we really want to unconditionally sort down to a
	 * single bset:
	 */
	if (b->nsets > 1) {
		btree_node_sort(c, b, NULL, 0, b->nsets, true);
		invalidated_iter = true;
	} else {
		invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
	}

	for_each_bset(b, t)
		set_needs_whiteout(bset(b, t), true);

	bch2_btree_verify(c, b);

	/*
	 * If later we don't unconditionally sort down to a single bset, we have
	 * to ensure this is still true:
	 */
	BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));

	bne = want_new_bset(c, b);
	if (bne)
		bch2_bset_init_next(c, b, bne);

	bch2_btree_build_aux_trees(b);

	return invalidated_iter;
}

/*
 * Use this one if the node is intent locked:
 */
void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
			   enum six_lock_type lock_type_held)
{
	BUG_ON(lock_type_held == SIX_LOCK_write);

	if (lock_type_held == SIX_LOCK_intent ||
	    six_lock_tryupgrade(&b->c.lock)) {
		__bch2_btree_node_write(c, b, SIX_LOCK_intent);

		/* don't cycle lock unnecessarily: */
		if (btree_node_just_written(b) &&
		    six_trylock_write(&b->c.lock)) {
			bch2_btree_post_write_cleanup(c, b);
			six_unlock_write(&b->c.lock);
		}

		if (lock_type_held == SIX_LOCK_read)
			six_lock_downgrade(&b->c.lock);
	} else {
		__bch2_btree_node_write(c, b, SIX_LOCK_read);
	}
}

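/*
 * Wait for all in flight reads or writes (according to @flag) against
 * cached btree nodes to complete.
 */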
static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;
restart:
	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos)
		if (test_bit(flag, &b->flags)) {
			rcu_read_unlock();
			wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
			goto restart;
		}
	rcu_read_unlock();
}

void bch2_btree_flush_all_reads(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
}

void bch2_btree_flush_all_writes(struct bch_fs *c)
{
	__bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
}

void bch2_dirty_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct btree *b;
	unsigned i;

	rcu_read_lock();
	for_each_cached_btree(b, c, tbl, i, pos) {
		unsigned long flags = READ_ONCE(b->flags);

		if (!(flags & (1 << BTREE_NODE_dirty)))
			continue;

		pr_buf(out, "%p d %u n %u l %u w %u b %u r %u:%lu\n",
		       b,
		       (flags & (1 << BTREE_NODE_dirty)) != 0,
		       (flags & (1 << BTREE_NODE_need_write)) != 0,
		       b->c.level,
		       b->written,
		       !list_empty_careful(&b->write_blocked),
		       b->will_make_reachable != 0,
		       b->will_make_reachable & 1);
	}
	rcu_read_unlock();
}