1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
22 #include <trace/events/bcachefs.h>
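/*
 * Debug-only sanity check (intent inferred from the body below): walk the
 * packed keys in [start, end) and assert that they're in order and, for
 * extents, non-overlapping.
 */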
24 static void verify_no_dups(struct btree *b,
25 struct bkey_packed *start,
26 struct bkey_packed *end,
29 #ifdef CONFIG_BCACHEFS_DEBUG
30 struct bkey_packed *k, *p;
35 for (p = start, k = bkey_next_skip_noops(start, end);
37 p = k, k = bkey_next_skip_noops(k, end)) {
38 struct bkey l = bkey_unpack_key(b, p);
39 struct bkey r = bkey_unpack_key(b, k);
42 ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
43 : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
44 //BUG_ON(bkey_cmp_packed(&b->format, p, k) >= 0);
49 static void set_needs_whiteout(struct bset *i, int v)
51 struct bkey_packed *k;
55 k = bkey_next_skip_noops(k, vstruct_last(i)))
56 k->needs_whiteout = v;
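/*
 * Bounce buffers used for sorting/compacting bsets: btree_bounce_alloc() tries
 * a plain page allocation first and falls back to a preallocated mempool
 * (setting *used_mempool); btree_bounce_free() releases whichever one was used.
 */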
59 static void btree_bounce_free(struct bch_fs *c, unsigned order,
60 bool used_mempool, void *p)
63 mempool_free(p, &c->btree_bounce_pool);
65 vpfree(p, PAGE_SIZE << order);
68 static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
73 BUG_ON(order > btree_page_order(c));
75 *used_mempool = false;
76 p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
81 return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
84 static void sort_bkey_ptrs(const struct btree *bt,
85 struct bkey_packed **ptrs, unsigned nr)
87 unsigned n = nr, a = nr / 2, b, c, d;
92 /* Heap sort: see lib/sort.c: */
97 swap(ptrs[0], ptrs[n]);
101 for (b = a; c = 2 * b + 1, (d = c + 1) < n;)
102 b = bkey_cmp_packed(bt,
104 ptrs[d]) >= 0 ? c : d;
116 swap(ptrs[b], ptrs[c]);
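/*
 * Sort the node's unwritten whiteouts in place: gather pointers to them in a
 * bounce buffer, sort the pointers with sort_bkey_ptrs(), then copy the keys
 * back in order (intent inferred from the body below).
 */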
121 static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
123 struct bkey_packed *new_whiteouts, **ptrs, **ptrs_end, *k;
124 bool used_mempool = false;
127 if (!b->whiteout_u64s)
130 order = get_order(b->whiteout_u64s * sizeof(u64));
132 new_whiteouts = btree_bounce_alloc(c, order, &used_mempool);
134 ptrs = ptrs_end = ((void *) new_whiteouts + (PAGE_SIZE << order));
136 for (k = unwritten_whiteouts_start(c, b);
137 k != unwritten_whiteouts_end(c, b);
141 sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
145 while (ptrs != ptrs_end) {
151 verify_no_dups(b, new_whiteouts,
152 (void *) ((u64 *) new_whiteouts + b->whiteout_u64s),
153 btree_node_old_extent_overwrite(b));
155 memcpy_u64s(unwritten_whiteouts_start(c, b),
156 new_whiteouts, b->whiteout_u64s);
158 btree_bounce_free(c, order, used_mempool, new_whiteouts);
161 static bool should_compact_bset(struct btree *b, struct bset_tree *t,
162 bool compacting, enum compact_mode mode)
164 if (!bset_dead_u64s(b, t))
169 return should_compact_bset_lazy(b, t) ||
170 (compacting && !bset_written(b, bset(b, t)));
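/*
 * Compaction for nodes still using the old extent overwrite scheme (a sketch
 * of what the body below does): dead keys are dropped from each unwritten
 * bset, and whiteouts that still need to be written are moved into the node's
 * separate whiteout area, then sorted and merged.
 */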
178 static bool bch2_compact_extent_whiteouts(struct bch_fs *c,
180 enum compact_mode mode)
182 const struct bkey_format *f = &b->format;
184 struct bkey_packed *whiteouts = NULL;
185 struct bkey_packed *u_start, *u_pos;
186 struct sort_iter sort_iter;
187 unsigned order, whiteout_u64s = 0, u64s;
188 bool used_mempool, compacting = false;
190 BUG_ON(!btree_node_is_extents(b));
193 if (should_compact_bset(b, t, whiteout_u64s != 0, mode))
194 whiteout_u64s += bset_dead_u64s(b, t);
199 bch2_sort_whiteouts(c, b);
201 sort_iter_init(&sort_iter, b);
203 whiteout_u64s += b->whiteout_u64s;
204 order = get_order(whiteout_u64s * sizeof(u64));
206 whiteouts = btree_bounce_alloc(c, order, &used_mempool);
207 u_start = u_pos = whiteouts;
209 memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
211 u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);
213 sort_iter_add(&sort_iter, u_start, u_pos);
215 for_each_bset(b, t) {
216 struct bset *i = bset(b, t);
217 struct bkey_packed *k, *n, *out, *start, *end;
218 struct btree_node_entry *src = NULL, *dst = NULL;
220 if (t != b->set && !bset_written(b, i)) {
221 src = container_of(i, struct btree_node_entry, keys);
222 dst = max(write_block(b),
223 (void *) btree_bkey_last(b, t - 1));
229 if (!should_compact_bset(b, t, compacting, mode)) {
231 memmove(dst, src, sizeof(*src) +
232 le16_to_cpu(src->keys.u64s) *
235 set_btree_bset(b, t, i);
243 end = vstruct_last(i);
246 memmove(dst, src, sizeof(*src));
248 set_btree_bset(b, t, i);
253 for (k = start; k != end; k = n) {
254 n = bkey_next_skip_noops(k, end);
259 BUG_ON(bkey_whiteout(k) &&
263 if (bkey_whiteout(k) && !k->needs_whiteout)
266 if (bkey_whiteout(k)) {
267 memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
268 set_bkeyp_val_u64s(f, u_pos, 0);
269 u_pos = bkey_next(u_pos);
272 out = bkey_next(out);
276 sort_iter_add(&sort_iter, u_start, u_pos);
278 i->u64s = cpu_to_le16((u64 *) out - i->_data);
279 set_btree_bset_end(b, t);
280 bch2_bset_set_no_aux_tree(b, t);
283 b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
285 BUG_ON((void *) unwritten_whiteouts_start(c, b) <
286 (void *) btree_bkey_last(b, bset_tree_last(b)));
288 u64s = bch2_sort_extent_whiteouts(unwritten_whiteouts_start(c, b),
291 BUG_ON(u64s > b->whiteout_u64s);
292 BUG_ON(u_pos != whiteouts && !u64s);
294 if (u64s != b->whiteout_u64s) {
295 void *src = unwritten_whiteouts_start(c, b);
297 b->whiteout_u64s = u64s;
298 memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
302 unwritten_whiteouts_start(c, b),
303 unwritten_whiteouts_end(c, b),
306 btree_bounce_free(c, order, used_mempool, whiteouts);
308 bch2_btree_build_aux_trees(b);
310 bch_btree_keys_u64s_remaining(c, b);
311 bch2_verify_btree_nr_keys(b);
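/*
 * Simpler compaction path used when the node doesn't use the old extent
 * overwrite scheme: whiteouts are simply dropped from the unwritten bsets;
 * none of them should still need to be written out (intent inferred from the
 * BUG_ON() in the body below).
 */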
316 static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
321 for_each_bset(b, t) {
322 struct bset *i = bset(b, t);
323 struct bkey_packed *k, *n, *out, *start, *end;
324 struct btree_node_entry *src = NULL, *dst = NULL;
326 if (t != b->set && !bset_written(b, i)) {
327 src = container_of(i, struct btree_node_entry, keys);
328 dst = max(write_block(b),
329 (void *) btree_bkey_last(b, t - 1));
335 if (!should_compact_bset(b, t, ret, mode)) {
337 memmove(dst, src, sizeof(*src) +
338 le16_to_cpu(src->keys.u64s) *
341 set_btree_bset(b, t, i);
346 start = btree_bkey_first(b, t);
347 end = btree_bkey_last(b, t);
350 memmove(dst, src, sizeof(*src));
352 set_btree_bset(b, t, i);
357 for (k = start; k != end; k = n) {
358 n = bkey_next_skip_noops(k, end);
360 if (!bkey_whiteout(k)) {
362 out = bkey_next(out);
364 BUG_ON(k->needs_whiteout);
368 i->u64s = cpu_to_le16((u64 *) out - i->_data);
369 set_btree_bset_end(b, t);
370 bch2_bset_set_no_aux_tree(b, t);
374 bch2_verify_btree_nr_keys(b);
376 bch2_btree_build_aux_trees(b);
381 bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
382 enum compact_mode mode)
384 return !btree_node_old_extent_overwrite(b)
385 ? bch2_drop_whiteouts(b, mode)
386 : bch2_compact_extent_whiteouts(c, b, mode);
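/*
 * Merge sort bsets [start_idx, end_idx) into a bounce buffer and install the
 * result as a single bset, shifting any later bsets down - when sorting the
 * entire node we can just swap buffers instead of copying (a summary of the
 * body below).
 */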
389 static void btree_node_sort(struct bch_fs *c, struct btree *b,
390 struct btree_iter *iter,
393 bool filter_whiteouts)
395 struct btree_node *out;
396 struct sort_iter sort_iter;
398 struct bset *start_bset = bset(b, &b->set[start_idx]);
399 bool used_mempool = false;
400 u64 start_time, seq = 0;
401 unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
402 bool sorting_entire_node = start_idx == 0 &&
405 sort_iter_init(&sort_iter, b);
407 for (t = b->set + start_idx;
408 t < b->set + end_idx;
410 u64s += le16_to_cpu(bset(b, t)->u64s);
411 sort_iter_add(&sort_iter,
412 btree_bkey_first(b, t),
413 btree_bkey_last(b, t));
416 order = sorting_entire_node
417 ? btree_page_order(c)
418 : get_order(__vstruct_bytes(struct btree_node, u64s));
420 out = btree_bounce_alloc(c, order, &used_mempool);
422 start_time = local_clock();
424 if (btree_node_old_extent_overwrite(b))
425 filter_whiteouts = bset_written(b, start_bset);
427 u64s = (btree_node_old_extent_overwrite(b)
429 : bch2_sort_keys)(out->keys.start,
433 out->keys.u64s = cpu_to_le16(u64s);
435 BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
437 if (sorting_entire_node)
438 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
441 /* Make sure we preserve bset journal_seq: */
442 for (t = b->set + start_idx; t < b->set + end_idx; t++)
443 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
444 start_bset->journal_seq = cpu_to_le64(seq);
446 if (sorting_entire_node) {
447 unsigned u64s = le16_to_cpu(out->keys.u64s);
449 BUG_ON(order != btree_page_order(c));
452 * Our temporary buffer is the same size as the btree node's
453 * buffer, so we can just swap buffers instead of doing a big memcpy().
457 out->keys.u64s = cpu_to_le16(u64s);
459 set_btree_bset(b, b->set, &b->data->keys);
461 start_bset->u64s = out->keys.u64s;
462 memcpy_u64s(start_bset->start,
464 le16_to_cpu(out->keys.u64s));
467 for (i = start_idx + 1; i < end_idx; i++)
468 b->nr.bset_u64s[start_idx] +=
473 for (i = start_idx + 1; i < b->nsets; i++) {
474 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
475 b->set[i] = b->set[i + shift];
478 for (i = b->nsets; i < MAX_BSETS; i++)
479 b->nr.bset_u64s[i] = 0;
481 set_btree_bset_end(b, &b->set[start_idx]);
482 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
484 btree_bounce_free(c, order, used_mempool, out);
486 bch2_verify_btree_nr_keys(b);
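/*
 * Sort and repack the keys from @src into the (empty) first bset of @dst,
 * converting them to @dst's key format (a summary of the body below; that the
 * caller is copying a node's contents into a new node is an assumption).
 */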
489 void bch2_btree_sort_into(struct bch_fs *c,
493 struct btree_nr_keys nr;
494 struct btree_node_iter src_iter;
495 u64 start_time = local_clock();
497 BUG_ON(dst->nsets != 1);
499 bch2_bset_set_no_aux_tree(dst, dst->set);
501 bch2_btree_node_iter_init_from_start(&src_iter, src);
503 if (btree_node_is_extents(src))
504 nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
509 nr = bch2_sort_repack(btree_bset_first(dst),
514 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
517 set_btree_bset_end(dst, dst->set);
519 dst->nr.live_u64s += nr.live_u64s;
520 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
521 dst->nr.packed_keys += nr.packed_keys;
522 dst->nr.unpacked_keys += nr.unpacked_keys;
524 bch2_verify_btree_nr_keys(dst);
527 #define SORT_CRIT (4096 / sizeof(u64))
530 * We're about to add another bset to the btree node, so if there are currently
531 * too many bsets, sort some of them together.
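 *
 * The bsets that have already been written out and the ones that haven't are
 * sorted as two separate groups (see the function below):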
533 static bool btree_node_compact(struct bch_fs *c, struct btree *b,
534 struct btree_iter *iter)
536 unsigned unwritten_idx;
539 for (unwritten_idx = 0;
540 unwritten_idx < b->nsets;
542 if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
545 if (b->nsets - unwritten_idx > 1) {
546 btree_node_sort(c, b, iter, unwritten_idx,
551 if (unwritten_idx > 1) {
552 btree_node_sort(c, b, iter, 0, unwritten_idx, false);
559 void bch2_btree_build_aux_trees(struct btree *b)
564 bch2_bset_build_aux_tree(b, t,
565 !bset_written(b, bset(b, t)) &&
566 t == bset_tree_last(b));
570 * @bch2_btree_init_next - initialize a new (unwritten) bset that can then be inserted into
573 * Safe to call if there already is an unwritten bset - will only add a new bset
574 * if @b doesn't already have one.
576 * If we sorted (i.e. invalidated iterators), the iterator passed in is reinitialized.
578 void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
579 struct btree_iter *iter)
581 struct btree_node_entry *bne;
584 EBUG_ON(!(b->lock.state.seq & 1));
585 EBUG_ON(iter && iter->l[b->level].b != b);
587 did_sort = btree_node_compact(c, b, iter);
589 bne = want_new_bset(c, b);
591 bch2_bset_init_next(c, b, bne);
593 bch2_btree_build_aux_trees(b);
595 if (iter && did_sort)
596 bch2_btree_iter_reinit_node(iter, b);
599 static struct nonce btree_nonce(struct bset *i, unsigned offset)
601 return (struct nonce) {{
602 [0] = cpu_to_le32(offset),
603 [1] = ((__le32 *) &i->seq)[0],
604 [2] = ((__le32 *) &i->seq)[1],
605 [3] = ((__le32 *) &i->journal_seq)[0]^BCH_NONCE_BTREE,
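/*
 * Encrypt or decrypt a bset in place - with a stream cipher the same call does
 * both. For the first bset, the btree_node header fields that follow the
 * checksum are covered as well (a summary of the body below).
 */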
609 static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
611 struct nonce nonce = btree_nonce(i, offset);
614 struct btree_node *bn = container_of(i, struct btree_node, keys);
615 unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
617 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
620 nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
623 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
624 vstruct_end(i) - (void *) i->_data);
627 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
628 struct btree *b, struct bset *i,
629 unsigned offset, int write)
631 pr_buf(out, "error validating btree node %s"
632 "at btree %u level %u/%u\n"
633 "pos %llu:%llu node offset %u",
634 write ? "before write " : "",
635 b->btree_id, b->level,
636 c->btree_roots[b->btree_id].level,
637 b->key.k.p.inode, b->key.k.p.offset,
640 pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
643 enum btree_err_type {
645 BTREE_ERR_WANT_RETRY,
646 BTREE_ERR_MUST_RETRY,
650 enum btree_validate_ret {
651 BTREE_RETRY_READ = 64,
654 #define btree_err(type, c, b, i, msg, ...) \
658 struct printbuf out = PBUF(_buf); \
660 btree_err_msg(&out, c, b, i, b->written, write); \
661 pr_buf(&out, ": " msg, ##__VA_ARGS__); \
663 if (type == BTREE_ERR_FIXABLE && \
665 !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
666 mustfix_fsck_err(c, "%s", _buf); \
672 bch_err(c, "%s", _buf); \
675 case BTREE_ERR_FIXABLE: \
676 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
678 case BTREE_ERR_WANT_RETRY: \
680 ret = BTREE_RETRY_READ; \
684 case BTREE_ERR_MUST_RETRY: \
685 ret = BTREE_RETRY_READ; \
687 case BTREE_ERR_FATAL: \
688 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
693 bch_err(c, "corrupt metadata before write: %s", _buf); \
695 if (bch2_fs_inconsistent(c)) { \
696 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
705 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
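/*
 * Validate one bset of a btree node, at read or write time: check the header
 * fields against the node's key, the bkey format and bset version, then walk
 * the keys, dropping or truncating any that are invalid or out of order
 * (a summary of the body below).
 */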
707 static int validate_bset(struct bch_fs *c, struct btree *b,
708 struct bset *i, unsigned sectors,
709 unsigned *whiteout_u64s, int write,
712 struct bkey_packed *k, *prev = NULL;
713 bool seen_non_whiteout = false;
719 /* These indicate that we read the wrong btree node: */
720 btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
721 BTREE_ERR_MUST_RETRY, c, b, i,
722 "incorrect btree id");
724 btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
725 BTREE_ERR_MUST_RETRY, c, b, i,
728 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
729 u64 *p = (u64 *) &b->data->ptr;
732 bch2_bpos_swab(&b->data->min_key);
733 bch2_bpos_swab(&b->data->max_key);
736 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
737 struct bch_btree_ptr_v2 *bp =
738 &bkey_i_to_btree_ptr_v2(&b->key)->v;
740 btree_err_on(bkey_cmp(b->data->min_key, bp->min_key),
741 BTREE_ERR_MUST_RETRY, c, b, NULL,
742 "incorrect min_key");
745 btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
746 BTREE_ERR_MUST_RETRY, c, b, i,
747 "incorrect max key");
749 /* XXX: ideally we would be validating min_key too */
752 * not correct anymore, due to btree node write error handling
755 * need to add b->data->seq to btree keys and verify against that
758 btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
760 BTREE_ERR_FATAL, c, b, i,
761 "incorrect backpointer");
763 err = bch2_bkey_format_validate(&b->data->format);
765 BTREE_ERR_FATAL, c, b, i,
766 "invalid bkey format: %s", err);
769 version = le16_to_cpu(i->version);
770 btree_err_on((version != BCH_BSET_VERSION_OLD &&
771 version < bcachefs_metadata_version_min) ||
772 version >= bcachefs_metadata_version_max,
773 BTREE_ERR_FATAL, c, b, i,
774 "unsupported bset version");
776 if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
777 BTREE_ERR_FIXABLE, c, b, i,
778 "bset past end of btree node")) {
783 btree_err_on(b->written && !i->u64s,
784 BTREE_ERR_FIXABLE, c, b, i,
787 if (!BSET_SEPARATE_WHITEOUTS(i)) {
788 seen_non_whiteout = true;
793 k != vstruct_last(i);) {
798 if (btree_err_on(bkey_next(k) > vstruct_last(i),
799 BTREE_ERR_FIXABLE, c, b, i,
800 "key extends past end of bset")) {
801 i->u64s = cpu_to_le16((u64 *) k - i->_data);
805 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
806 BTREE_ERR_FIXABLE, c, b, i,
807 "invalid bkey format %u", k->format)) {
808 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
809 memmove_u64s_down(k, bkey_next(k),
810 (u64 *) vstruct_end(i) - (u64 *) k);
814 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
815 bch2_bkey_swab_key(&b->format, k);
818 version < bcachefs_metadata_version_bkey_renumber)
819 bch2_bkey_renumber(btree_node_type(b), k, write);
821 u = __bkey_disassemble(b, k, &tmp);
823 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
824 bch2_bkey_swab_val(u);
826 invalid = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b)) ?:
827 bch2_bkey_in_btree_node(b, u.s_c) ?:
828 (write ? bch2_bkey_val_invalid(c, u.s_c) : NULL);
832 bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
833 btree_err(BTREE_ERR_FIXABLE, c, b, i,
834 "invalid bkey:\n%s\n%s", invalid, buf);
836 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
837 memmove_u64s_down(k, bkey_next(k),
838 (u64 *) vstruct_end(i) - (u64 *) k);
843 version < bcachefs_metadata_version_bkey_renumber)
844 bch2_bkey_renumber(btree_node_type(b), k, write);
847 * with the separate whiteouts thing (used for extents), the
848 * second set of keys actually can have whiteouts too, so we
849 * can't solely go off bkey_whiteout()...
852 if (!seen_non_whiteout &&
853 (!bkey_whiteout(k) ||
854 (prev && bkey_iter_cmp(b, prev, k) > 0))) {
855 *whiteout_u64s = k->_data - i->_data;
856 seen_non_whiteout = true;
857 } else if (prev && bkey_iter_cmp(b, prev, k) > 0) {
860 struct bkey up = bkey_unpack_key(b, prev);
862 bch2_bkey_to_text(&PBUF(buf1), &up);
863 bch2_bkey_to_text(&PBUF(buf2), u.k);
865 bch2_dump_bset(b, i, 0);
866 btree_err(BTREE_ERR_FATAL, c, b, i,
867 "keys out of order: %s > %s",
869 /* XXX: repair this */
873 k = bkey_next_skip_noops(k, vstruct_last(i));
876 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
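/*
 * Parse and validate a freshly read btree node: verify magic and sequence
 * numbers, checksum and decrypt each bset, validate it, then merge all the
 * bsets with a fix-overlapping sort and re-check the resulting keys (a summary
 * of the body below).
 */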
881 int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
883 struct btree_node_entry *bne;
884 struct sort_iter *iter;
885 struct btree_node *sorted;
886 struct bkey_packed *k;
888 bool used_mempool, blacklisted;
890 int ret, retry_read = 0, write = READ;
892 iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
893 sort_iter_init(iter, b);
894 iter->size = (btree_blocks(c) + 1) * 2;
896 if (bch2_meta_read_fault("btree"))
897 btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
900 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
901 BTREE_ERR_MUST_RETRY, c, b, NULL,
904 btree_err_on(!b->data->keys.seq,
905 BTREE_ERR_MUST_RETRY, c, b, NULL,
908 if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
909 struct bch_btree_ptr_v2 *bp =
910 &bkey_i_to_btree_ptr_v2(&b->key)->v;
912 btree_err_on(b->data->keys.seq != bp->seq,
913 BTREE_ERR_MUST_RETRY, c, b, NULL,
914 "got wrong btree node");
917 while (b->written < c->opts.btree_node_size) {
918 unsigned sectors, whiteout_u64s = 0;
920 struct bch_csum csum;
921 bool first = !b->written;
926 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
927 BTREE_ERR_WANT_RETRY, c, b, i,
928 "unknown checksum type");
930 nonce = btree_nonce(i, b->written << 9);
931 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
933 btree_err_on(bch2_crc_cmp(csum, b->data->csum),
934 BTREE_ERR_WANT_RETRY, c, b, i,
937 bset_encrypt(c, i, b->written << 9);
939 if (btree_node_is_extents(b) &&
940 !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data))
941 set_btree_node_old_extent_overwrite(b);
943 sectors = vstruct_sectors(b->data, c->block_bits);
945 btree_node_set_format(b, b->data->format);
947 bne = write_block(b);
950 if (i->seq != b->data->keys.seq)
953 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
954 BTREE_ERR_WANT_RETRY, c, b, i,
955 "unknown checksum type");
957 nonce = btree_nonce(i, b->written << 9);
958 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
960 btree_err_on(bch2_crc_cmp(csum, bne->csum),
961 BTREE_ERR_WANT_RETRY, c, b, i,
964 bset_encrypt(c, i, b->written << 9);
966 sectors = vstruct_sectors(bne, c->block_bits);
969 ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
974 b->written += sectors;
976 blacklisted = bch2_journal_seq_is_blacklisted(c,
977 le64_to_cpu(i->journal_seq),
980 btree_err_on(blacklisted && first,
981 BTREE_ERR_FIXABLE, c, b, i,
982 "first btree node bset has blacklisted journal seq");
983 if (blacklisted && !first)
986 sort_iter_add(iter, i->start,
987 vstruct_idx(i, whiteout_u64s));
990 vstruct_idx(i, whiteout_u64s),
994 for (bne = write_block(b);
995 bset_byte_offset(b, bne) < btree_bytes(c);
996 bne = (void *) bne + block_bytes(c))
997 btree_err_on(bne->keys.seq == b->data->keys.seq,
998 BTREE_ERR_WANT_RETRY, c, b, NULL,
999 "found bset signature after last bset");
1001 sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
1002 sorted->keys.u64s = 0;
1004 set_btree_bset(b, b->set, &b->data->keys);
1006 b->nr = (btree_node_old_extent_overwrite(b)
1007 ? bch2_extent_sort_fix_overlapping
1008 : bch2_key_sort_fix_overlapping)(c, &sorted->keys, iter);
1010 u64s = le16_to_cpu(sorted->keys.u64s);
1012 sorted->keys.u64s = cpu_to_le16(u64s);
1013 swap(sorted, b->data);
1014 set_btree_bset(b, b->set, &b->data->keys);
1017 BUG_ON(b->nr.live_u64s != u64s);
1019 btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
1022 for (k = i->start; k != vstruct_last(i);) {
1024 struct bkey_s u = __bkey_disassemble(b, k, &tmp);
1025 const char *invalid = bch2_bkey_val_invalid(c, u.s_c);
1028 (inject_invalid_keys(c) &&
1029 !bversion_cmp(u.k->version, MAX_VERSION))) {
1032 bch2_bkey_val_to_text(&PBUF(buf), c, u.s_c);
1033 btree_err(BTREE_ERR_FIXABLE, c, b, i,
1034 "invalid bkey %s: %s", buf, invalid);
1036 btree_keys_account_key_drop(&b->nr, 0, k);
1038 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
1039 memmove_u64s_down(k, bkey_next(k),
1040 (u64 *) vstruct_end(i) - (u64 *) k);
1041 set_btree_bset_end(b, b->set);
1045 if (u.k->type == KEY_TYPE_btree_ptr_v2) {
1046 struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
1051 k = bkey_next_skip_noops(k, vstruct_last(i));
1054 bch2_bset_build_aux_tree(b, b->set, false);
1056 set_needs_whiteout(btree_bset_first(b), true);
1058 btree_node_reset_sib_u64s(b);
1060 mempool_free(iter, &c->fill_iter);
1063 if (ret == BTREE_RETRY_READ) {
1066 bch2_inconsistent_error(c);
1067 set_btree_node_read_error(b);
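/*
 * Read completion work: if the read failed or didn't validate, mark the device
 * as failed and retry from another replica until we run out of candidates
 * (a summary of the body below).
 */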
1072 static void btree_node_read_work(struct work_struct *work)
1074 struct btree_read_bio *rb =
1075 container_of(work, struct btree_read_bio, work);
1076 struct bch_fs *c = rb->c;
1077 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1078 struct btree *b = rb->bio.bi_private;
1079 struct bio *bio = &rb->bio;
1080 struct bch_io_failures failed = { .nr = 0 };
1085 bch_info(c, "retrying read");
1086 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1087 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1089 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1090 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
1091 bio->bi_iter.bi_size = btree_bytes(c);
1093 if (rb->have_ioref) {
1094 bio_set_dev(bio, ca->disk_sb.bdev);
1095 submit_bio_wait(bio);
1097 bio->bi_status = BLK_STS_REMOVED;
1100 bch2_dev_io_err_on(bio->bi_status, ca, "btree read");
1102 percpu_ref_put(&ca->io_ref);
1103 rb->have_ioref = false;
1105 bch2_mark_io_failure(&failed, &rb->pick);
1107 can_retry = bch2_bkey_pick_read_device(c,
1108 bkey_i_to_s_c(&b->key),
1109 &failed, &rb->pick) > 0;
1111 if (!bio->bi_status &&
1112 !bch2_btree_node_read_done(c, b, can_retry))
1116 set_btree_node_read_error(b);
1121 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
1124 clear_btree_node_read_in_flight(b);
1125 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
1128 static void btree_node_read_endio(struct bio *bio)
1130 struct btree_read_bio *rb =
1131 container_of(bio, struct btree_read_bio, bio);
1132 struct bch_fs *c = rb->c;
1134 if (rb->have_ioref) {
1135 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1136 bch2_latency_acct(ca, rb->start_time, READ);
1139 queue_work(system_unbound_wq, &rb->work);
1142 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1145 struct extent_ptr_decoded pick;
1146 struct btree_read_bio *rb;
1151 trace_btree_read(c, b);
1153 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1155 if (bch2_fs_fatal_err_on(ret <= 0, c,
1156 "btree node read error: no device to read from")) {
1157 set_btree_node_read_error(b);
1161 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1163 bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
1166 rb = container_of(bio, struct btree_read_bio, bio);
1168 rb->start_time = local_clock();
1169 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1171 INIT_WORK(&rb->work, btree_node_read_work);
1172 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1173 bio->bi_iter.bi_sector = pick.ptr.offset;
1174 bio->bi_end_io = btree_node_read_endio;
1175 bio->bi_private = b;
1176 bch2_bio_map(bio, b->data, btree_bytes(c));
1178 set_btree_node_read_in_flight(b);
1180 if (rb->have_ioref) {
1181 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
1183 bio_set_dev(bio, ca->disk_sb.bdev);
1186 submit_bio_wait(bio);
1188 bio->bi_private = b;
1189 btree_node_read_work(&rb->work);
1194 bio->bi_status = BLK_STS_REMOVED;
1197 btree_node_read_work(&rb->work);
1199 queue_work(system_unbound_wq, &rb->work);
1204 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1205 const struct bkey_i *k, unsigned level)
1211 closure_init_stack(&cl);
1214 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1218 b = bch2_btree_node_mem_alloc(c);
1219 bch2_btree_cache_cannibalize_unlock(c);
1223 bkey_copy(&b->key, k);
1224 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1226 bch2_btree_node_read(c, b, true);
1228 if (btree_node_read_error(b)) {
1229 bch2_btree_node_hash_remove(&c->btree_cache, b);
1231 mutex_lock(&c->btree_cache.lock);
1232 list_move(&b->list, &c->btree_cache.freeable);
1233 mutex_unlock(&c->btree_cache.lock);
1239 bch2_btree_set_root_for_read(c, b);
1241 six_unlock_write(&b->lock);
1242 six_unlock_intent(&b->lock);
1247 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1248 struct btree_write *w)
1250 unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1258 } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1261 closure_put(&((struct btree_update *) new)->cl);
1263 bch2_journal_pin_drop(&c->journal, &w->journal);
1264 closure_wake_up(&w->wait);
1267 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1269 struct btree_write *w = btree_prev_write(b);
1271 bch2_btree_complete_write(c, b, w);
1272 btree_node_io_unlock(b);
1275 static void bch2_btree_node_write_error(struct bch_fs *c,
1276 struct btree_write_bio *wbio)
1278 struct btree *b = wbio->wbio.bio.bi_private;
1279 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1280 struct bch_extent_ptr *ptr;
1281 struct btree_trans trans;
1282 struct btree_iter *iter;
1285 bch2_trans_init(&trans, c, 0, 0);
1287 iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
1288 BTREE_MAX_DEPTH, b->level, 0);
1290 ret = bch2_btree_iter_traverse(iter);
1294 /* has node been freed? */
1295 if (iter->l[b->level].b != b) {
1296 /* node has been freed: */
1297 BUG_ON(!btree_node_dying(b));
1301 BUG_ON(!btree_node_hashed(b));
1303 bkey_copy(&tmp.k, &b->key);
1305 bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
1306 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1308 if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&tmp.k)))
1311 ret = bch2_btree_node_update_key(c, iter, b, &tmp.k);
1317 bch2_trans_exit(&trans);
1318 bio_put(&wbio->wbio.bio);
1319 btree_node_write_done(c, b);
1322 set_btree_node_noevict(b);
1323 bch2_fs_fatal_error(c, "fatal error writing btree node");
1327 void bch2_btree_write_error_work(struct work_struct *work)
1329 struct bch_fs *c = container_of(work, struct bch_fs,
1330 btree_write_error_work);
1334 spin_lock_irq(&c->btree_write_error_lock);
1335 bio = bio_list_pop(&c->btree_write_error_list);
1336 spin_unlock_irq(&c->btree_write_error_lock);
1341 bch2_btree_node_write_error(c,
1342 container_of(bio, struct btree_write_bio, wbio.bio));
1346 static void btree_node_write_work(struct work_struct *work)
1348 struct btree_write_bio *wbio =
1349 container_of(work, struct btree_write_bio, work);
1350 struct bch_fs *c = wbio->wbio.c;
1351 struct btree *b = wbio->wbio.bio.bi_private;
1353 btree_bounce_free(c,
1355 wbio->wbio.used_mempool,
1358 if (wbio->wbio.failed.nr) {
1359 unsigned long flags;
1361 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1362 bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
1363 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1365 queue_work(c->wq, &c->btree_write_error_work);
1369 bio_put(&wbio->wbio.bio);
1370 btree_node_write_done(c, b);
1373 static void btree_node_write_endio(struct bio *bio)
1375 struct bch_write_bio *wbio = to_wbio(bio);
1376 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
1377 struct bch_write_bio *orig = parent ?: wbio;
1378 struct bch_fs *c = wbio->c;
1379 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
1380 unsigned long flags;
1382 if (wbio->have_ioref)
1383 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1385 if (bio->bi_status == BLK_STS_REMOVED ||
1386 bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
1387 bch2_meta_write_fault("btree")) {
1388 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1389 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1390 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1393 if (wbio->have_ioref)
1394 percpu_ref_put(&ca->io_ref);
1398 bio_endio(&parent->bio);
1400 struct btree_write_bio *wb =
1401 container_of(orig, struct btree_write_bio, wbio);
1403 INIT_WORK(&wb->work, btree_node_write_work);
1404 queue_work(system_unbound_wq, &wb->work);
1408 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1409 struct bset *i, unsigned sectors)
1411 unsigned whiteout_u64s = 0;
1414 if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
1417 ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
1419 bch2_inconsistent_error(c);
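/*
 * Do a btree node write: sort the unwritten bsets (plus whiteouts) into a
 * bounce buffer, checksum/encrypt and validate, then submit the result as a
 * new bset appended after what's already on disk (a summary of the body
 * below).
 */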
1424 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1425 enum six_lock_type lock_type_held)
1427 struct btree_write_bio *wbio;
1428 struct bset_tree *t;
1430 struct btree_node *bn = NULL;
1431 struct btree_node_entry *bne = NULL;
1433 struct bch_extent_ptr *ptr;
1434 struct sort_iter sort_iter;
1436 unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
1439 unsigned long old, new;
1440 bool validate_before_checksum = false;
1443 if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
1447 * We may only have a read lock on the btree node - the dirty bit is our
1448 * "lock" against racing with other threads that may be trying to start
1449 * a write, we do a write iff we clear the dirty bit. Since setting the
1450 * dirty bit requires a write lock, we can't race with other threads redirtying it:
1454 old = new = READ_ONCE(b->flags);
1456 if (!(old & (1 << BTREE_NODE_dirty)))
1459 if (!btree_node_may_write(b))
1462 if (old & (1 << BTREE_NODE_write_in_flight)) {
1463 btree_node_wait_on_io(b);
1467 new &= ~(1 << BTREE_NODE_dirty);
1468 new &= ~(1 << BTREE_NODE_need_write);
1469 new |= (1 << BTREE_NODE_write_in_flight);
1470 new |= (1 << BTREE_NODE_just_written);
1471 new ^= (1 << BTREE_NODE_write_idx);
1472 } while (cmpxchg_acquire(&b->flags, old, new) != old);
1474 BUG_ON(btree_node_fake(b));
1475 BUG_ON((b->will_make_reachable != 0) != !b->written);
1477 BUG_ON(b->written >= c->opts.btree_node_size);
1478 BUG_ON(b->written & (c->opts.block_size - 1));
1479 BUG_ON(bset_written(b, btree_bset_last(b)));
1480 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1481 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1483 bch2_sort_whiteouts(c, b);
1485 sort_iter_init(&sort_iter, b);
1488 ? sizeof(struct btree_node)
1489 : sizeof(struct btree_node_entry);
1491 bytes += b->whiteout_u64s * sizeof(u64);
1493 for_each_bset(b, t) {
1496 if (bset_written(b, i))
1499 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1500 sort_iter_add(&sort_iter,
1501 btree_bkey_first(b, t),
1502 btree_bkey_last(b, t));
1503 seq = max(seq, le64_to_cpu(i->journal_seq));
1506 order = get_order(bytes);
1507 data = btree_bounce_alloc(c, order, &used_mempool);
1515 bne->keys = b->data->keys;
1519 i->journal_seq = cpu_to_le64(seq);
1522 if (!btree_node_old_extent_overwrite(b)) {
1523 sort_iter_add(&sort_iter,
1524 unwritten_whiteouts_start(c, b),
1525 unwritten_whiteouts_end(c, b));
1526 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1528 memcpy_u64s(i->start,
1529 unwritten_whiteouts_start(c, b),
1531 i->u64s = cpu_to_le16(b->whiteout_u64s);
1532 SET_BSET_SEPARATE_WHITEOUTS(i, true);
1535 b->whiteout_u64s = 0;
1537 u64s = btree_node_old_extent_overwrite(b)
1538 ? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
1539 : bch2_sort_keys(i->start, &sort_iter, false);
1540 le16_add_cpu(&i->u64s, u64s);
1542 set_needs_whiteout(i, false);
1544 /* do we have data to write? */
1545 if (b->written && !i->u64s)
1548 bytes_to_write = vstruct_end(i) - data;
1549 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1551 memset(data + bytes_to_write, 0,
1552 (sectors_to_write << 9) - bytes_to_write);
1554 BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
1555 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1556 BUG_ON(i->seq != b->data->keys.seq);
1558 i->version = c->sb.version < bcachefs_metadata_version_new_versioning
1559 ? cpu_to_le16(BCH_BSET_VERSION_OLD)
1560 : cpu_to_le16(c->sb.version);
1561 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1563 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
1564 validate_before_checksum = true;
1566 /* validate_bset will be modifying: */
1567 if (le16_to_cpu(i->version) <
1568 bcachefs_metadata_version_bkey_renumber)
1569 validate_before_checksum = true;
1571 /* if we're going to be encrypting, check metadata validity first: */
1572 if (validate_before_checksum &&
1573 validate_bset_for_write(c, b, i, sectors_to_write))
1576 bset_encrypt(c, i, b->written << 9);
1578 nonce = btree_nonce(i, b->written << 9);
1581 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1583 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1585 /* if we're not encrypting, check metadata after checksumming: */
1586 if (!validate_before_checksum &&
1587 validate_bset_for_write(c, b, i, sectors_to_write))
1591 * We handle btree write errors by immediately halting the journal -
1592 * after we've done that, we can't issue any subsequent btree writes
1593 * because they might have pointers to new nodes that failed to write.
1595 * Furthermore, there's no point in doing any more btree writes because
1596 * with the journal stopped, we're never going to update the journal to
1597 * reflect that those writes were done and the data flushed from the journal:
1600 * Make sure to update b->written so bch2_btree_init_next() doesn't break:
1603 if (bch2_journal_error(&c->journal) ||
1607 trace_btree_write(b, bytes_to_write, sectors_to_write);
1609 wbio = container_of(bio_alloc_bioset(GFP_NOIO,
1610 buf_pages(data, sectors_to_write << 9),
1612 struct btree_write_bio, wbio.bio);
1613 wbio_init(&wbio->wbio.bio);
1615 wbio->wbio.order = order;
1616 wbio->wbio.used_mempool = used_mempool;
1617 wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
1618 wbio->wbio.bio.bi_end_io = btree_node_write_endio;
1619 wbio->wbio.bio.bi_private = b;
1621 if (b->level || !b->written)
1622 wbio->wbio.bio.bi_opf |= REQ_FUA;
1624 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
1627 * If we're appending to a leaf node, we don't technically need FUA -
1628 * this write just needs to be persisted before the next journal write,
1629 * which will be marked FLUSH|FUA.
1631 * Similarly if we're writing a new btree root - the pointer is going to
1632 * be in the next journal entry.
1634 * But if we're writing a new btree node (that isn't a root) or
1635 * appending to a non leaf btree node, we need either FUA or a flush
1636 * when we write the parent with the new pointer. FUA is cheaper than a
1637 * flush, and writes appending to leaf nodes aren't blocking anything so
1638 * just make all btree node writes FUA to keep things sane.
1641 bkey_copy(&k.key, &b->key);
1643 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
1644 ptr->offset += b->written;
1646 b->written += sectors_to_write;
1648 /* XXX: submitting IO with btree locks held: */
1649 bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
1652 set_btree_node_noevict(b);
1653 b->written += sectors_to_write;
1655 btree_bounce_free(c, order, used_mempool, data);
1656 btree_node_write_done(c, b);
1660 * Work that must be done with write lock held:
1662 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1664 bool invalidated_iter = false;
1665 struct btree_node_entry *bne;
1666 struct bset_tree *t;
1668 if (!btree_node_just_written(b))
1671 BUG_ON(b->whiteout_u64s);
1673 clear_btree_node_just_written(b);
1676 * Note: immediately after write, bset_written() doesn't work - the
1677 * amount of data we had to write after compaction might have been
1678 * smaller than the offset of the last bset.
1680 * However, we know that all bsets have been written here, as long as
1681 * we're still holding the write lock:
1685 * XXX: decide if we really want to unconditionally sort down to a single bset:
1689 btree_node_sort(c, b, NULL, 0, b->nsets, true);
1690 invalidated_iter = true;
1692 invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
1696 set_needs_whiteout(bset(b, t), true);
1698 bch2_btree_verify(c, b);
1701 * If later we don't unconditionally sort down to a single bset, we have
1702 * to ensure this is still true:
1704 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
1706 bne = want_new_bset(c, b);
1708 bch2_bset_init_next(c, b, bne);
1710 bch2_btree_build_aux_trees(b);
1712 return invalidated_iter;
1716 * Use this one if the node is intent locked:
1718 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1719 enum six_lock_type lock_type_held)
1721 BUG_ON(lock_type_held == SIX_LOCK_write);
1723 if (lock_type_held == SIX_LOCK_intent ||
1724 six_lock_tryupgrade(&b->lock)) {
1725 __bch2_btree_node_write(c, b, SIX_LOCK_intent);
1727 /* don't cycle lock unnecessarily: */
1728 if (btree_node_just_written(b) &&
1729 six_trylock_write(&b->lock)) {
1730 bch2_btree_post_write_cleanup(c, b);
1731 six_unlock_write(&b->lock);
1734 if (lock_type_held == SIX_LOCK_read)
1735 six_lock_downgrade(&b->lock);
1737 __bch2_btree_node_write(c, b, SIX_LOCK_read);
1741 static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
1743 struct bucket_table *tbl;
1744 struct rhash_head *pos;
1749 for_each_cached_btree(b, c, tbl, i, pos)
1750 if (test_bit(flag, &b->flags)) {
1752 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
1759 void bch2_btree_flush_all_reads(struct bch_fs *c)
1761 __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
1764 void bch2_btree_flush_all_writes(struct bch_fs *c)
1766 __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
1769 void bch2_btree_verify_flushed(struct bch_fs *c)
1771 struct bucket_table *tbl;
1772 struct rhash_head *pos;
1777 for_each_cached_btree(b, c, tbl, i, pos) {
1778 unsigned long flags = READ_ONCE(b->flags);
1780 BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
1781 (flags & (1 << BTREE_NODE_write_in_flight)));
1786 ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
1788 struct printbuf out = _PBUF(buf, PAGE_SIZE);
1789 struct bucket_table *tbl;
1790 struct rhash_head *pos;
1795 for_each_cached_btree(b, c, tbl, i, pos) {
1796 unsigned long flags = READ_ONCE(b->flags);
1797 unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;
1799 if (!(flags & (1 << BTREE_NODE_dirty)))
1802 pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu c %u p %u\n",
1804 (flags & (1 << BTREE_NODE_dirty)) != 0,
1805 (flags & (1 << BTREE_NODE_need_write)) != 0,
1808 !list_empty_careful(&b->write_blocked),
1809 b->will_make_reachable != 0,
1810 b->will_make_reachable & 1,
1811 b->writes[ idx].wait.list.first != NULL,
1812 b->writes[!idx].wait.list.first != NULL);
1816 return out.pos - buf;