1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
8 #include "btree_iter.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
18 #include "journal_reclaim.h"
19 #include "journal_seq_blacklist.h"
22 #include <trace/events/bcachefs.h>
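/*
 * Debug-only sanity check: walk the packed keys in [start, end) and verify
 * they're in order - extents may not overlap, non-extent keys must be
 * strictly increasing.
 */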
24 static void verify_no_dups(struct btree *b,
25 struct bkey_packed *start,
26 struct bkey_packed *end)
28 #ifdef CONFIG_BCACHEFS_DEBUG
29 struct bkey_packed *k;
31 for (k = start; k != end && bkey_next(k) != end; k = bkey_next(k)) {
32 struct bkey l = bkey_unpack_key(b, k);
33 struct bkey r = bkey_unpack_key(b, bkey_next(k));
35 BUG_ON(btree_node_is_extents(b)
36 ? bkey_cmp(l.p, bkey_start_pos(&r)) > 0
37 : bkey_cmp(l.p, bkey_start_pos(&r)) >= 0);
38 //BUG_ON(bkey_cmp_packed(&b->format, k, bkey_next(k)) >= 0);
43 static void clear_needs_whiteout(struct bset *i)
45 struct bkey_packed *k;
47 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
48 k->needs_whiteout = false;
51 static void set_needs_whiteout(struct bset *i)
53 struct bkey_packed *k;
55 for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
56 k->needs_whiteout = true;
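/*
 * Bounce buffers for sorting/compacting: try an opportunistic page
 * allocation first (GFP_NOWAIT), falling back to the preallocated
 * btree_bounce_pool mempool so the allocation can't fail outright.
 */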
59 static void btree_bounce_free(struct bch_fs *c, unsigned order,
60 bool used_mempool, void *p)
63 mempool_free(p, &c->btree_bounce_pool);
65 vpfree(p, PAGE_SIZE << order);
68 static void *btree_bounce_alloc(struct bch_fs *c, unsigned order,
73 BUG_ON(order > btree_page_order(c));
75 *used_mempool = false;
76 p = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOWAIT, order);
81 return mempool_alloc(&c->btree_bounce_pool, GFP_NOIO);
84 static unsigned should_compact_bset(struct btree *b, struct bset_tree *t,
86 enum compact_mode mode)
88 unsigned bset_u64s = le16_to_cpu(bset(b, t)->u64s);
89 unsigned dead_u64s = bset_u64s - b->nr.bset_u64s[t - b->set];
91 if (mode == COMPACT_LAZY) {
92 if (should_compact_bset_lazy(b, t) ||
93 (compacting && !bset_written(b, bset(b, t))))
96 if (bset_written(b, bset(b, t)))
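/*
 * Compact a node's whiteouts: deleted extent keys are dropped, whiteouts
 * still needed for the next write are gathered into a bounce buffer, sorted,
 * and stashed back in the unwritten whiteouts area; unless we were called
 * without the write lock, the bsets themselves are rewritten without them.
 */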
103 bool __bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
104 enum compact_mode mode)
106 const struct bkey_format *f = &b->format;
108 struct bkey_packed *whiteouts = NULL;
109 struct bkey_packed *u_start, *u_pos;
110 struct sort_iter sort_iter;
111 unsigned order, whiteout_u64s = 0, u64s;
112 bool used_mempool, compacting = false;
115 whiteout_u64s += should_compact_bset(b, t,
116 whiteout_u64s != 0, mode);
121 sort_iter_init(&sort_iter, b);
123 whiteout_u64s += b->whiteout_u64s;
124 order = get_order(whiteout_u64s * sizeof(u64));
126 whiteouts = btree_bounce_alloc(c, order, &used_mempool);
127 u_start = u_pos = whiteouts;
129 memcpy_u64s(u_pos, unwritten_whiteouts_start(c, b),
131 u_pos = (void *) u_pos + b->whiteout_u64s * sizeof(u64);
133 sort_iter_add(&sort_iter, u_start, u_pos);
135 for_each_bset(b, t) {
136 struct bset *i = bset(b, t);
137 struct bkey_packed *k, *n, *out, *start, *end;
138 struct btree_node_entry *src = NULL, *dst = NULL;
140 if (t != b->set && !bset_written(b, i)) {
141 src = container_of(i, struct btree_node_entry, keys);
142 dst = max(write_block(b),
143 (void *) btree_bkey_last(b, t - 1));
146 if (!should_compact_bset(b, t, compacting, mode)) {
148 memmove(dst, src, sizeof(*src) +
149 le16_to_cpu(src->keys.u64s) *
152 set_btree_bset(b, t, i);
160 end = vstruct_last(i);
163 memmove(dst, src, sizeof(*src));
165 set_btree_bset(b, t, i);
170 for (k = start; k != end; k = n) {
173 if (bkey_deleted(k) && btree_node_is_extents(b))
176 if (bkey_whiteout(k) && !k->needs_whiteout)
179 if (bkey_whiteout(k)) {
180 unreserve_whiteout(b, k);
181 memcpy_u64s(u_pos, k, bkeyp_key_u64s(f, k));
182 set_bkeyp_val_u64s(f, u_pos, 0);
183 u_pos = bkey_next(u_pos);
184 } else if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
186 out = bkey_next(out);
190 sort_iter_add(&sort_iter, u_start, u_pos);
192 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK) {
193 i->u64s = cpu_to_le16((u64 *) out - i->_data);
194 set_btree_bset_end(b, t);
195 bch2_bset_set_no_aux_tree(b, t);
199 b->whiteout_u64s = (u64 *) u_pos - (u64 *) whiteouts;
201 BUG_ON((void *) unwritten_whiteouts_start(c, b) <
202 (void *) btree_bkey_last(b, bset_tree_last(b)));
204 u64s = (btree_node_is_extents(b)
205 ? bch2_sort_extent_whiteouts
206 : bch2_sort_key_whiteouts)(unwritten_whiteouts_start(c, b),
209 BUG_ON(u64s > b->whiteout_u64s);
210 BUG_ON(u64s != b->whiteout_u64s && !btree_node_is_extents(b));
211 BUG_ON(u_pos != whiteouts && !u64s);
213 if (u64s != b->whiteout_u64s) {
214 void *src = unwritten_whiteouts_start(c, b);
216 b->whiteout_u64s = u64s;
217 memmove_u64s_up(unwritten_whiteouts_start(c, b), src, u64s);
221 unwritten_whiteouts_start(c, b),
222 unwritten_whiteouts_end(c, b));
224 btree_bounce_free(c, order, used_mempool, whiteouts);
226 if (mode != COMPACT_WRITTEN_NO_WRITE_LOCK)
227 bch2_btree_build_aux_trees(b);
229 bch_btree_keys_u64s_remaining(c, b);
230 bch2_verify_btree_nr_keys(b);
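/*
 * Strip whiteouts from the unwritten bsets, rewriting them in place;
 * returns true if anything was dropped.
 */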
235 static bool bch2_drop_whiteouts(struct btree *b)
240 for_each_bset(b, t) {
241 struct bset *i = bset(b, t);
242 struct bkey_packed *k, *n, *out, *start, *end;
244 if (!should_compact_bset(b, t, true, COMPACT_WRITTEN))
247 start = btree_bkey_first(b, t);
248 end = btree_bkey_last(b, t);
250 if (!bset_written(b, i) &&
253 max_t(struct bset *, write_block(b),
254 (void *) btree_bkey_last(b, t - 1));
256 memmove(dst, i, sizeof(struct bset));
258 set_btree_bset(b, t, i);
263 for (k = start; k != end; k = n) {
266 if (!bkey_whiteout(k)) {
268 out = bkey_next(out);
272 i->u64s = cpu_to_le16((u64 *) out - i->_data);
273 bch2_bset_set_no_aux_tree(b, t);
277 bch2_verify_btree_nr_keys(b);
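/*
 * Sort bsets [start_idx, end_idx) together into a bounce buffer, then copy
 * the result back - or, when sorting the entire node, just swap buffers -
 * and fix up the bset_tree/key-count bookkeeping.
 */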
282 static void btree_node_sort(struct bch_fs *c, struct btree *b,
283 struct btree_iter *iter,
286 bool filter_whiteouts)
288 struct btree_node *out;
289 struct sort_iter sort_iter;
291 struct bset *start_bset = bset(b, &b->set[start_idx]);
292 bool used_mempool = false;
293 u64 start_time, seq = 0;
294 unsigned i, u64s = 0, order, shift = end_idx - start_idx - 1;
295 bool sorting_entire_node = start_idx == 0 &&
298 sort_iter_init(&sort_iter, b);
300 for (t = b->set + start_idx;
301 t < b->set + end_idx;
303 u64s += le16_to_cpu(bset(b, t)->u64s);
304 sort_iter_add(&sort_iter,
305 btree_bkey_first(b, t),
306 btree_bkey_last(b, t));
309 order = sorting_entire_node
310 ? btree_page_order(c)
311 : get_order(__vstruct_bytes(struct btree_node, u64s));
313 out = btree_bounce_alloc(c, order, &used_mempool);
315 start_time = local_clock();
317 if (btree_node_is_extents(b))
318 filter_whiteouts = bset_written(b, start_bset);
320 u64s = (btree_node_is_extents(b)
322 : bch2_sort_keys)(out->keys.start,
326 out->keys.u64s = cpu_to_le16(u64s);
328 BUG_ON(vstruct_end(&out->keys) > (void *) out + (PAGE_SIZE << order));
330 if (sorting_entire_node)
331 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
334 /* Make sure we preserve bset journal_seq: */
335 for (t = b->set + start_idx; t < b->set + end_idx; t++)
336 seq = max(seq, le64_to_cpu(bset(b, t)->journal_seq));
337 start_bset->journal_seq = cpu_to_le64(seq);
339 if (sorting_entire_node) {
340 unsigned u64s = le16_to_cpu(out->keys.u64s);
342 BUG_ON(order != btree_page_order(c));
345 * Our temporary buffer is the same size as the btree node's
346 * buffer, we can just swap buffers instead of doing a big memcpy().
350 out->keys.u64s = cpu_to_le16(u64s);
352 set_btree_bset(b, b->set, &b->data->keys);
354 start_bset->u64s = out->keys.u64s;
355 memcpy_u64s(start_bset->start,
357 le16_to_cpu(out->keys.u64s));
360 for (i = start_idx + 1; i < end_idx; i++)
361 b->nr.bset_u64s[start_idx] +=
366 for (i = start_idx + 1; i < b->nsets; i++) {
367 b->nr.bset_u64s[i] = b->nr.bset_u64s[i + shift];
368 b->set[i] = b->set[i + shift];
371 for (i = b->nsets; i < MAX_BSETS; i++)
372 b->nr.bset_u64s[i] = 0;
374 set_btree_bset_end(b, &b->set[start_idx]);
375 bch2_bset_set_no_aux_tree(b, &b->set[start_idx]);
377 btree_bounce_free(c, order, used_mempool, out);
379 bch2_verify_btree_nr_keys(b);
382 void bch2_btree_sort_into(struct bch_fs *c,
386 struct btree_nr_keys nr;
387 struct btree_node_iter src_iter;
388 u64 start_time = local_clock();
390 BUG_ON(dst->nsets != 1);
392 bch2_bset_set_no_aux_tree(dst, dst->set);
394 bch2_btree_node_iter_init_from_start(&src_iter, src);
396 if (btree_node_is_extents(src))
397 nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
402 nr = bch2_sort_repack(btree_bset_first(dst),
407 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
410 set_btree_bset_end(dst, dst->set);
412 dst->nr.live_u64s += nr.live_u64s;
413 dst->nr.bset_u64s[0] += nr.bset_u64s[0];
414 dst->nr.packed_keys += nr.packed_keys;
415 dst->nr.unpacked_keys += nr.unpacked_keys;
417 bch2_verify_btree_nr_keys(dst);
420 #define SORT_CRIT (4096 / sizeof(u64))
423 * We're about to add another bset to the btree node, so if there are currently
424 * too many bsets, sort some of them together:
426 static bool btree_node_compact(struct bch_fs *c, struct btree *b,
427 struct btree_iter *iter)
429 unsigned unwritten_idx;
432 for (unwritten_idx = 0;
433 unwritten_idx < b->nsets;
435 if (!bset_written(b, bset(b, &b->set[unwritten_idx])))
438 if (b->nsets - unwritten_idx > 1) {
439 btree_node_sort(c, b, iter, unwritten_idx,
444 if (unwritten_idx > 1) {
445 btree_node_sort(c, b, iter, 0, unwritten_idx, false);
452 void bch2_btree_build_aux_trees(struct btree *b)
457 bch2_bset_build_aux_tree(b, t,
458 !bset_written(b, bset(b, t)) &&
459 t == bset_tree_last(b));
463 * bch2_btree_init_next - initialize a new (unwritten) bset that can then be inserted into
466 * Safe to call if there already is an unwritten bset - will only add a new bset
467 * if @b doesn't already have one.
469 * If we end up sorting (which invalidates iterators), @iter is reinitialized.
471 void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
472 struct btree_iter *iter)
474 struct btree_node_entry *bne;
477 EBUG_ON(!(b->lock.state.seq & 1));
478 EBUG_ON(iter && iter->l[b->level].b != b);
480 did_sort = btree_node_compact(c, b, iter);
482 bne = want_new_bset(c, b);
484 bch2_bset_init_next(c, b, bne);
486 bch2_btree_build_aux_trees(b);
488 if (iter && did_sort)
489 bch2_btree_iter_reinit_node(iter, b);
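/*
 * Nonce used for checksumming/encrypting a bset: derived from its sector
 * offset within the node and the bset's seq/journal_seq fields.
 */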
492 static struct nonce btree_nonce(struct bset *i, unsigned offset)
494 return (struct nonce) {{
495 [0] = cpu_to_le32(offset),
496 [1] = ((__le32 *) &i->seq)[0],
497 [2] = ((__le32 *) &i->seq)[1],
498 [3] = ((__le32 *) &i->journal_seq)[0] ^ BCH_NONCE_BTREE,
502 static void bset_encrypt(struct bch_fs *c, struct bset *i, unsigned offset)
504 struct nonce nonce = btree_nonce(i, offset);
507 struct btree_node *bn = container_of(i, struct btree_node, keys);
508 unsigned bytes = (void *) &bn->keys - (void *) &bn->flags;
510 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, &bn->flags,
513 nonce = nonce_add(nonce, round_up(bytes, CHACHA_BLOCK_SIZE));
516 bch2_encrypt(c, BSET_CSUM_TYPE(i), nonce, i->_data,
517 vstruct_end(i) - (void *) i->_data);
520 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
521 struct btree *b, struct bset *i,
522 unsigned offset, int write)
524 pr_buf(out, "error validating btree node %s"
525 "at btree %u level %u/%u\n"
526 "pos %llu:%llu node offset %u",
527 write ? "before write " : "",
528 b->btree_id, b->level,
529 c->btree_roots[b->btree_id].level,
530 b->key.k.p.inode, b->key.k.p.offset,
533 pr_buf(out, " bset u64s %u", le16_to_cpu(i->u64s));
536 enum btree_err_type {
538 BTREE_ERR_WANT_RETRY,
539 BTREE_ERR_MUST_RETRY,
543 enum btree_validate_ret {
544 BTREE_RETRY_READ = 64,
547 #define btree_err(type, c, b, i, msg, ...) \
551 struct printbuf out = PBUF(_buf); \
553 btree_err_msg(&out, c, b, i, b->written, write); \
554 pr_buf(&out, ": " msg, ##__VA_ARGS__); \
556 if (type == BTREE_ERR_FIXABLE && \
558 !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags)) { \
559 mustfix_fsck_err(c, "%s", _buf); \
565 bch_err(c, "%s", _buf); \
568 case BTREE_ERR_FIXABLE: \
569 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
571 case BTREE_ERR_WANT_RETRY: \
573 ret = BTREE_RETRY_READ; \
577 case BTREE_ERR_MUST_RETRY: \
578 ret = BTREE_RETRY_READ; \
580 case BTREE_ERR_FATAL: \
581 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
586 bch_err(c, "corrupt metadata before write: %s", _buf); \
588 if (bch2_fs_inconsistent(c)) { \
589 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
598 #define btree_err_on(cond, ...) ((cond) ? btree_err(__VA_ARGS__) : false)
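/*
 * Validate a single bset as it's read in: verify the node header fields on
 * the first bset, then walk the keys, fixing up or dropping invalid ones
 * where possible and recording where the separate whiteouts (if any) end
 * via *whiteout_u64s.
 */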
600 static int validate_bset(struct bch_fs *c, struct btree *b,
601 struct bset *i, unsigned sectors,
602 unsigned *whiteout_u64s, int write,
605 struct bkey_packed *k, *prev = NULL;
606 struct bpos prev_pos = POS_MIN;
607 bool seen_non_whiteout = false;
612 if (i == &b->data->keys) {
613 /* These indicate that we read the wrong btree node: */
614 btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
615 BTREE_ERR_MUST_RETRY, c, b, i,
616 "incorrect btree id");
618 btree_err_on(BTREE_NODE_LEVEL(b->data) != b->level,
619 BTREE_ERR_MUST_RETRY, c, b, i,
622 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN) {
623 u64 *p = (u64 *) &b->data->ptr;
626 bch2_bpos_swab(&b->data->min_key);
627 bch2_bpos_swab(&b->data->max_key);
630 btree_err_on(bkey_cmp(b->data->max_key, b->key.k.p),
631 BTREE_ERR_MUST_RETRY, c, b, i,
632 "incorrect max key");
634 /* XXX: ideally we would be validating min_key too */
637 * not correct anymore, due to btree node write error
640 * need to add b->data->seq to btree keys and verify
643 btree_err_on(!extent_contains_ptr(bkey_i_to_s_c_extent(&b->key),
645 BTREE_ERR_FATAL, c, b, i,
646 "incorrect backpointer");
648 err = bch2_bkey_format_validate(&b->data->format);
650 BTREE_ERR_FATAL, c, b, i,
651 "invalid bkey format: %s", err);
654 version = le16_to_cpu(i->version);
655 btree_err_on((version != BCH_BSET_VERSION_OLD &&
656 version < bcachefs_metadata_version_min) ||
657 version >= bcachefs_metadata_version_max,
658 BTREE_ERR_FATAL, c, b, i,
659 "unsupported bset version");
661 if (btree_err_on(b->written + sectors > c->opts.btree_node_size,
662 BTREE_ERR_FIXABLE, c, b, i,
663 "bset past end of btree node")) {
668 btree_err_on(b->written && !i->u64s,
669 BTREE_ERR_FIXABLE, c, b, i,
672 if (!BSET_SEPARATE_WHITEOUTS(i)) {
673 seen_non_whiteout = true;
678 k != vstruct_last(i);) {
683 if (btree_err_on(!k->u64s,
684 BTREE_ERR_FIXABLE, c, b, i,
685 "KEY_U64s 0: %zu bytes of metadata lost",
686 vstruct_end(i) - (void *) k)) {
687 i->u64s = cpu_to_le16((u64 *) k - i->_data);
691 if (btree_err_on(bkey_next(k) > vstruct_last(i),
692 BTREE_ERR_FIXABLE, c, b, i,
693 "key extends past end of bset")) {
694 i->u64s = cpu_to_le16((u64 *) k - i->_data);
698 if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
699 BTREE_ERR_FIXABLE, c, b, i,
700 "invalid bkey format %u", k->format)) {
701 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
702 memmove_u64s_down(k, bkey_next(k),
703 (u64 *) vstruct_end(i) - (u64 *) k);
707 if (BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN)
708 bch2_bkey_swab(&b->format, k);
711 version < bcachefs_metadata_version_bkey_renumber)
712 bch2_bkey_renumber(btree_node_type(b), k, write);
714 u = bkey_disassemble(b, k, &tmp);
716 invalid = __bch2_bkey_invalid(c, u, btree_node_type(b)) ?:
717 bch2_bkey_in_btree_node(b, u) ?:
718 (write ? bch2_bkey_val_invalid(c, u) : NULL);
722 bch2_bkey_val_to_text(&PBUF(buf), c, u);
723 btree_err(BTREE_ERR_FIXABLE, c, b, i,
724 "invalid bkey:\n%s\n%s", invalid, buf);
726 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
727 memmove_u64s_down(k, bkey_next(k),
728 (u64 *) vstruct_end(i) - (u64 *) k);
733 version < bcachefs_metadata_version_bkey_renumber)
734 bch2_bkey_renumber(btree_node_type(b), k, write);
737 * with the separate whiteouts thing (used for extents), the
738 * second set of keys actually can have whiteouts too, so we
739 * can't solely go off bkey_whiteout()...
742 if (!seen_non_whiteout &&
743 (!bkey_whiteout(k) ||
744 (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
745 *whiteout_u64s = k->_data - i->_data;
746 seen_non_whiteout = true;
747 } else if (bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0) {
748 btree_err(BTREE_ERR_FATAL, c, b, i,
749 "keys out of order: %llu:%llu > %llu:%llu",
753 bkey_start_offset(u.k));
754 /* XXX: repair this */
762 SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);
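/*
 * Read path: validate each bset in the node (checksums, journal seq
 * blacklists, key validity), then sort all the bsets together into a single
 * bset, dropping any remaining invalid keys.
 */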
767 int bch2_btree_node_read_done(struct bch_fs *c, struct btree *b, bool have_retry)
769 struct btree_node_entry *bne;
770 struct btree_node_iter_large *iter;
771 struct btree_node *sorted;
772 struct bkey_packed *k;
774 bool used_mempool, blacklisted;
776 int ret, retry_read = 0, write = READ;
778 iter = mempool_alloc(&c->fill_iter, GFP_NOIO);
781 if (bch2_meta_read_fault("btree"))
782 btree_err(BTREE_ERR_MUST_RETRY, c, b, NULL,
785 btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
786 BTREE_ERR_MUST_RETRY, c, b, NULL,
789 btree_err_on(!b->data->keys.seq,
790 BTREE_ERR_MUST_RETRY, c, b, NULL,
793 while (b->written < c->opts.btree_node_size) {
794 unsigned sectors, whiteout_u64s = 0;
796 struct bch_csum csum;
797 bool first = !b->written;
802 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
803 BTREE_ERR_WANT_RETRY, c, b, i,
804 "unknown checksum type");
806 nonce = btree_nonce(i, b->written << 9);
807 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
809 btree_err_on(bch2_crc_cmp(csum, b->data->csum),
810 BTREE_ERR_WANT_RETRY, c, b, i,
813 bset_encrypt(c, i, b->written << 9);
815 sectors = vstruct_sectors(b->data, c->block_bits);
817 btree_node_set_format(b, b->data->format);
819 bne = write_block(b);
822 if (i->seq != b->data->keys.seq)
825 btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
826 BTREE_ERR_WANT_RETRY, c, b, i,
827 "unknown checksum type");
829 nonce = btree_nonce(i, b->written << 9);
830 csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
832 btree_err_on(bch2_crc_cmp(csum, bne->csum),
833 BTREE_ERR_WANT_RETRY, c, b, i,
836 bset_encrypt(c, i, b->written << 9);
838 sectors = vstruct_sectors(bne, c->block_bits);
841 ret = validate_bset(c, b, i, sectors, &whiteout_u64s,
846 b->written += sectors;
848 blacklisted = bch2_journal_seq_is_blacklisted(c,
849 le64_to_cpu(i->journal_seq),
852 btree_err_on(blacklisted && first,
853 BTREE_ERR_FIXABLE, c, b, i,
854 "first btree node bset has blacklisted journal seq");
855 if (blacklisted && !first)
858 bch2_btree_node_iter_large_push(iter, b,
860 vstruct_idx(i, whiteout_u64s));
862 bch2_btree_node_iter_large_push(iter, b,
863 vstruct_idx(i, whiteout_u64s),
867 for (bne = write_block(b);
868 bset_byte_offset(b, bne) < btree_bytes(c);
869 bne = (void *) bne + block_bytes(c))
870 btree_err_on(bne->keys.seq == b->data->keys.seq,
871 BTREE_ERR_WANT_RETRY, c, b, NULL,
872 "found bset signature after last bset");
874 sorted = btree_bounce_alloc(c, btree_page_order(c), &used_mempool);
875 sorted->keys.u64s = 0;
877 set_btree_bset(b, b->set, &b->data->keys);
879 b->nr = btree_node_is_extents(b)
880 ? bch2_extent_sort_fix_overlapping(c, &sorted->keys, b, iter)
881 : bch2_key_sort_fix_overlapping(&sorted->keys, b, iter);
883 u64s = le16_to_cpu(sorted->keys.u64s);
885 sorted->keys.u64s = cpu_to_le16(u64s);
886 swap(sorted, b->data);
887 set_btree_bset(b, b->set, &b->data->keys);
890 BUG_ON(b->nr.live_u64s != u64s);
892 btree_bounce_free(c, btree_page_order(c), used_mempool, sorted);
895 for (k = i->start; k != vstruct_last(i);) {
897 struct bkey_s_c u = bkey_disassemble(b, k, &tmp);
898 const char *invalid = bch2_bkey_val_invalid(c, u);
901 (inject_invalid_keys(c) &&
902 !bversion_cmp(u.k->version, MAX_VERSION))) {
905 bch2_bkey_val_to_text(&PBUF(buf), c, u);
906 btree_err(BTREE_ERR_FIXABLE, c, b, i,
907 "invalid bkey %s: %s", buf, invalid);
909 btree_keys_account_key_drop(&b->nr, 0, k);
911 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
912 memmove_u64s_down(k, bkey_next(k),
913 (u64 *) vstruct_end(i) - (u64 *) k);
914 set_btree_bset_end(b, b->set);
921 bch2_bset_build_aux_tree(b, b->set, false);
923 set_needs_whiteout(btree_bset_first(b));
925 btree_node_reset_sib_u64s(b);
927 mempool_free(iter, &c->fill_iter);
930 if (ret == BTREE_RETRY_READ) {
933 bch2_inconsistent_error(c);
934 set_btree_node_read_error(b);
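/*
 * Read completion work: on error, retry the read from another replica if one
 * is available; otherwise mark the node as having a read error.
 */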
939 static void btree_node_read_work(struct work_struct *work)
941 struct btree_read_bio *rb =
942 container_of(work, struct btree_read_bio, work);
943 struct bch_fs *c = rb->c;
944 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
945 struct btree *b = rb->bio.bi_private;
946 struct bio *bio = &rb->bio;
947 struct bch_io_failures failed = { .nr = 0 };
952 bch_info(c, "retrying read");
953 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
954 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
956 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
957 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
958 bio->bi_iter.bi_size = btree_bytes(c);
960 if (rb->have_ioref) {
961 bio_set_dev(bio, ca->disk_sb.bdev);
962 submit_bio_wait(bio);
964 bio->bi_status = BLK_STS_REMOVED;
967 bch2_dev_io_err_on(bio->bi_status, ca, "btree read");
969 percpu_ref_put(&ca->io_ref);
970 rb->have_ioref = false;
972 bch2_mark_io_failure(&failed, &rb->pick);
974 can_retry = bch2_bkey_pick_read_device(c,
975 bkey_i_to_s_c(&b->key),
976 &failed, &rb->pick) > 0;
978 if (!bio->bi_status &&
979 !bch2_btree_node_read_done(c, b, can_retry))
983 set_btree_node_read_error(b);
988 bch2_time_stats_update(&c->times[BCH_TIME_btree_node_read],
991 clear_btree_node_read_in_flight(b);
992 wake_up_bit(&b->flags, BTREE_NODE_read_in_flight);
995 static void btree_node_read_endio(struct bio *bio)
997 struct btree_read_bio *rb =
998 container_of(bio, struct btree_read_bio, bio);
999 struct bch_fs *c = rb->c;
1001 if (rb->have_ioref) {
1002 struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
1003 bch2_latency_acct(ca, rb->start_time, READ);
1006 queue_work(system_unbound_wq, &rb->work);
1009 void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
1012 struct extent_ptr_decoded pick;
1013 struct btree_read_bio *rb;
1018 trace_btree_read(c, b);
1020 ret = bch2_bkey_pick_read_device(c, bkey_i_to_s_c(&b->key),
1022 if (bch2_fs_fatal_err_on(ret <= 0, c,
1023 "btree node read error: no device to read from")) {
1024 set_btree_node_read_error(b);
1028 ca = bch_dev_bkey_exists(c, pick.ptr.dev);
1030 bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
1033 rb = container_of(bio, struct btree_read_bio, bio);
1035 rb->start_time = local_clock();
1036 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
1038 INIT_WORK(&rb->work, btree_node_read_work);
1039 bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
1040 bio->bi_iter.bi_sector = pick.ptr.offset;
1041 bio->bi_end_io = btree_node_read_endio;
1042 bio->bi_private = b;
1043 bch2_bio_map(bio, b->data, btree_bytes(c));
1045 set_btree_node_read_in_flight(b);
1047 if (rb->have_ioref) {
1048 this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_BTREE],
1050 bio_set_dev(bio, ca->disk_sb.bdev);
1053 submit_bio_wait(bio);
1055 bio->bi_private = b;
1056 btree_node_read_work(&rb->work);
1061 bio->bi_status = BLK_STS_REMOVED;
1064 btree_node_read_work(&rb->work);
1066 queue_work(system_unbound_wq, &rb->work);
1071 int bch2_btree_root_read(struct bch_fs *c, enum btree_id id,
1072 const struct bkey_i *k, unsigned level)
1078 closure_init_stack(&cl);
1081 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1085 b = bch2_btree_node_mem_alloc(c);
1086 bch2_btree_cache_cannibalize_unlock(c);
1090 bkey_copy(&b->key, k);
1091 BUG_ON(bch2_btree_node_hash_insert(&c->btree_cache, b, level, id));
1093 bch2_btree_node_read(c, b, true);
1095 if (btree_node_read_error(b)) {
1096 bch2_btree_node_hash_remove(&c->btree_cache, b);
1098 mutex_lock(&c->btree_cache.lock);
1099 list_move(&b->list, &c->btree_cache.freeable);
1100 mutex_unlock(&c->btree_cache.lock);
1106 bch2_btree_set_root_for_read(c, b);
1108 six_unlock_write(&b->lock);
1109 six_unlock_intent(&b->lock);
1114 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
1115 struct btree_write *w)
1117 unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
1125 } while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
1128 closure_put(&((struct btree_update *) new)->cl);
1130 bch2_journal_pin_drop(&c->journal, &w->journal);
1131 closure_wake_up(&w->wait);
1134 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
1136 struct btree_write *w = btree_prev_write(b);
1138 bch2_btree_complete_write(c, b, w);
1139 btree_node_io_unlock(b);
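/*
 * Write error path: re-traverse to the node and rewrite its key with the
 * pointers to the failed device(s) dropped.
 */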
1142 static void bch2_btree_node_write_error(struct bch_fs *c,
1143 struct btree_write_bio *wbio)
1145 struct btree *b = wbio->wbio.bio.bi_private;
1146 __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
1147 struct bkey_i_btree_ptr *new_key;
1148 struct bkey_s_btree_ptr bp;
1149 struct bch_extent_ptr *ptr;
1150 struct btree_trans trans;
1151 struct btree_iter *iter;
1154 bch2_trans_init(&trans, c, 0, 0);
1156 iter = bch2_trans_get_node_iter(&trans, b->btree_id, b->key.k.p,
1157 BTREE_MAX_DEPTH, b->level, 0);
1159 ret = bch2_btree_iter_traverse(iter);
1163 /* has node been freed? */
1164 if (iter->l[b->level].b != b) {
1165 /* node has been freed: */
1166 BUG_ON(!btree_node_dying(b));
1170 BUG_ON(!btree_node_hashed(b));
1172 bkey_copy(&tmp.k, &b->key);
1174 new_key = bkey_i_to_btree_ptr(&tmp.k);
1175 bp = btree_ptr_i_to_s(new_key);
1177 bch2_bkey_drop_ptrs(bkey_i_to_s(&tmp.k), ptr,
1178 bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
1180 if (!bch2_bkey_nr_ptrs(bp.s_c))
1183 ret = bch2_btree_node_update_key(c, iter, b, new_key);
1189 bch2_trans_exit(&trans);
1190 bio_put(&wbio->wbio.bio);
1191 btree_node_write_done(c, b);
1194 set_btree_node_noevict(b);
1195 bch2_fs_fatal_error(c, "fatal error writing btree node");
1199 void bch2_btree_write_error_work(struct work_struct *work)
1201 struct bch_fs *c = container_of(work, struct bch_fs,
1202 btree_write_error_work);
1206 spin_lock_irq(&c->btree_write_error_lock);
1207 bio = bio_list_pop(&c->btree_write_error_list);
1208 spin_unlock_irq(&c->btree_write_error_lock);
1213 bch2_btree_node_write_error(c,
1214 container_of(bio, struct btree_write_bio, wbio.bio));
1218 static void btree_node_write_work(struct work_struct *work)
1220 struct btree_write_bio *wbio =
1221 container_of(work, struct btree_write_bio, work);
1222 struct bch_fs *c = wbio->wbio.c;
1223 struct btree *b = wbio->wbio.bio.bi_private;
1225 btree_bounce_free(c,
1227 wbio->wbio.used_mempool,
1230 if (wbio->wbio.failed.nr) {
1231 unsigned long flags;
1233 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1234 bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
1235 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1237 queue_work(c->wq, &c->btree_write_error_work);
1241 bio_put(&wbio->wbio.bio);
1242 btree_node_write_done(c, b);
1245 static void btree_node_write_endio(struct bio *bio)
1247 struct bch_write_bio *wbio = to_wbio(bio);
1248 struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
1249 struct bch_write_bio *orig = parent ?: wbio;
1250 struct bch_fs *c = wbio->c;
1251 struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
1252 unsigned long flags;
1254 if (wbio->have_ioref)
1255 bch2_latency_acct(ca, wbio->submit_time, WRITE);
1257 if (bio->bi_status == BLK_STS_REMOVED ||
1258 bch2_dev_io_err_on(bio->bi_status, ca, "btree write") ||
1259 bch2_meta_write_fault("btree")) {
1260 spin_lock_irqsave(&c->btree_write_error_lock, flags);
1261 bch2_dev_list_add_dev(&orig->failed, wbio->dev);
1262 spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
1265 if (wbio->have_ioref)
1266 percpu_ref_put(&ca->io_ref);
1270 bio_endio(&parent->bio);
1272 struct btree_write_bio *wb =
1273 container_of(orig, struct btree_write_bio, wbio);
1275 INIT_WORK(&wb->work, btree_node_write_work);
1276 queue_work(system_unbound_wq, &wb->work);
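/*
 * Sanity check what we're about to write; a failure here means the in-memory
 * node is corrupt, so mark the filesystem inconsistent rather than write it
 * out.
 */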
1280 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
1281 struct bset *i, unsigned sectors)
1283 unsigned whiteout_u64s = 0;
1286 if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
1289 ret = validate_bset(c, b, i, sectors, &whiteout_u64s, WRITE, false);
1291 bch2_inconsistent_error(c);
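/*
 * Write out the node's dirty (unwritten) bsets: clear the dirty bit, compact
 * whiteouts, sort the unwritten keys into a bounce buffer, checksum/encrypt,
 * and submit the write to all replicas.
 */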
1296 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1297 enum six_lock_type lock_type_held)
1299 struct btree_write_bio *wbio;
1300 struct bset_tree *t;
1302 struct btree_node *bn = NULL;
1303 struct btree_node_entry *bne = NULL;
1305 struct bch_extent_ptr *ptr;
1306 struct sort_iter sort_iter;
1308 unsigned bytes_to_write, sectors_to_write, order, bytes, u64s;
1311 unsigned long old, new;
1312 bool validate_before_checksum = false;
1315 if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
1319 * We may only have a read lock on the btree node - the dirty bit is our
1320 * "lock" against racing with other threads that may be trying to start
1321 * a write, we do a write iff we clear the dirty bit. Since setting the
1322 * dirty bit requires a write lock, we can't race with other threads trying to start a write:
1326 old = new = READ_ONCE(b->flags);
1328 if (!(old & (1 << BTREE_NODE_dirty)))
1331 if (!btree_node_may_write(b))
1334 if (old & (1 << BTREE_NODE_write_in_flight)) {
1335 btree_node_wait_on_io(b);
1339 new &= ~(1 << BTREE_NODE_dirty);
1340 new &= ~(1 << BTREE_NODE_need_write);
1341 new |= (1 << BTREE_NODE_write_in_flight);
1342 new |= (1 << BTREE_NODE_just_written);
1343 new ^= (1 << BTREE_NODE_write_idx);
1344 } while (cmpxchg_acquire(&b->flags, old, new) != old);
1346 BUG_ON(btree_node_fake(b));
1347 BUG_ON((b->will_make_reachable != 0) != !b->written);
1349 BUG_ON(b->written >= c->opts.btree_node_size);
1350 BUG_ON(b->written & (c->opts.block_size - 1));
1351 BUG_ON(bset_written(b, btree_bset_last(b)));
1352 BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
1353 BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
1356 * We can't block on six_lock_write() here; another thread might be
1357 * trying to get a journal reservation with read locks held, and getting
1358 * a journal reservation might be blocked on flushing the journal and
1359 * doing btree writes:
1361 if (lock_type_held == SIX_LOCK_intent &&
1362 six_trylock_write(&b->lock)) {
1363 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN);
1364 six_unlock_write(&b->lock);
1366 __bch2_compact_whiteouts(c, b, COMPACT_WRITTEN_NO_WRITE_LOCK);
1369 BUG_ON(b->uncompacted_whiteout_u64s);
1371 sort_iter_init(&sort_iter, b);
1374 ? sizeof(struct btree_node)
1375 : sizeof(struct btree_node_entry);
1377 bytes += b->whiteout_u64s * sizeof(u64);
1379 for_each_bset(b, t) {
1382 if (bset_written(b, i))
1385 bytes += le16_to_cpu(i->u64s) * sizeof(u64);
1386 sort_iter_add(&sort_iter,
1387 btree_bkey_first(b, t),
1388 btree_bkey_last(b, t));
1389 seq = max(seq, le64_to_cpu(i->journal_seq));
1392 order = get_order(bytes);
1393 data = btree_bounce_alloc(c, order, &used_mempool);
1401 bne->keys = b->data->keys;
1405 i->journal_seq = cpu_to_le64(seq);
1408 if (!btree_node_is_extents(b)) {
1409 sort_iter_add(&sort_iter,
1410 unwritten_whiteouts_start(c, b),
1411 unwritten_whiteouts_end(c, b));
1412 SET_BSET_SEPARATE_WHITEOUTS(i, false);
1414 memcpy_u64s(i->start,
1415 unwritten_whiteouts_start(c, b),
1417 i->u64s = cpu_to_le16(b->whiteout_u64s);
1418 SET_BSET_SEPARATE_WHITEOUTS(i, true);
1421 b->whiteout_u64s = 0;
1423 u64s = btree_node_is_extents(b)
1424 ? bch2_sort_extents(vstruct_last(i), &sort_iter, false)
1425 : bch2_sort_keys(i->start, &sort_iter, false);
1426 le16_add_cpu(&i->u64s, u64s);
1428 clear_needs_whiteout(i);
1430 /* do we have data to write? */
1431 if (b->written && !i->u64s)
1434 bytes_to_write = vstruct_end(i) - data;
1435 sectors_to_write = round_up(bytes_to_write, block_bytes(c)) >> 9;
1437 memset(data + bytes_to_write, 0,
1438 (sectors_to_write << 9) - bytes_to_write);
1440 BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
1441 BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
1442 BUG_ON(i->seq != b->data->keys.seq);
1444 i->version = c->sb.version < bcachefs_metadata_version_new_versioning
1445 ? cpu_to_le16(BCH_BSET_VERSION_OLD)
1446 : cpu_to_le16(c->sb.version);
1447 SET_BSET_CSUM_TYPE(i, bch2_meta_checksum_type(c));
1449 if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(i)))
1450 validate_before_checksum = true;
1452 /* validate_bset will be modifying: */
1453 if (le16_to_cpu(i->version) <
1454 bcachefs_metadata_version_bkey_renumber)
1455 validate_before_checksum = true;
1457 /* if we're going to be encrypting, check metadata validity first: */
1458 if (validate_before_checksum &&
1459 validate_bset_for_write(c, b, i, sectors_to_write))
1462 bset_encrypt(c, i, b->written << 9);
1464 nonce = btree_nonce(i, b->written << 9);
1467 bn->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bn);
1469 bne->csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
1471 /* if we're not encrypting, check metadata after checksumming: */
1472 if (!validate_before_checksum &&
1473 validate_bset_for_write(c, b, i, sectors_to_write))
1477 * We handle btree write errors by immediately halting the journal -
1478 * after we've done that, we can't issue any subsequent btree writes
1479 * because they might have pointers to new nodes that failed to write.
1481 * Furthermore, there's no point in doing any more btree writes because
1482 * with the journal stopped, we're never going to update the journal to
1483 * reflect that those writes were done and the data flushed from the journal.
1486 * Make sure to update b->written so bch2_btree_init_next() doesn't break:
1489 if (bch2_journal_error(&c->journal) ||
1493 trace_btree_write(b, bytes_to_write, sectors_to_write);
1495 wbio = container_of(bio_alloc_bioset(GFP_NOIO,
1496 buf_pages(data, sectors_to_write << 9),
1498 struct btree_write_bio, wbio.bio);
1499 wbio_init(&wbio->wbio.bio);
1501 wbio->wbio.order = order;
1502 wbio->wbio.used_mempool = used_mempool;
1503 wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
1504 wbio->wbio.bio.bi_end_io = btree_node_write_endio;
1505 wbio->wbio.bio.bi_private = b;
1507 if (b->level || !b->written)
1508 wbio->wbio.bio.bi_opf |= REQ_FUA;
1510 bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);
1513 * If we're appending to a leaf node, we don't technically need FUA -
1514 * this write just needs to be persisted before the next journal write,
1515 * which will be marked FLUSH|FUA.
1517 * Similarly if we're writing a new btree root - the pointer is going to
1518 * be in the next journal entry.
1520 * But if we're writing a new btree node (that isn't a root) or
1521 * appending to a non leaf btree node, we need either FUA or a flush
1522 * when we write the parent with the new pointer. FUA is cheaper than a
1523 * flush, and writes appending to leaf nodes aren't blocking anything so
1524 * just make all btree node writes FUA to keep things sane.
1527 bkey_copy(&k.key, &b->key);
1529 bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&k.key)), ptr)
1530 ptr->offset += b->written;
1532 b->written += sectors_to_write;
1534 bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
1537 set_btree_node_noevict(b);
1538 b->written += sectors_to_write;
1540 btree_bounce_free(c, order, used_mempool, data);
1541 btree_node_write_done(c, b);
1545 * Work that must be done with write lock held:
1547 bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
1549 bool invalidated_iter = false;
1550 struct btree_node_entry *bne;
1551 struct bset_tree *t;
1553 if (!btree_node_just_written(b))
1556 BUG_ON(b->whiteout_u64s);
1557 BUG_ON(b->uncompacted_whiteout_u64s);
1559 clear_btree_node_just_written(b);
1562 * Note: immediately after write, bset_written() doesn't work - the
1563 * amount of data we had to write after compaction might have been
1564 * smaller than the offset of the last bset.
1566 * However, we know that all bsets have been written here, as long as
1567 * we're still holding the write lock:
1571 * XXX: decide if we really want to unconditionally sort down to a single bset:
1575 btree_node_sort(c, b, NULL, 0, b->nsets, true);
1576 invalidated_iter = true;
1578 invalidated_iter = bch2_drop_whiteouts(b);
1582 set_needs_whiteout(bset(b, t));
1584 bch2_btree_verify(c, b);
1587 * If later we don't unconditionally sort down to a single bset, we have
1588 * to ensure this is still true:
1590 BUG_ON((void *) btree_bkey_last(b, bset_tree_last(b)) > write_block(b));
1592 bne = want_new_bset(c, b);
1594 bch2_bset_init_next(c, b, bne);
1596 bch2_btree_build_aux_trees(b);
1598 return invalidated_iter;
1602 * Use this one if the node is intent locked:
1604 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
1605 enum six_lock_type lock_type_held)
1607 BUG_ON(lock_type_held == SIX_LOCK_write);
1609 if (lock_type_held == SIX_LOCK_intent ||
1610 six_lock_tryupgrade(&b->lock)) {
1611 __bch2_btree_node_write(c, b, SIX_LOCK_intent);
1613 /* don't cycle lock unnecessarily: */
1614 if (btree_node_just_written(b) &&
1615 six_trylock_write(&b->lock)) {
1616 bch2_btree_post_write_cleanup(c, b);
1617 six_unlock_write(&b->lock);
1620 if (lock_type_held == SIX_LOCK_read)
1621 six_lock_downgrade(&b->lock);
1623 __bch2_btree_node_write(c, b, SIX_LOCK_read);
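/* Wait for all in-flight reads or writes (per @flag) on cached btree nodes: */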
1627 static void __bch2_btree_flush_all(struct bch_fs *c, unsigned flag)
1629 struct bucket_table *tbl;
1630 struct rhash_head *pos;
1635 for_each_cached_btree(b, c, tbl, i, pos)
1636 if (test_bit(flag, &b->flags)) {
1638 wait_on_bit_io(&b->flags, flag, TASK_UNINTERRUPTIBLE);
1645 void bch2_btree_flush_all_reads(struct bch_fs *c)
1647 __bch2_btree_flush_all(c, BTREE_NODE_read_in_flight);
1650 void bch2_btree_flush_all_writes(struct bch_fs *c)
1652 __bch2_btree_flush_all(c, BTREE_NODE_write_in_flight);
1655 void bch2_btree_verify_flushed(struct bch_fs *c)
1657 struct bucket_table *tbl;
1658 struct rhash_head *pos;
1663 for_each_cached_btree(b, c, tbl, i, pos) {
1664 unsigned long flags = READ_ONCE(b->flags);
1666 BUG_ON((flags & (1 << BTREE_NODE_dirty)) ||
1667 (flags & (1 << BTREE_NODE_write_in_flight)));
1672 ssize_t bch2_dirty_btree_nodes_print(struct bch_fs *c, char *buf)
1674 struct printbuf out = _PBUF(buf, PAGE_SIZE);
1675 struct bucket_table *tbl;
1676 struct rhash_head *pos;
1681 for_each_cached_btree(b, c, tbl, i, pos) {
1682 unsigned long flags = READ_ONCE(b->flags);
1683 unsigned idx = (flags & (1 << BTREE_NODE_write_idx)) != 0;
1685 if (!(flags & (1 << BTREE_NODE_dirty)))
1688 pr_buf(&out, "%p d %u n %u l %u w %u b %u r %u:%lu c %u p %u\n",
1690 (flags & (1 << BTREE_NODE_dirty)) != 0,
1691 (flags & (1 << BTREE_NODE_need_write)) != 0,
1694 !list_empty_careful(&b->write_blocked),
1695 b->will_make_reachable != 0,
1696 b->will_make_reachable & 1,
1697 b->writes[ idx].wait.list.first != NULL,
1698 b->writes[!idx].wait.list.first != NULL);
1702 return out.pos - buf;