// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "error.h"

#include <linux/mm.h>
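
/*
 * Check whether @k has a pointer that, run through bch2_extent_ptr_to_bp(),
 * produces exactly the given bucket/backpointer pair:
 */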
static bool extent_matches_bp(struct bch_fs *c,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c k,
			      struct bpos bucket,
			      struct bch_backpointer bp)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket2;
		struct bch_backpointer bp2;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
				      &bucket2, &bp2);
		if (bpos_eq(bucket, bucket2) &&
		    !memcmp(&bp, &bp2, sizeof(bp)))
			return true;
	}

	return false;
}
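
/*
 * bkey_invalid hook for KEY_TYPE_backpointer: a backpointer key's position
 * must match the bucket and bucket_offset encoded in its value:
 */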
int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     enum bkey_invalid_flags flags,
			     struct printbuf *err)
{
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);

	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
		prt_str(err, "backpointer at wrong pos");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
	prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
		   bch2_btree_ids[bp->btree_id],
		   bp->level,
		   (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
		   (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
		   bp->bucket_len);
	bch2_bpos_to_text(out, bp->pos);
}
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	prt_str(out, "bucket=");
	bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
	prt_str(out, " ");

	bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}
void bch2_backpointer_swab(struct bkey_s k)
{
	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

	/* bucket_offset is a 40-bit field, so swab32() would truncate it: */
	bp.v->bucket_offset	= swab40(bp.v->bucket_offset);
	bp.v->bucket_len	= swab32(bp.v->bucket_len);
	bch2_bpos_swab(&bp.v->pos);
}
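
/*
 * Report a backpointer inconsistency found at update time: an existing
 * backpointer on insert, or a missing/mismatched one on delete. Before the
 * check_extents_to_backpointers recovery pass has run this is expected and
 * will be repaired, so it isn't flagged as a filesystem inconsistency:
 */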
static noinline int backpointer_mod_err(struct btree_trans *trans,
					struct bch_backpointer bp,
					struct bkey_s_c bp_k,
					struct bkey_s_c orig_k,
					bool insert)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	if (insert) {
		prt_printf(&buf, "existing backpointer found when inserting ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "found ");
		bch2_bkey_val_to_text(&buf, c, bp_k);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
	} else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		prt_printf(&buf, "backpointer not found when deleting");
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "searching for ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);

		prt_printf(&buf, "got ");
		bch2_bkey_val_to_text(&buf, c, bp_k);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
	}

	printbuf_exit(&buf);

	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
		bch2_inconsistent_error(c);
		return -EIO;
	} else {
		return 0;
	}
}
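
/*
 * Slow path for creating/deleting a backpointer, used when the btree write
 * buffer is not in use: going through a normal btree iterator lets us first
 * verify that the existing key (if any) matches what we expect:
 */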
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
					      struct bkey_i_backpointer *bp_k,
					      struct bch_backpointer bp,
					      struct bkey_s_c orig_k,
					      bool insert)
{
	struct btree_iter bp_iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
			       bp_k->k.p,
			       BTREE_ITER_INTENT|
			       BTREE_ITER_SLOTS|
			       BTREE_ITER_WITH_UPDATES);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (insert
	    ? k.k->type
	    : (k.k->type != KEY_TYPE_backpointer ||
	       memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
		ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
		if (ret)
			goto err;
	}

	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}
/*
 * Find the next backpointer >= *bp_pos for @bucket, so long as the bucket's
 * generation matches @gen (pass gen < 0 to skip the generation check):
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
			      struct bpos bucket, int gen,
			      struct bpos *bp_pos,
			      struct bch_backpointer *bp,
			      unsigned iter_flags)
{
	struct bch_fs *c = trans->c;
	struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
	struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
	struct bkey_s_c k;
	int ret = 0;

	if (bpos_ge(*bp_pos, bp_end_pos))
		goto done;

	if (gen >= 0) {
		k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
				       bucket, BTREE_ITER_CACHED|iter_flags);
		ret = bkey_err(k);
		if (ret)
			goto out;

		if (k.k->type != KEY_TYPE_alloc_v4 ||
		    bkey_s_c_to_alloc_v4(k).v->gen != gen)
			goto done;
	}

	*bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));

	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
				     *bp_pos, iter_flags, k, ret) {
		if (bpos_ge(k.k->p, bp_end_pos))
			break;

		*bp_pos = k.k->p;
		*bp = *bkey_s_c_to_backpointer(k).v;
		goto out;
	}
done:
	*bp_pos = SPOS_MAX;
out:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}
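
/*
 * Report a backpointer that doesn't match what it points to. When the write
 * buffer is in use (the normal case) a mismatch is expected, since pending
 * updates may not have been flushed yet, so this only fires in the
 * no-write-buffer debug mode:
 */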
static void backpointer_not_found(struct btree_trans *trans,
				  struct bpos bp_pos,
				  struct bch_backpointer bp,
				  struct bkey_s_c k,
				  const char *thing_it_points_to)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = bp_pos_to_bucket(c, bp_pos);

	if (likely(!bch2_backpointers_no_use_write_buffer))
		return;

	prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
		   thing_it_points_to);
	prt_printf(&buf, "bucket: ");
	bch2_bpos_to_text(&buf, bucket);
	prt_printf(&buf, "\n  ");

	prt_printf(&buf, "backpointer pos: ");
	bch2_bpos_to_text(&buf, bp_pos);
	prt_printf(&buf, "\n  ");

	bch2_backpointer_to_text(&buf, &bp);
	prt_printf(&buf, "\n  ");
	bch2_bkey_val_to_text(&buf, c, k);
	if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
		bch_err_ratelimited(c, "%s", buf.buf);
	else
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	printbuf_exit(&buf);
}
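
/*
 * Walk a backpointer back to the key it points to, returning it (or
 * bkey_s_c_null if the target no longer exists); for backpointers to btree
 * nodes, the node's key is returned instead:
 */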
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bpos bp_pos,
					 struct bch_backpointer bp,
					 unsigned iter_flags)
{
	struct bch_fs *c = trans->c;
	struct btree_root *r = bch2_btree_id_root(c, bp.btree_id);
	struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
	struct bkey_s_c k;

	bch2_trans_node_iter_init(trans, iter,
				  bp.btree_id,
				  bp.pos,
				  0,
				  min(bp.level, r->level),
				  iter_flags);
	k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k)) {
		bch2_trans_iter_exit(trans, iter);
		return k;
	}

	if (bp.level == r->level + 1)
		k = bkey_i_to_s_c(&r->key);

	if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
		return k;

	bch2_trans_iter_exit(trans, iter);

	if (unlikely(bch2_backpointers_no_use_write_buffer)) {
		if (bp.level) {
			struct btree *b;

			/*
			 * If a backpointer for a btree node wasn't found, it may be
			 * because it was overwritten by a new btree node that hasn't
			 * been written out yet - backpointer_get_node() checks for
			 * this:
			 */
			b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
			if (!IS_ERR_OR_NULL(b))
				return bkey_i_to_s_c(&b->key);

			bch2_trans_iter_exit(trans, iter);

			if (IS_ERR(b))
				return bkey_s_c_err(PTR_ERR(b));
			return bkey_s_c_null;
		}

		backpointer_not_found(trans, bp_pos, bp, k, "extent");
	}

	return bkey_s_c_null;
}
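
/*
 * Variant of bch2_backpointer_get_key() for backpointers to btree nodes;
 * returns the node, an ERR_PTR, or NULL if the node wasn't found:
 */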
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bp_pos,
					struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
	struct btree *b;

	BUG_ON(!bp.level);

	bch2_trans_node_iter_init(trans, iter,
				  bp.btree_id,
				  bp.pos,
				  0,
				  bp.level - 1,
				  0);
	b = bch2_btree_iter_peek_node(iter);
	if (IS_ERR(b))
		goto err;

	if (b && extent_matches_bp(c, bp.btree_id, bp.level,
				   bkey_i_to_s_c(&b->key),
				   bucket, bp))
		return b;

	if (b && btree_node_will_make_reachable(b)) {
		b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
	} else {
		backpointer_not_found(trans, bp_pos, bp,
				      bkey_i_to_s_c(&b->key), "btree node");
		b = NULL;
	}
err:
	bch2_trans_iter_exit(trans, iter);
	return b;
}
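
/*
 * fsck: a backpointer key must reference an existing device and a bucket
 * with a live alloc_v4 key; otherwise it's deleted:
 */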
static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bch_dev *ca;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
			"backpointer for missing device:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
				     bp_pos_to_bucket(c, k.k->p), 0);
	ret = bkey_err(alloc_k);
	if (ret)
		goto out;

	if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
			"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
			alloc_iter.pos.inode, alloc_iter.pos.offset,
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}
/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter,
				BTREE_ID_backpointers, POS_MIN, 0, k,
				NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
			bch2_check_btree_backpointer(&trans, &iter, k)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
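
/*
 * Check that an extent pointer has a corresponding backpointer key. struct
 * bpos_level records the last extent we flushed the write buffer for, so
 * that each extent triggers at most one flush-and-restart cycle:
 */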
struct bpos_level {
	unsigned	level;
	struct bpos	pos;
};

static int check_bp_exists(struct btree_trans *trans,
			   struct bpos bucket,
			   struct bch_backpointer bp,
			   struct bkey_s_c orig_k,
			   struct bpos bucket_start,
			   struct bpos bucket_end,
			   struct bpos_level *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct btree_iter bp_iter = { NULL };
	struct printbuf buf = PRINTBUF;
	struct bkey_s_c bp_k;
	int ret = 0;

	if (bpos_lt(bucket, bucket_start) ||
	    bpos_gt(bucket, bucket_end))
		return 0;

	if (!bch2_dev_bucket_exists(c, bucket))
		goto missing;

	bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
				  bucket_pos_to_bp(c, bucket, bp.bucket_offset),
				  0);
	ret = bkey_err(bp_k);
	if (ret)
		goto err;

	if (bp_k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
		if (last_flushed->level != bp.level ||
		    !bpos_eq(last_flushed->pos, orig_k.k->p)) {
			last_flushed->level = bp.level;
			last_flushed->pos = orig_k.k->p;

			ret = bch2_btree_write_buffer_flush_sync(trans) ?:
				-BCH_ERR_transaction_restart_write_buffer_flush;
			goto out;
		}
		goto missing;
	}
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &bp_iter);
	printbuf_exit(&buf);
	return ret;
missing:
	prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
		   bch2_btree_ids[bp.btree_id], bp.level);
	bch2_bkey_val_to_text(&buf, c, orig_k);
	prt_printf(&buf, "\nbp pos ");
	bch2_bpos_to_text(&buf, bp_iter.pos);

	if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
	    c->opts.reconstruct_alloc ||
	    fsck_err(c, "%s", buf.buf))
		ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);

	goto out;
}
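
/*
 * Walk the pointers in the key at iter's current position and verify that
 * each one has a backpointer:
 */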
static int check_extent_to_backpointers(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bucket_start,
					struct bpos bucket_end,
					struct bpos_level *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bkey_s_c k;
	int ret;

	k = bch2_btree_iter_peek_all_levels(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k)
		return 0;

	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, bucket_pos, bp, k,
				      bucket_start, bucket_end,
				      last_flushed);
		if (ret)
			return ret;
	}

	return 0;
}
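
/*
 * Btree roots are not referenced by any interior node, so their backpointers
 * have to be checked directly against the cached root key:
 */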
static int check_btree_root_to_backpointers(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos bucket_start,
					    struct bpos bucket_end,
					    struct bpos_level *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct btree_root *r = bch2_btree_id_root(c, btree_id);
	struct btree_iter iter;
	struct btree *b;
	struct bkey_s_c k;
	struct bkey_ptrs_c ptrs;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	int ret;

	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, r->level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto err;

	BUG_ON(b != btree_node_root(c, b));

	k = bkey_i_to_s_c(&b->key);
	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, iter.btree_id, b->c.level + 1,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, bucket_pos, bp, k,
				      bucket_start, bucket_end,
				      last_flushed);
		if (ret)
			goto err;
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
	return (struct bbpos) {
		.btree	= bp.btree_id,
		.pos	= bp.pos,
	};
}
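
/*
 * Estimate how many btree nodes' worth of keys we can pin in memory at once:
 * half of total RAM, divided by the btree node size:
 */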
static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
	struct sysinfo i;
	u64 mem_bytes;

	si_meminfo(&i);
	mem_bytes = i.totalram * i.mem_unit;
	return div_u64(mem_bytes >> 1, btree_bytes(c));
}
static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
					unsigned btree_leaf_mask,
					unsigned btree_interior_mask,
					struct bbpos start, struct bbpos *end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	enum btree_id btree;
	int ret = 0;

	for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
		unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

		if (!((1U << btree) & btree_leaf_mask) &&
		    !((1U << btree) & btree_interior_mask))
			continue;

		bch2_trans_node_iter_init(trans, &iter, btree,
					  btree == start.btree ? start.pos : POS_MIN,
					  0, depth, 0);
		/*
		 * for_each_btree_key_continue() doesn't check the return value
		 * from bch2_btree_iter_advance(), which is needed when
		 * iterating over interior nodes where we'll see keys at
		 * SPOS_MAX:
		 */
		do {
			k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
			ret = bkey_err(k);
			if (!k.k || ret)
				break;

			--btree_nodes;
			if (!btree_nodes) {
				*end = BBPOS(btree, k.k->p);
				bch2_trans_iter_exit(trans, &iter);
				return 0;
			}
		} while (bch2_btree_iter_advance(&iter));
		bch2_trans_iter_exit(trans, &iter);
	}

	*end = BBPOS_MAX;
	return ret;
}
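
/*
 * One pass of the extents -> backpointers check, restricted to extents
 * pointing into buckets in [bucket_start, bucket_end]:
 */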
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
						   struct bpos bucket_start,
						   struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	enum btree_id btree_id;
	struct bpos_level last_flushed = { UINT_MAX };
	int ret = 0;

	for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
		unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

		bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
					  depth,
					  BTREE_ITER_ALL_LEVELS|
					  BTREE_ITER_PREFETCH);

		do {
			ret = commit_do(trans, NULL, NULL,
					BTREE_INSERT_LAZY_RW|
					BTREE_INSERT_NOFAIL,
					check_extent_to_backpointers(trans, &iter,
								bucket_start, bucket_end,
								&last_flushed));
			if (ret)
				break;
		} while (!bch2_btree_iter_advance(&iter));

		bch2_trans_iter_exit(trans, &iter);

		if (ret)
			break;

		ret = commit_do(trans, NULL, NULL,
				BTREE_INSERT_LAZY_RW|
				BTREE_INSERT_NOFAIL,
				check_btree_root_to_backpointers(trans, btree_id,
							bucket_start, bucket_end,
							&last_flushed));
		if (ret)
			break;
	}
	return ret;
}
static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
					 struct bpos bucket)
{
	return bch2_dev_exists2(c, bucket.inode)
		? bucket_pos_to_bp(c, bucket, 0)
		: bucket;
}
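
/*
 * Advance through the alloc and backpointers btrees in lockstep, counting
 * nodes until the in-memory budget is exhausted; *end is where the current
 * pass should stop:
 */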
static int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
					struct bpos start, struct bpos *end)
{
	struct btree_iter alloc_iter;
	struct btree_iter bp_iter;
	struct bkey_s_c alloc_k, bp_k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	bool alloc_end = false, bp_end = false;
	int ret = 0;

	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
				  start, 0, 1, 0);
	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
	while (1) {
		alloc_k = !alloc_end
			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
			: bkey_s_c_null;
		bp_k = !bp_end
			? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
			: bkey_s_c_null;

		ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
		if ((!alloc_k.k && !bp_k.k) || ret) {
			*end = SPOS_MAX;
			break;
		}

		--btree_nodes;
		if (!btree_nodes) {
			*end = alloc_k.k ? alloc_k.k->p : SPOS_MAX;
			break;
		}

		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
			if (!bch2_btree_iter_advance(&alloc_iter))
				alloc_end = true;
		} else {
			if (!bch2_btree_iter_advance(&bp_iter))
				bp_end = true;
		}
	}
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}
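
/*
 * fsck pass: verify that every extent pointer has a backpointer, in as many
 * passes as needed for the alloc info to fit in memory:
 */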
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
	struct btree_trans trans;
	struct bpos start = POS_MIN, end;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	while (1) {
		ret = bch2_get_alloc_in_memory_pos(&trans, start, &end);
		if (ret)
			break;

		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_extents_to_backpointers(): ");
			bch2_bpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
		if (ret || bpos_eq(end, SPOS_MAX))
			break;

		start = bpos_successor(end);
	}
	bch2_trans_exit(&trans);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
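
/*
 * Check that a single backpointer (within [start, end]) leads back to a live
 * extent or btree node, deleting it if the target is missing:
 */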
static int check_one_backpointer(struct btree_trans *trans,
				 struct bbpos start,
				 struct bbpos end,
				 struct bkey_s_c_backpointer bp,
				 struct bpos *last_flushed_pos)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bbpos pos = bp_to_bbpos(*bp.v);
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (bbpos_cmp(pos, start) < 0 ||
	    bbpos_cmp(pos, end) > 0)
		return 0;

	k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
	ret = bkey_err(k);
	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
		return 0;
	if (ret)
		return ret;

	if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
		*last_flushed_pos = bp.k->p;
		ret = bch2_btree_write_buffer_flush_sync(trans) ?:
			-BCH_ERR_transaction_restart_write_buffer_flush;
		goto out;
	}

	if (fsck_err_on(!k.k, c,
			"backpointer for missing extent\n  %s",
			(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
		ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
		goto out;
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
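
/*
 * One pass of the backpointers -> extents check, restricted to backpointers
 * within [start, end]:
 */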
static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
						   struct bbpos start,
						   struct bbpos end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos last_flushed_pos = SPOS_MAX;

	return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
				  POS_MIN, BTREE_ITER_PREFETCH, k,
				  NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_one_backpointer(trans, start, end,
				      bkey_s_c_to_backpointer(k),
				      &last_flushed_pos));
}
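
/*
 * fsck pass in the other direction: verify that every backpointer points to a
 * live extent or btree node, in as many passes as needed for the extents and
 * reflink btrees to fit in memory:
 */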
int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
	struct btree_trans trans;
	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	while (1) {
		ret = bch2_get_btree_in_memory_pos(&trans,
						   (1U << BTREE_ID_extents)|
						   (1U << BTREE_ID_reflink),
						   ~0,
						   start, &end);
		if (ret)
			break;

		if (!bbpos_cmp(start, BBPOS_MIN) &&
		    bbpos_cmp(end, BBPOS_MAX))
			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (bbpos_cmp(start, BBPOS_MIN) ||
		    bbpos_cmp(end, BBPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_backpointers_to_extents(): ");
			bch2_bbpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bbpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_backpointers_to_extents_pass(&trans, start, end);
		if (ret || !bbpos_cmp(end, BBPOS_MAX))
			break;

		start = bbpos_successor(end);
	}
	bch2_trans_exit(&trans);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}