// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "error.h"

#include <linux/mm.h>
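/*
 * Overview: every extent and btree node pointer gets a backpointer in the
 * bucket it points into, recording the btree, level and position of the key
 * that references it. A small number of backpointers are stored inline in the
 * bucket's alloc key; when those overflow, backpointers go to the
 * backpointers btree (see bch2_bucket_backpointer_add() below).
 */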
/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}
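/*
 * A backpointer btree position packs (bucket, offset within bucket) into a
 * single offset field: the bucket's start sector is shifted left by
 * MAX_EXTENT_COMPRESS_RATIO_SHIFT and bp.bucket_offset is added, so bucket
 * offsets are in sub-sector units - presumably (going by the macro name) so
 * that pointers into compressed extents can be addressed precisely.
 *
 * E.g. a backpointer at bucket_offset X in bucket B on device D lives at
 * POS(D, (bucket_to_sector(ca, B) << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + X).
 */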
/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket,
					   u64 bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
	struct bpos ret;

	ret = POS(bucket.inode,
		  (bucket_to_sector(ca, bucket.offset) <<
		   MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);

	/* Sanity check: the conversion must round trip back to the bucket: */
	BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));

	return ret;
}
static bool extent_matches_bp(struct bch_fs *c,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c k,
			      struct bpos bucket,
			      struct bch_backpointer bp)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket2;
		struct bch_backpointer bp2;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
				      &bucket2, &bp2);
		if (bpos_eq(bucket, bucket2) &&
		    !memcmp(&bp, &bp2, sizeof(bp)))
			return true;
	}

	return false;
}
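/*
 * Validity check for KEY_TYPE_backpointer keys: the value must be full sized,
 * and the key's position must be consistent with the bucket_offset stored in
 * the value:
 */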
int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     unsigned flags, struct printbuf *err)
{
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);

	if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
		prt_str(err, "incorrect value size");
		return -BCH_ERR_invalid_bkey;
	}

	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
		prt_str(err, "backpointer at wrong pos");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
	prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
		   bch2_btree_ids[bp->btree_id],
		   bp->level,
		   (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
		   (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
		   bp->bucket_len);
	bch2_bpos_to_text(out, bp->pos);
}
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}
void bch2_backpointer_swab(struct bkey_s k)
{
	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

	bp.v->bucket_offset	= swab32(bp.v->bucket_offset);
	bp.v->bucket_len	= swab32(bp.v->bucket_len);
	bch2_bpos_swab(&bp.v->pos);
}
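/*
 * The code below addresses an individual backpointer with a single u64
 * "bp_offset": values below BACKPOINTER_OFFSET_MAX refer to backpointers
 * stored inline in the bucket's alloc key, values at or above it refer to
 * keys in the backpointers btree (with BACKPOINTER_OFFSET_MAX subtracted to
 * get the real bucket_offset), and U64_MAX means "no more backpointers":
 */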
#define BACKPOINTER_OFFSET_MAX	((1ULL << 40) - 1)

static inline int backpointer_cmp(struct bch_backpointer l, struct bch_backpointer r)
{
	return cmp_int(l.bucket_offset, r.bucket_offset);
}
static int bch2_backpointer_del_by_offset(struct btree_trans *trans,
					  struct bpos bucket,
					  u64 bp_offset,
					  struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	if (bp_offset < BACKPOINTER_OFFSET_MAX) {
		/* Backpointer is stored inline in the alloc key: */
		struct bch_backpointer *bps;
		struct bkey_i_alloc_v4 *a;
		unsigned i, nr;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
				     bucket,
				     BTREE_ITER_CACHED|
				     BTREE_ITER_INTENT|
				     BTREE_ITER_WITH_UPDATES);
		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_alloc_v4) {
			ret = -ENOENT;
			goto err;
		}

		a = bch2_alloc_to_v4_mut(trans, k);
		ret = PTR_ERR_OR_ZERO(a);
		if (ret)
			goto err;

		bps = alloc_v4_backpointers(&a->v);
		nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);

		for (i = 0; i < nr; i++) {
			if (bps[i].bucket_offset == bp_offset)
				goto found;
			if (bps[i].bucket_offset > bp_offset)
				break;
		}

		ret = -ENOENT;
		goto err;
found:
		if (memcmp(&bps[i], &bp, sizeof(bp))) {
			ret = -ENOENT;
			goto err;
		}
		array_remove_item(bps, nr, i);
		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
		set_alloc_v4_u64s(a);
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	} else {
		/* Backpointer is stored in the backpointers btree: */
		bp_offset -= BACKPOINTER_OFFSET_MAX;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_backpointers,
				     bucket_pos_to_bp(c, bucket, bp_offset),
				     BTREE_ITER_INTENT|
				     BTREE_ITER_WITH_UPDATES);
		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_backpointer ||
		    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
			ret = -ENOENT;
			goto err;
		}

		ret = bch2_btree_delete_at(trans, &iter, 0);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
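/*
 * Delete the backpointer @bp for @orig_k from bucket @a: first look for it
 * inline in the alloc key; if it isn't there it must be in the backpointers
 * btree, and once backpointers fsck has run it's an inconsistency for it to
 * be missing there too:
 */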
int bch2_bucket_backpointer_del(struct btree_trans *trans,
				struct bkey_i_alloc_v4 *a,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
	struct btree_iter bp_iter;
	struct bkey_s_c k;
	int ret;

	for (i = 0; i < nr; i++) {
		int cmp = backpointer_cmp(bps[i], bp) ?:
			memcmp(&bps[i], &bp, sizeof(bp));
		if (!cmp)
			goto found;
		if (cmp >= 0)
			break;
	}
	goto btree;
found:
	array_remove_item(bps, nr, i);
	SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
	set_alloc_v4_u64s(a);
	return 0;
btree:
	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
			     bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&bp_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
		struct printbuf buf = PRINTBUF;

		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
			goto err;

		prt_printf(&buf, "backpointer not found when deleting");
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "searching for ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);

		prt_printf(&buf, "got ");
		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		prt_str(&buf, "alloc ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
		bch2_inconsistent_error(c);
		printbuf_exit(&buf);
		ret = -EIO;
		goto err;
	}

	ret = bch2_btree_delete_at(trans, &bp_iter, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}
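/*
 * Add the backpointer @bp for @orig_k to bucket @a: backpointers are kept
 * sorted by bucket_offset inline in the alloc key until
 * BCH_ALLOC_V4_NR_BACKPOINTERS_MAX is reached, after which they spill into
 * the backpointers btree; overlapping or already-present backpointers are an
 * inconsistency once backpointers fsck has run:
 */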
int bch2_bucket_backpointer_add(struct btree_trans *trans,
				struct bkey_i_alloc_v4 *a,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
	struct bkey_i_backpointer *bp_k;
	struct btree_iter bp_iter;
	struct bkey_s_c k;
	int ret;

	/* Check for duplicates: */
	for (i = 0; i < nr; i++) {
		int cmp = backpointer_cmp(bps[i], bp);
		if (cmp >= 0)
			break;
	}

	if ((i &&
	     (bps[i - 1].bucket_offset +
	      bps[i - 1].bucket_len > bp.bucket_offset)) ||
	    (i < nr &&
	     (bp.bucket_offset + bp.bucket_len > bps[i].bucket_offset))) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "overlapping backpointer found when inserting ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "into ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);

		if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
			bch2_inconsistent_error(c);
			return -EIO;
		}
	}

	if (nr < BCH_ALLOC_V4_NR_BACKPOINTERS_MAX) {
		array_insert_item(bps, nr, i, bp);
		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
		set_alloc_v4_u64s(a);
		return 0;
	}

	/* Overflow: use backpointer btree */

	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
			     bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&bp_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "existing btree backpointer key found when inserting ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "found ");
		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		bch_err(c, "%s", buf.buf);
		printbuf_exit(&buf);

		if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
			bch2_inconsistent_error(c);
			ret = -EIO;
			goto err;
		}
	}

	bp_k = bch2_bkey_alloc(trans, &bp_iter, backpointer);
	ret = PTR_ERR_OR_ZERO(bp_k);
	if (ret)
		goto err;

	bp_k->v = bp;

	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}
/*
 * Find the next backpointer >= *bp_offset:
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
			      struct bpos bucket, int gen,
			      u64 *bp_offset,
			      struct bch_backpointer *dst,
			      unsigned iter_flags)
{
	struct bch_fs *c = trans->c;
	struct bpos bp_pos, bp_end_pos;
	struct btree_iter alloc_iter, bp_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_s_c_alloc_v4 a;
	unsigned i;
	int ret = 0;

	if (*bp_offset == U64_MAX)
		return 0;

	bp_pos = bucket_pos_to_bp(c, bucket,
				  max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
	bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
			     bucket, BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	if (k.k->type != KEY_TYPE_alloc_v4)
		goto done;

	a = bkey_s_c_to_alloc_v4(k);
	if (gen >= 0 && a.v->gen != gen)
		goto done;

	for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++) {
		if (alloc_v4_backpointers_c(a.v)[i].bucket_offset < *bp_offset)
			continue;

		*dst = alloc_v4_backpointers_c(a.v)[i];
		*bp_offset = dst->bucket_offset;
		goto out;
	}

	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
				     bp_pos, iter_flags, k, ret) {
		if (bpos_ge(k.k->p, bp_end_pos))
			break;

		if (k.k->type != KEY_TYPE_backpointer)
			continue;

		*dst = *bkey_s_c_to_backpointer(k).v;
		*bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
		goto out;
	}
done:
	*bp_offset = U64_MAX;
out:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}
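/*
 * Error reporting for a backpointer that doesn't match the extent or btree
 * node it claims to point to: rate-limited while backpointers fsck hasn't run
 * yet, a transaction inconsistency afterwards:
 */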
static void backpointer_not_found(struct btree_trans *trans,
				  struct bpos bucket,
				  u64 bp_offset,
				  struct bch_backpointer bp,
				  struct bkey_s_c k,
				  const char *thing_it_points_to)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
		   thing_it_points_to);
	prt_printf(&buf, "bucket: ");
	bch2_bpos_to_text(&buf, bucket);
	prt_printf(&buf, "\n  ");

	if (bp_offset >= BACKPOINTER_OFFSET_MAX) {
		struct bpos bp_pos =
			bucket_pos_to_bp(c, bucket,
					 bp_offset - BACKPOINTER_OFFSET_MAX);
		prt_printf(&buf, "backpointer pos: ");
		bch2_bpos_to_text(&buf, bp_pos);
		prt_printf(&buf, "\n  ");
	}

	bch2_backpointer_to_text(&buf, &bp);
	prt_printf(&buf, "\n  ");
	bch2_bkey_val_to_text(&buf, c, k);
	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
		bch_err_ratelimited(c, "%s", buf.buf);
	else
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	printbuf_exit(&buf);
}
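/*
 * Look up the extent (or btree node key) that a backpointer points to,
 * verifying that it really does reference this bucket; for btree node
 * backpointers this may fall through to bch2_backpointer_get_node():
 */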
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bpos bucket,
					 u64 bp_offset,
					 struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;

	bch2_trans_node_iter_init(trans, iter,
				  bp.btree_id,
				  bp.pos,
				  0,
				  min(bp.level, c->btree_roots[bp.btree_id].level),
				  0);
	k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k)) {
		bch2_trans_iter_exit(trans, iter);
		return k;
	}

	if (bp.level == c->btree_roots[bp.btree_id].level + 1)
		k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);

	if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
		return k;

	bch2_trans_iter_exit(trans, iter);

	if (bp.level) {
		struct btree *b;

		/*
		 * If a backpointer for a btree node wasn't found, it may be
		 * because it was overwritten by a new btree node that hasn't
		 * been written out yet - backpointer_get_node() checks for
		 * this:
		 */
		b = bch2_backpointer_get_node(trans, iter, bucket, bp_offset, bp);
		if (!IS_ERR_OR_NULL(b))
			return bkey_i_to_s_c(&b->key);

		bch2_trans_iter_exit(trans, iter);

		if (IS_ERR(b))
			return bkey_s_c_err(PTR_ERR(b));
		return bkey_s_c_null;
	}

	backpointer_not_found(trans, bucket, bp_offset, bp, k, "extent");
	return bkey_s_c_null;
}
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bucket,
					u64 bp_offset,
					struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct btree *b;

	BUG_ON(!bp.level);

	bch2_trans_node_iter_init(trans, iter,
				  bp.btree_id,
				  bp.pos,
				  0,
				  bp.level - 1,
				  0);
	b = bch2_btree_iter_peek_node(iter);
	if (IS_ERR(b))
		goto err;

	if (b && extent_matches_bp(c, bp.btree_id, bp.level,
				   bkey_i_to_s_c(&b->key),
				   bucket, bp))
		return b;

	if (b && btree_node_will_make_reachable(b)) {
		b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
	} else {
		backpointer_not_found(trans, bucket, bp_offset, bp,
				      bkey_i_to_s_c(&b->key), "btree node");
		b = NULL;
	}
err:
	bch2_trans_iter_exit(trans, iter);
	return b;
}
static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bch_dev *ca;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
			"backpointer for missing device:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
			     bp_pos_to_bucket(c, k.k->p), 0);

	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(alloc_k);
	if (ret)
		goto out;

	if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
			"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
			alloc_iter.pos.inode, alloc_iter.pos.offset,
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}
/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter,
				BTREE_ID_backpointers, POS_MIN, 0, k,
				NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
			bch2_check_btree_backpointer(&trans, &iter, k)));
}
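/*
 * Check that a backpointer for the given bucket/extent exists, either inline
 * in the alloc key or in the backpointers btree; if it's missing and fsck
 * says to fix it (or we're upgrading from a version without backpointers, or
 * reconstructing alloc info), add it:
 */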
static int check_bp_exists(struct btree_trans *trans,
			   struct bpos bucket_pos,
			   struct bch_backpointer bp,
			   struct bkey_s_c orig_k,
			   struct bpos bucket_start,
			   struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter, bp_iter = { NULL };
	struct printbuf buf = PRINTBUF;
	struct bkey_s_c alloc_k, bp_k;
	int ret = 0;

	if (bpos_lt(bucket_pos, bucket_start) ||
	    bpos_gt(bucket_pos, bucket_end))
		return 0;

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(alloc_k);
	if (ret)
		goto err;

	if (alloc_k.k->type == KEY_TYPE_alloc_v4) {
		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(alloc_k);
		const struct bch_backpointer *bps = alloc_v4_backpointers_c(a.v);
		unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(a.v);

		for (i = 0; i < nr; i++) {
			int cmp = backpointer_cmp(bps[i], bp) ?:
				memcmp(&bps[i], &bp, sizeof(bp));
			if (!cmp)
				goto out;
			if (cmp >= 0)
				break;
		}
	}

	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
			     bucket_pos_to_bp(c, bucket_pos, bp.bucket_offset),
			     0);
	bp_k = bch2_btree_iter_peek_slot(&bp_iter);
	ret = bkey_err(bp_k);
	if (ret)
		goto err;

	if (bp_k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp)))
		goto missing;
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
missing:
	prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
		   bch2_btree_ids[bp.btree_id], bp.level);
	bch2_bkey_val_to_text(&buf, c, orig_k);
	prt_printf(&buf, "\nin alloc key ");
	bch2_bkey_val_to_text(&buf, c, alloc_k);

	if (c->sb.version < bcachefs_metadata_version_backpointers ||
	    c->opts.reconstruct_alloc ||
	    fsck_err(c, "%s", buf.buf)) {
		struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, alloc_k);

		ret = PTR_ERR_OR_ZERO(a) ?:
			bch2_bucket_backpointer_add(trans, a, bp, orig_k) ?:
			bch2_trans_update(trans, &alloc_iter, &a->k_i, 0);
	}

	goto out;
}
static int check_extent_to_backpointers(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bucket_start,
					struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bkey_s_c k;
	int ret;

	k = bch2_btree_iter_peek_all_levels(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k)
		return 0;

	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, bucket_pos, bp, k, bucket_start, bucket_end);
		if (ret)
			return ret;
	}

	return 0;
}
static int check_btree_root_to_backpointers(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos bucket_start,
					    struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
	struct bkey_s_c k;
	struct bkey_ptrs_c ptrs;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	int ret;

	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
				  c->btree_roots[btree_id].level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto err;

	BUG_ON(b != btree_node_root(c, b));

	k = bkey_i_to_s_c(&b->key);
	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, iter.btree_id, iter.path->level + 1,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, bucket_pos, bp, k, bucket_start, bucket_end);
		if (ret)
			goto err;
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
	return (struct bbpos) {
		.btree	= bp.btree_id,
		.pos	= bp.pos,
	};
}
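/*
 * Estimate how many btree nodes we can keep cached at once, using half of
 * system RAM; this bounds how much of the keyspace a single fsck pass below
 * can cover:
 */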
static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
	struct sysinfo i;
	u64 mem_bytes;

	si_meminfo(&i);
	mem_bytes = i.totalram * i.mem_unit;
	return (mem_bytes >> 1) / btree_bytes(c);
}
int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
				 unsigned btree_leaf_mask,
				 unsigned btree_interior_mask,
				 struct bbpos start, struct bbpos *end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	enum btree_id btree;
	int ret = 0;

	for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
		unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

		if (!((1U << btree) & btree_leaf_mask) &&
		    !((1U << btree) & btree_interior_mask))
			continue;

		bch2_trans_node_iter_init(trans, &iter, btree,
					  btree == start.btree ? start.pos : POS_MIN,
					  0, depth, 0);
		/*
		 * for_each_btree_key_continue() doesn't check the return value
		 * from bch2_btree_iter_advance(), which is needed when
		 * iterating over interior nodes where we'll see keys at
		 * SPOS_MAX:
		 */
		do {
			k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
			ret = bkey_err(k);
			if (!k.k || ret)
				break;

			--btree_nodes;
			if (!btree_nodes) {
				*end = BBPOS(btree, k.k->p);
				bch2_trans_iter_exit(trans, &iter);
				return 0;
			}
		} while (bch2_btree_iter_advance(&iter));
		bch2_trans_iter_exit(trans, &iter);
	}

	*end = BBPOS_MAX;
	return ret;
}
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
						   struct bpos bucket_start,
						   struct bpos bucket_end)
{
	struct btree_iter iter;
	enum btree_id btree_id;
	int ret = 0;

	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
		unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

		bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
					  depth,
					  BTREE_ITER_ALL_LEVELS|
					  BTREE_ITER_PREFETCH);

		do {
			ret = commit_do(trans, NULL, NULL,
					BTREE_INSERT_LAZY_RW|
					BTREE_INSERT_NOFAIL,
					check_extent_to_backpointers(trans, &iter,
								bucket_start, bucket_end));
			if (ret)
				break;
		} while (!bch2_btree_iter_advance(&iter));

		bch2_trans_iter_exit(trans, &iter);

		if (ret)
			break;

		ret = commit_do(trans, NULL, NULL,
				BTREE_INSERT_LAZY_RW|
				BTREE_INSERT_NOFAIL,
				check_btree_root_to_backpointers(trans, btree_id,
								bucket_start, bucket_end));
		if (ret)
			break;
	}

	return ret;
}
static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
					 struct bpos bucket)
{
	return bch2_dev_exists2(c, bucket.inode)
		? bucket_pos_to_bp(c, bucket, 0)
		: bucket;
}
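/*
 * Walk the alloc and backpointers btrees in tandem, starting from @start, and
 * return in *end how far we can get before the corresponding btree nodes no
 * longer fit in (half of) memory; used to split the backpointers fsck into
 * multiple passes:
 */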
int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
				 struct bpos start, struct bpos *end)
{
	struct btree_iter alloc_iter;
	struct btree_iter bp_iter;
	struct bkey_s_c alloc_k, bp_k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	bool alloc_end = false, bp_end = false;
	int ret = 0;

	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
				  start, 0, 1, 0);
	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
	while (1) {
		alloc_k = !alloc_end
			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
			: bkey_s_c_null;
		bp_k = !bp_end
			? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
			: bkey_s_c_null;

		ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
		if ((!alloc_k.k && !bp_k.k) || ret) {
			*end = SPOS_MAX;
			break;
		}

		--btree_nodes;
		if (!btree_nodes) {
			*end = alloc_k.k->p;
			break;
		}

		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
			if (!bch2_btree_iter_advance(&alloc_iter))
				alloc_end = true;
		} else {
			if (!bch2_btree_iter_advance(&bp_iter))
				bp_end = true;
		}
	}
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
	struct btree_trans trans;
	struct bpos start = POS_MIN, end;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	while (1) {
		ret = bch2_get_alloc_in_memory_pos(&trans, start, &end);
		if (ret)
			break;

		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_extents_to_backpointers(): ");
			bch2_bpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
		if (ret || bpos_eq(end, SPOS_MAX))
			break;

		start = bpos_successor(end);
	}
	bch2_trans_exit(&trans);

	return ret;
}
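/*
 * Check that a single backpointer (identified by bucket + *bp_offset) points
 * to a live extent or btree node; if it doesn't, and fsck says to fix it,
 * delete the stale backpointer:
 */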
static int check_one_backpointer(struct btree_trans *trans,
				 struct bpos bucket,
				 u64 *bp_offset,
				 struct bbpos start,
				 struct bbpos end)
{
	struct btree_iter iter;
	struct bch_backpointer bp;
	struct bbpos pos;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
	if (ret || *bp_offset == U64_MAX)
		return ret;

	pos = bp_to_bbpos(bp);
	if (bbpos_cmp(pos, start) < 0 ||
	    bbpos_cmp(pos, end) > 0)
		return 0;

	k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
	ret = bkey_err(k);
	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
		return 0;
	if (ret)
		return ret;

	if (fsck_err_on(!k.k, trans->c,
			"%s backpointer points to missing extent\n%s",
			*bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
			(bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
		ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
		if (ret == -ENOENT)
			bch_err(trans->c, "backpointer at %llu not found", *bp_offset);
	}

	bch2_trans_iter_exit(trans, &iter);
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
						   struct bbpos start,
						   struct bbpos end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		u64 bp_offset = 0;

		while (!(ret = commit_do(trans, NULL, NULL,
					 BTREE_INSERT_LAZY_RW|
					 BTREE_INSERT_NOFAIL,
					 check_one_backpointer(trans, iter.pos, &bp_offset, start, end))) &&
		       bp_offset < U64_MAX)
			;

		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);
	return ret < 0 ? ret : 0;
}
int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
	struct btree_trans trans;
	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	while (1) {
		ret = bch2_get_btree_in_memory_pos(&trans,
						   (1U << BTREE_ID_extents)|
						   (1U << BTREE_ID_reflink),
						   ~0,
						   start, &end);
		if (ret)
			break;

		if (!bbpos_cmp(start, BBPOS_MIN) &&
		    bbpos_cmp(end, BBPOS_MAX))
			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (bbpos_cmp(start, BBPOS_MIN) ||
		    bbpos_cmp(end, BBPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_backpointers_to_extents(): ");
			bch2_bbpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bbpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_backpointers_to_extents_pass(&trans, start, end);
		if (ret || !bbpos_cmp(end, BBPOS_MAX))
			break;

		start = bbpos_successor(end);
	}
	bch2_trans_exit(&trans);

	return ret;
}