1 // SPDX-License-Identifier: GPL-2.0
3 #include "alloc_background.h"
4 #include "backpointers.h"
5 #include "btree_cache.h"
6 #include "btree_update.h"
9 #define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
12 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
15 static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
18 struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
19 u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
21 return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
25 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
27 static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
31 struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
34 ret = POS(bucket.inode,
35 (bucket_to_sector(ca, bucket.offset) <<
36 MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
38 BUG_ON(bpos_cmp(bucket, bp_pos_to_bucket(c, ret)));
/*
 * Compute the backpointer for one decoded extent pointer of @k: fills
 * *bucket_pos with the alloc-btree position of the bucket @p points into,
 * and *bp with the backpointer value recording that reference.
 *
 * NOTE(review): this excerpt appears incomplete — the declaration of
 * bucket_offset and several designated-initializer fields (presumably
 * btree_id/level/pos) are not visible here; confirm against the full file.
 */
void bch2_extent_ptr_to_bp(struct bch_fs *c,
                           enum btree_id btree_id, unsigned level,
                           struct bkey_s_c k, struct extent_ptr_decoded p,
                           struct bpos *bucket_pos, struct bch_backpointer *bp)
        /* Interior keys (level != 0) reference btree nodes, leaves user data */
        enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
        /* Btree nodes have a fixed size; extents take theirs from the key */
        s64 sectors = level ? btree_sectors(c) : k.k->size;

        *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
        *bp = (struct bch_backpointer) {
                .data_type      = data_type,
                /* sector offset within bucket, scaled by the compress-ratio shift */
                .bucket_offset  = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
                .bucket_len     = ptr_disk_sectors(sectors, p),
/*
 * Does @k contain a pointer whose recomputed backpointer equals (bucket, @bp)?
 * Used to verify that a backpointer still refers to a live extent/btree node.
 *
 * NOTE(review): excerpt incomplete — the @k/@bucket parameters and the
 * bucket2 declaration are not visible; comments describe only what is shown.
 */
static bool extent_matches_bp(struct bch_fs *c,
                              enum btree_id btree_id, unsigned level,
                              struct bch_backpointer bp)
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bch_backpointer bp2;

                /* Recompute the backpointer for this pointer and compare */
                bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
                if (!bpos_cmp(bucket, bucket2) &&
                    !memcmp(&bp, &bp2, sizeof(bp)))
/*
 * Validity check for KEY_TYPE_backpointer keys: the value must be at least
 * full-sized, and the key's position must round-trip through
 * bucket_pos_to_bp() for its own bucket_offset.
 *
 * NOTE(review): the error-return statements are not visible in this excerpt.
 */
int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
                             int rw, struct printbuf *err)
        struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
        struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);

        if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
                prt_str(err, "incorrect value size");

        /* pos must be the canonical position for (bucket, bucket_offset) */
        if (bpos_cmp(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
                prt_str(err, "backpointer at wrong pos");
/*
 * Print a backpointer: btree id, level, offset within the bucket (split into
 * whole sectors and the sub-sector fraction at MAX_EXTENT_COMPRESS_RATIO_SHIFT),
 * length, and the key position it points to.
 *
 * NOTE(review): some printf arguments (level, len) are not visible in this
 * excerpt; verify the argument list matches the format string.
 */
void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
        prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
                   bch2_btree_ids[bp->btree_id],
                   /* whole-sector part of the offset */
                   (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
                   /* fractional (compress-ratio) part */
                   (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
        bch2_bpos_to_text(out, bp->pos);
121 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
123 bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
126 void bch2_backpointer_swab(struct bkey_s k)
128 struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
130 bp.v->bucket_offset = swab32(bp.v->bucket_offset);
131 bp.v->bucket_len = swab32(bp.v->bucket_len);
132 bch2_bpos_swab(&bp.v->pos);
135 #define BACKPOINTER_OFFSET_MAX ((1ULL << 40) - 1)
137 static inline int backpointer_cmp(struct bch_backpointer l, struct bch_backpointer r)
139 return cmp_int(l.bucket_offset, r.bucket_offset);
/*
 * Delete the backpointer at a given offset for a bucket.
 *
 * Backpointers live in one of two places: offsets below
 * BACKPOINTER_OFFSET_MAX index the inline array stored in the bucket's
 * alloc_v4 key; larger offsets are biased by BACKPOINTER_OFFSET_MAX and
 * stored as keys in the backpointers btree.
 *
 * NOTE(review): excerpt incomplete — several parameters (bucket, bp_offset),
 * local declarations (k, ret, i, nr) and error/return paths are not visible.
 */
static int bch2_backpointer_del_by_offset(struct btree_trans *trans,
                                          struct bch_backpointer bp)
        struct bch_fs *c = trans->c;
        struct btree_iter iter;

        if (bp_offset < BACKPOINTER_OFFSET_MAX) {
                /* Inline case: edit the backpointer array in the alloc key */
                struct bch_backpointer *bps;
                struct bkey_i_alloc_v4 *a;

                bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
                                     BTREE_ITER_WITH_UPDATES);
                k = bch2_btree_iter_peek_slot(&iter);
                if (k.k->type != KEY_TYPE_alloc_v4) {
                a = bch2_alloc_to_v4_mut(trans, k);
                ret = PTR_ERR_OR_ZERO(a);
                bps = alloc_v4_backpointers(&a->v);
                nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
                /* Array is sorted by bucket_offset: scan until match or past */
                for (i = 0; i < nr; i++) {
                        if (bps[i].bucket_offset == bp_offset)
                        if (bps[i].bucket_offset > bp_offset)
                /* Offset matched but contents differ -> inconsistency */
                if (memcmp(&bps[i], &bp, sizeof(bp))) {
                array_remove_item(bps, nr, i);
                SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
                set_alloc_v4_u64s(a);
                ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
                /* Btree case: un-bias the offset and delete the btree key */
                bp_offset -= BACKPOINTER_OFFSET_MAX;
                bch2_trans_iter_init(trans, &iter, BTREE_ID_backpointers,
                                     bucket_pos_to_bp(c, bucket, bp_offset),
                                     BTREE_ITER_WITH_UPDATES);
                k = bch2_btree_iter_peek_slot(&iter);
                if (k.k->type != KEY_TYPE_backpointer ||
                    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
                ret = bch2_btree_delete_at(trans, &iter, 0);
        bch2_trans_iter_exit(trans, &iter);
/*
 * Delete backpointer @bp for the bucket whose (mutable) alloc key is @a.
 *
 * First searches the inline backpointer array in @a; if not found there,
 * falls through to deleting the corresponding key in the backpointers
 * btree, emitting a detailed inconsistency report if it is missing there
 * too.  @orig_k is the extent the backpointer was for, used only for the
 * error report.
 *
 * NOTE(review): excerpt incomplete — locals (k, ret), the found/not-found
 * branch structure in the inline search, and several error paths are not
 * visible.
 */
int bch2_bucket_backpointer_del(struct btree_trans *trans,
                                struct bkey_i_alloc_v4 *a,
                                struct bch_backpointer bp,
                                struct bkey_s_c orig_k)
        struct bch_fs *c = trans->c;
        struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
        unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
        struct btree_iter bp_iter;

        /* Search the sorted inline array for an exact match */
        for (i = 0; i < nr; i++) {
                int cmp = backpointer_cmp(bps[i], bp) ?:
                        memcmp(&bps[i], &bp, sizeof(bp));
        /* Found inline: remove and shrink the alloc key in place */
        array_remove_item(bps, nr, i);
        SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
        set_alloc_v4_u64s(a);
        /* Not inline: look it up in the backpointers btree */
        bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                             bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
                             BTREE_ITER_WITH_UPDATES);
        k = bch2_btree_iter_peek_slot(&bp_iter);
        if (k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
                /* Missing everywhere: build a verbose report */
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, "backpointer not found when deleting");
                printbuf_indent_add(&buf, 2);
                prt_printf(&buf, "searching for ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_printf(&buf, "got ");
                bch2_bkey_val_to_text(&buf, c, k);
                prt_str(&buf, "alloc ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);
                /* Before fsck has run, just log; after, flag inconsistency */
                if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
                        bch_err(c, "%s", buf.buf);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
        ret = bch2_btree_delete_at(trans, &bp_iter, 0);
        bch2_trans_iter_exit(trans, &bp_iter);
/*
 * Add backpointer @bp to the bucket whose (mutable) alloc key is @a.
 *
 * The sorted inline array in the alloc key is checked for duplicates and
 * overlaps, then used if it has room; otherwise the backpointer spills into
 * the backpointers btree.  @orig_k is the extent being pointed from, used
 * only for error reports.
 *
 * NOTE(review): excerpt incomplete — locals (ca, k, ret), parts of the
 * overlap condition, and several returns/error paths are not visible.
 */
int bch2_bucket_backpointer_add(struct btree_trans *trans,
                                struct bkey_i_alloc_v4 *a,
                                struct bch_backpointer bp,
                                struct bkey_s_c orig_k)
        struct bch_fs *c = trans->c;
        struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
        unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
        struct bkey_i_backpointer *bp_k;
        struct btree_iter bp_iter;

        /* Check for duplicates: */
        for (i = 0; i < nr; i++) {
                int cmp = backpointer_cmp(bps[i], bp);
        /* Check that @bp doesn't overlap its neighbors in the sorted array */
        (bps[i - 1].bucket_offset +
         bps[i - 1].bucket_len > bp.bucket_offset)) ||
        (bp.bucket_offset + bp.bucket_len > bps[i].bucket_offset))) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, "overlapping backpointer found when inserting ");
                bch2_backpointer_to_text(&buf, &bp);
                printbuf_indent_add(&buf, 2);
                prt_printf(&buf, "into ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);
                /* Before fsck: log only; after: flag filesystem inconsistency */
                if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
                        bch_err(c, "%s", buf.buf);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
        /* Room in the inline array: insert sorted and grow the alloc key */
        if (nr < BCH_ALLOC_V4_NR_BACKPOINTERS_MAX) {
                array_insert_item(bps, nr, i, bp);
                SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
                set_alloc_v4_u64s(a);

        /* Overflow: use backpointer btree */
        bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
        ret = PTR_ERR_OR_ZERO(bp_k);

        ca = bch_dev_bkey_exists(c, a->k.p.inode);

        bkey_backpointer_init(&bp_k->k_i);
        bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);

        bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_k->k.p,
                             BTREE_ITER_WITH_UPDATES);
        k = bch2_btree_iter_peek_slot(&bp_iter);
        /* Slot must be empty — an existing key is an inconsistency */
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, "existing btree backpointer key found when inserting ");
                bch2_backpointer_to_text(&buf, &bp);
                printbuf_indent_add(&buf, 2);
                prt_printf(&buf, "found ");
                bch2_bkey_val_to_text(&buf, c, k);
                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);
                if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
                        bch_err(c, "%s", buf.buf);
                bch2_trans_inconsistent(trans, "%s", buf.buf);
        ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
        bch2_trans_iter_exit(trans, &bp_iter);
/*
 * Find the next backpointer >= *bp_offset for @bucket, storing it in *dst
 * and updating *bp_offset.
 *
 * Iteration protocol: inline backpointers (from the bucket's alloc_v4 key)
 * come first with their raw offsets; btree backpointers follow with offsets
 * biased by BACKPOINTER_OFFSET_MAX; *bp_offset == U64_MAX means done.  If
 * @gen >= 0, only a bucket with matching generation is considered.
 *
 * NOTE(review): excerpt incomplete — the bp_offset parameter declaration,
 * locals (k, i, ret), and several branch/return lines are not visible.
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
                              struct bpos bucket, int gen,
                              struct bch_backpointer *dst)
        struct bch_fs *c = trans->c;
        struct bpos bp_pos, bp_end_pos;
        struct btree_iter alloc_iter, bp_iter = { NULL };
        struct bkey_s_c_alloc_v4 a;

        /* U64_MAX is the "iteration finished" sentinel */
        if (*bp_offset == U64_MAX)

        /* Btree search range: [biased start offset, start of next bucket) */
        bp_pos = bucket_pos_to_bp(c, bucket,
                                  max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
        bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);

        bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
                             bucket, BTREE_ITER_CACHED);
        k = bch2_btree_iter_peek_slot(&alloc_iter);

        if (k.k->type != KEY_TYPE_alloc_v4)

        a = bkey_s_c_to_alloc_v4(k);
        if (gen >= 0 && a.v->gen != gen)

        /* Phase 1: inline backpointers stored in the alloc key */
        for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++) {
                if (alloc_v4_backpointers_c(a.v)[i].bucket_offset < *bp_offset)
                *dst = alloc_v4_backpointers_c(a.v)[i];
                *bp_offset = dst->bucket_offset;

        /* Phase 2: backpointers spilled to the backpointers btree */
        for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
                if (bpos_cmp(k.k->p, bp_end_pos) >= 0)
                if (k.k->type != KEY_TYPE_backpointer)
                *dst = *bkey_s_c_to_backpointer(k).v;
                /* Bias btree offsets so they sort after inline ones */
                *bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;

        *bp_offset = U64_MAX;
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
/*
 * Report a backpointer whose target (@thing_it_points_to: "extent" or
 * "btree node") doesn't match: builds a multi-line report and either logs it
 * (before backpointers fsck has completed) or flags a filesystem
 * inconsistency.
 *
 * NOTE(review): excerpt incomplete — the bucket/bp_offset/k parameters and
 * some declarations (bp_pos) are not visible.
 */
static void backpointer_not_found(struct btree_trans *trans,
                                  struct bch_backpointer bp,
                                  const char *thing_it_points_to)
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;

        prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
        prt_printf(&buf, "bucket: ");
        bch2_bpos_to_text(&buf, bucket);
        prt_printf(&buf, "\n  ");

        /* Biased offsets live in the backpointers btree: show that pos too */
        if (bp_offset >= BACKPOINTER_OFFSET_MAX) {
                bucket_pos_to_bp(c, bucket,
                                 bp_offset - BACKPOINTER_OFFSET_MAX);
                prt_printf(&buf, "backpointer pos: ");
                bch2_bpos_to_text(&buf, bp_pos);
                prt_printf(&buf, "\n  ");

        bch2_backpointer_to_text(&buf, &bp);
        prt_printf(&buf, "\n  ");
        bch2_bkey_val_to_text(&buf, c, k);
        if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
                bch_err_ratelimited(c, "%s", buf.buf);
        bch2_trans_inconsistent(trans, "%s", buf.buf);
/*
 * Look up the extent a backpointer points to, returning it (with @iter
 * positioned on it) if its recomputed backpointer still matches; otherwise
 * report the mismatch and return bkey_s_c_null.  For btree-node-level
 * backpointers, falls through to bch2_backpointer_get_node().
 *
 * NOTE(review): excerpt incomplete — the bucket/bp_offset parameters,
 * locals (k, b), and some control flow around the matched/unmatched paths
 * are not visible.
 */
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
                                         struct btree_iter *iter,
                                         struct bch_backpointer bp)
        struct bch_fs *c = trans->c;

        /* Clamp to the current root level in case the tree shrank */
        bch2_trans_node_iter_init(trans, iter,
                                  min(bp.level, c->btree_roots[bp.btree_id].level),
        k = bch2_btree_iter_peek_slot(iter);
        bch2_trans_iter_exit(trans, iter);

        /* One above the root level: the target is the root key itself */
        if (bp.level == c->btree_roots[bp.btree_id].level + 1)
                k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);

        if (extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
        bch2_trans_iter_exit(trans, iter);

        /*
         * If a backpointer for a btree node wasn't found, it may be
         * because it was overwritten by a new btree node that hasn't
         * been written out yet - backpointer_get_node() checks for
         */
        b = bch2_backpointer_get_node(trans, iter, bucket, bp_offset, bp);
        if (!IS_ERR_OR_NULL(b))
                return bkey_i_to_s_c(&b->key);
        bch2_trans_iter_exit(trans, iter);
        return bkey_s_c_err(PTR_ERR(b));
        return bkey_s_c_null;

        backpointer_not_found(trans, bucket, bp_offset, bp, k, "extent");
        return bkey_s_c_null;
/*
 * Look up the btree node a backpointer points to.  A node that will be made
 * reachable (not yet written out) yields
 * -BCH_ERR_backpointer_to_overwritten_btree_node rather than a mismatch
 * report.
 *
 * NOTE(review): excerpt incomplete — the bucket/bp_offset parameters, the
 * local b declaration, iterator-init arguments and return paths are not
 * visible.
 */
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
                                        struct btree_iter *iter,
                                        struct bch_backpointer bp)
        struct bch_fs *c = trans->c;

        bch2_trans_node_iter_init(trans, iter,
        b = bch2_btree_iter_peek_node(iter);

        /* Verify the node's key still generates this backpointer */
        if (extent_matches_bp(c, bp.btree_id, bp.level,
                              bkey_i_to_s_c(&b->key),
        if (btree_node_will_make_reachable(b)) {
                /* Node was overwritten but not yet written out — not an error */
                b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
                backpointer_not_found(trans, bucket, bp_offset, bp,
                                      bkey_i_to_s_c(&b->key), "btree node");
        bch2_trans_iter_exit(trans, iter);
601 static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
604 struct bch_fs *c = trans->c;
605 struct btree_iter alloc_iter = { NULL };
607 struct bkey_s_c alloc_k;
608 struct printbuf buf = PRINTBUF;
611 if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
612 "backpointer for mising device:\n%s",
613 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
614 ret = bch2_btree_delete_at(trans, bp_iter, 0);
618 ca = bch_dev_bkey_exists(c, k.k->p.inode);
620 bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
621 bp_pos_to_bucket(c, k.k->p), 0);
623 alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
624 ret = bkey_err(alloc_k);
628 if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
629 "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
630 alloc_iter.pos.inode, alloc_iter.pos.offset,
631 (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
632 ret = bch2_btree_delete_at(trans, bp_iter, 0);
637 bch2_trans_iter_exit(trans, &alloc_iter);
/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
        struct btree_iter iter;

        /*
         * Walk every key in the backpointers btree, checking and committing
         * one key per transaction (lazily going read-write if needed).
         */
        return bch2_trans_run(c,
                for_each_btree_key_commit(&trans, iter,
                                BTREE_ID_backpointers, POS_MIN, 0, k,
                                NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
                        bch2_check_btree_backpointer(&trans, &iter, k)));
/*
 * Verify that backpointer @bp for extent @orig_k exists, either inline in
 * the bucket's alloc_v4 key or in the backpointers btree; if missing,
 * (re)create it via bch2_bucket_backpointer_add() — unconditionally when
 * upgrading from a pre-backpointers superblock or reconstructing alloc info,
 * otherwise after fsck confirmation.
 *
 * NOTE(review): excerpt incomplete — locals (ret), loop-exit conditions,
 * goto labels and cleanup ordering are not visible.
 */
static int check_bp_exists(struct btree_trans *trans,
                           struct bpos bucket_pos,
                           struct bch_backpointer bp,
                           struct bkey_s_c orig_k)
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter, bp_iter = { NULL };
        struct printbuf buf = PRINTBUF;
        struct bkey_s_c alloc_k, bp_k;

        bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
        alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
        ret = bkey_err(alloc_k);

        /* First look for @bp in the inline array of the alloc key */
        if (alloc_k.k->type == KEY_TYPE_alloc_v4) {
                struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(alloc_k);
                const struct bch_backpointer *bps = alloc_v4_backpointers_c(a.v);
                unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(a.v);

                for (i = 0; i < nr; i++) {
                        int cmp = backpointer_cmp(bps[i], bp) ?:
                                memcmp(&bps[i], &bp, sizeof(bp));

        /* Not inline: look in the backpointers btree */
        bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                             bucket_pos_to_bp(c, bucket_pos, bp.bucket_offset),
        bp_k = bch2_btree_iter_peek_slot(&bp_iter);
        ret = bkey_err(bp_k);

        if (bp_k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp)))

        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);

        /* Missing backpointer: report and (conditionally) recreate it */
        prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
                   bch2_btree_ids[bp.btree_id], bp.level);
        bch2_bkey_val_to_text(&buf, c, orig_k);
        prt_printf(&buf, "\nin alloc key ");
        bch2_bkey_val_to_text(&buf, c, alloc_k);

        if (c->sb.version < bcachefs_metadata_version_backpointers ||
            c->opts.reconstruct_alloc ||
            fsck_err(c, "%s", buf.buf)) {
                struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, alloc_k);

                ret = PTR_ERR_OR_ZERO(a) ?:
                        bch2_bucket_backpointer_add(trans, a, bp, orig_k) ?:
                        bch2_trans_update(trans, &alloc_iter, &a->k_i, 0);
/*
 * For the key at @iter (any btree level), verify that every extent pointer
 * in it has a matching backpointer, creating missing ones via
 * check_bp_exists().
 *
 * NOTE(review): excerpt incomplete — locals (k, ret) and the early-return /
 * error handling between statements are not visible.
 */
static int check_extent_to_backpointers(struct btree_trans *trans,
                                        struct btree_iter *iter)
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs;
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        /* ALL_LEVELS iteration: may return interior-node keys too */
        k = bch2_btree_iter_peek_all_levels(iter);

        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k);
/*
 * The root node's key isn't visited by the normal extent walk, so check its
 * pointers' backpointers separately.  Note level + 1: the root's key lives
 * conceptually one level above the root node itself.
 *
 * NOTE(review): excerpt incomplete — locals (b, k, ret) and error handling
 * after PTR_ERR_OR_ZERO are not visible.
 */
static int check_btree_root_to_backpointers(struct btree_trans *trans,
                                            enum btree_id btree_id)
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_ptrs_c ptrs;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;

        bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
                                  c->btree_roots[btree_id].level, 0);
        b = bch2_btree_iter_peek_node(&iter);
        ret = PTR_ERR_OR_ZERO(b);

        /* We asked for the root level, so this must be the root */
        BUG_ON(b != btree_node_root(c, b));

        k = bkey_i_to_s_c(&b->key);
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                bch2_extent_ptr_to_bp(c, iter.btree_id, iter.path->level + 1,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k);
        bch2_trans_iter_exit(trans, &iter);
/*
 * Fsck pass: walk every btree (extent leaves where the btree type has data
 * pointers, interior nodes always) and verify that every pointer has a
 * backpointer; then check each btree's root key.
 *
 * NOTE(review): excerpt incomplete — the loop structure around commit_do(),
 * the ret declaration and error-exit paths are not visible.
 */
int bch2_check_extents_to_backpointers(struct bch_fs *c)
        struct btree_trans trans;
        struct btree_iter iter;
        enum btree_id btree_id;

        bch2_trans_init(&trans, c, 0, 0);
        for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
                /* Btrees without data pointers: only interior nodes matter */
                unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

                bch2_trans_node_iter_init(&trans, &iter, btree_id, POS_MIN, 0,
                                          BTREE_ITER_ALL_LEVELS|
                                          BTREE_ITER_PREFETCH);

                /* One key per transaction commit */
                ret = commit_do(&trans, NULL, NULL,
                                BTREE_INSERT_LAZY_RW|
                                check_extent_to_backpointers(&trans, &iter));
                } while (!bch2_btree_iter_advance(&iter));

                bch2_trans_iter_exit(&trans, &iter);

                /* The root key isn't covered by the walk above */
                ret = commit_do(&trans, NULL, NULL,
                                BTREE_INSERT_LAZY_RW|
                                check_btree_root_to_backpointers(&trans, btree_id));
        bch2_trans_exit(&trans);
/*
 * Advance to the next backpointer for @bucket (via *bp_offset) and verify
 * it resolves to a live extent/btree node, deleting it (after fsck
 * confirmation) if the target is missing.
 *
 * NOTE(review): excerpt incomplete — the bucket/bp_offset parameters,
 * locals (k, ret) and several returns are not visible.
 */
static int check_one_backpointer(struct btree_trans *trans,
        struct btree_iter iter;
        struct bch_backpointer bp;
        struct printbuf buf = PRINTBUF;

        /* gen < 0: don't filter by bucket generation */
        ret = bch2_get_next_backpointer(trans, bucket, -1,
        if (ret || *bp_offset == U64_MAX)

        k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
        /* Overwritten-but-unwritten btree node: not an error, skip it */
        if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)

        if (fsck_err_on(!k.k, trans->c,
                        "%s backpointer points to missing extent\n%s",
                        *bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
                        (bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
                ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
                        bch_err(trans->c, "backpointer at %llu not found", *bp_offset);
        bch2_trans_iter_exit(trans, &iter);
/*
 * Fsck pass: for every bucket in the alloc btree, walk all of its
 * backpointers (inline and btree) and verify each points at a live extent.
 *
 * NOTE(review): excerpt incomplete — the bp_offset declaration, the ret
 * declaration and the inner loop's continuation condition are not visible.
 */
int bch2_check_backpointers_to_extents(struct bch_fs *c)
        struct btree_trans trans;
        struct btree_iter iter;

        bch2_trans_init(&trans, c, 0, 0);
        for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
                           BTREE_ITER_PREFETCH, k, ret) {
                /* One backpointer per committed transaction */
                while (!(ret = commit_do(&trans, NULL, NULL,
                                         BTREE_INSERT_LAZY_RW|
                        check_one_backpointer(&trans, iter.pos, &bp_offset))) &&
        bch2_trans_iter_exit(&trans, &iter);
        bch2_trans_exit(&trans);
        /* for_each_btree_key can leave ret > 0; callers expect 0 or -errno */
        return ret < 0 ? ret : 0;