1 // SPDX-License-Identifier: GPL-2.0
3 #include "alloc_background.h"
4 #include "backpointers.h"
5 #include "btree_cache.h"
6 #include "btree_update.h"
11 #define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10
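/*
 * Backpointer btree positions are device sector offsets scaled up by
 * 2^MAX_EXTENT_COMPRESS_RATIO_SHIFT; the low bits carry the offset into the
 * extent (p.crc.offset below), so backpointers to compressed extents whose
 * data starts in the same sector still get distinct positions.
 */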
14 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc btree:
17 static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
20 struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
21 u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
23 return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
27 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
29 static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
33 struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
36 ret = POS(bucket.inode,
37 (bucket_to_sector(ca, bucket.offset) <<
38 MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
40 BUG_ON(bpos_cmp(bucket, bp_pos_to_bucket(c, ret)));
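/*
 * Worked example (illustrative only; assumes a hypothetical 512-sector
 * bucket): bucket 1:10 with an encoded bucket_offset of 3 maps to backpointer
 * pos 1:(((10 * 512) << 10) + 3) = 1:5242883; shifting back down by
 * MAX_EXTENT_COMPRESS_RATIO_SHIFT and converting sectors back to a bucket
 * recovers 1:10, which is what the BUG_ON above asserts.
 */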
45 void bch2_extent_ptr_to_bp(struct bch_fs *c,
46 enum btree_id btree_id, unsigned level,
47 struct bkey_s_c k, struct extent_ptr_decoded p,
48 struct bpos *bucket_pos, struct bch_backpointer *bp)
50 enum bch_data_type data_type = level ? BCH_DATA_btree : BCH_DATA_user;
51 s64 sectors = level ? btree_sectors(c) : k.k->size;
54 *bucket_pos = PTR_BUCKET_POS_OFFSET(c, &p.ptr, &bucket_offset);
55 *bp = (struct bch_backpointer) {
58 .data_type = data_type,
59 .bucket_offset = ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
61 .bucket_len = ptr_disk_sectors(sectors, p),
66 static bool extent_matches_bp(struct bch_fs *c,
67 enum btree_id btree_id, unsigned level,
70 struct bch_backpointer bp)
72 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
73 const union bch_extent_entry *entry;
74 struct extent_ptr_decoded p;
76 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
78 struct bch_backpointer bp2;
83 bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
85 if (!bpos_cmp(bucket, bucket2) &&
86 !memcmp(&bp, &bp2, sizeof(bp)))
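/*
 * Validity check for KEY_TYPE_backpointer keys: the value must be at least
 * sizeof(struct bch_backpointer), and the key's position must be consistent
 * with the bucket_offset stored in the value (i.e. it must round-trip through
 * bucket_pos_to_bp()).
 */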
93 int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
94 int rw, struct printbuf *err)
96 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
97 struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
99 if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
100 prt_str(err, "incorrect value size");
104 if (bpos_cmp(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
105 prt_str(err, "backpointer at wrong pos");
112 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
114 prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
115 bch2_btree_ids[bp->btree_id],
117 (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
118 (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
120 bch2_bpos_to_text(out, bp->pos);
123 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
125 bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
128 void bch2_backpointer_swab(struct bkey_s k)
130 struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
132 bp.v->bucket_offset = swab32(bp.v->bucket_offset);
133 bp.v->bucket_len = swab32(bp.v->bucket_len);
134 bch2_bpos_swab(&bp.v->pos);
137 #define BACKPOINTER_OFFSET_MAX ((1ULL << 40) - 1)
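/*
 * Backpointers live in one of two places: a small number are stored inline in
 * the bucket's alloc key (see alloc_v4_backpointers()); on overflow they go
 * to the backpointers btree. A single u64 cursor is used to address both:
 * values below BACKPOINTER_OFFSET_MAX refer to the inline list, values at or
 * above it refer to the btree copy, offset by BACKPOINTER_OFFSET_MAX.
 */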
139 static inline int backpointer_cmp(struct bch_backpointer l, struct bch_backpointer r)
141 return cmp_int(l.bucket_offset, r.bucket_offset);
144 static int bch2_backpointer_del_by_offset(struct btree_trans *trans,
147 struct bch_backpointer bp)
149 struct bch_fs *c = trans->c;
150 struct btree_iter iter;
154 if (bp_offset < BACKPOINTER_OFFSET_MAX) {
155 struct bch_backpointer *bps;
156 struct bkey_i_alloc_v4 *a;
159 bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
163 BTREE_ITER_WITH_UPDATES);
164 k = bch2_btree_iter_peek_slot(&iter);
169 if (k.k->type != KEY_TYPE_alloc_v4) {
174 a = bch2_alloc_to_v4_mut(trans, k);
175 ret = PTR_ERR_OR_ZERO(a);
178 bps = alloc_v4_backpointers(&a->v);
179 nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
181 for (i = 0; i < nr; i++) {
182 if (bps[i].bucket_offset == bp_offset)
184 if (bps[i].bucket_offset > bp_offset)
191 if (memcmp(&bps[i], &bp, sizeof(bp))) {
195 array_remove_item(bps, nr, i);
196 SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
197 set_alloc_v4_u64s(a);
198 ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
200 bp_offset -= BACKPOINTER_OFFSET_MAX;
202 bch2_trans_iter_init(trans, &iter, BTREE_ID_backpointers,
203 bucket_pos_to_bp(c, bucket, bp_offset),
206 BTREE_ITER_WITH_UPDATES);
207 k = bch2_btree_iter_peek_slot(&iter);
212 if (k.k->type != KEY_TYPE_backpointer ||
213 memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
218 ret = bch2_btree_delete_at(trans, &iter, 0);
221 bch2_trans_iter_exit(trans, &iter);
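/*
 * Delete the backpointer @bp for the bucket whose alloc key is @a: remove it
 * from the alloc key's inline list if it's there, otherwise delete it from
 * the backpointers btree. If it can't be found in either place, report the
 * mismatch; it's only treated as an inconsistency once backpointer fsck has
 * completed.
 */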
225 int bch2_bucket_backpointer_del(struct btree_trans *trans,
226 struct bkey_i_alloc_v4 *a,
227 struct bch_backpointer bp,
228 struct bkey_s_c orig_k)
230 struct bch_fs *c = trans->c;
231 struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
232 unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
233 struct btree_iter bp_iter;
237 for (i = 0; i < nr; i++) {
238 int cmp = backpointer_cmp(bps[i], bp) ?:
239 memcmp(&bps[i], &bp, sizeof(bp));
248 array_remove_item(bps, nr, i);
249 SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
250 set_alloc_v4_u64s(a);
253 bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
254 bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
257 BTREE_ITER_WITH_UPDATES);
258 k = bch2_btree_iter_peek_slot(&bp_iter);
263 if (k.k->type != KEY_TYPE_backpointer ||
264 memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
265 struct printbuf buf = PRINTBUF;
267 prt_printf(&buf, "backpointer not found when deleting");
269 printbuf_indent_add(&buf, 2);
271 prt_printf(&buf, "searching for ");
272 bch2_backpointer_to_text(&buf, &bp);
275 prt_printf(&buf, "got ");
276 bch2_bkey_val_to_text(&buf, c, k);
279 prt_str(&buf, "alloc ");
280 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
283 prt_printf(&buf, "for ");
284 bch2_bkey_val_to_text(&buf, c, orig_k);
286 if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
287 bch_err(c, "%s", buf.buf);
290 bch2_trans_inconsistent(trans, "%s", buf.buf);
296 ret = bch2_btree_delete_at(trans, &bp_iter, 0);
298 bch2_trans_iter_exit(trans, &bp_iter);
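/*
 * Add the backpointer @bp for the bucket whose alloc key is @a: after
 * checking that it doesn't duplicate or overlap an existing backpointer,
 * store it inline in the alloc key if there's room, otherwise insert it into
 * the backpointers btree.
 */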
302 int bch2_bucket_backpointer_add(struct btree_trans *trans,
303 struct bkey_i_alloc_v4 *a,
304 struct bch_backpointer bp,
305 struct bkey_s_c orig_k)
307 struct bch_fs *c = trans->c;
309 struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
310 unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
311 struct bkey_i_backpointer *bp_k;
312 struct btree_iter bp_iter;
316 /* Check for duplicates: */
317 for (i = 0; i < nr; i++) {
318 int cmp = backpointer_cmp(bps[i], bp);
324 (bps[i - 1].bucket_offset +
325 bps[i - 1].bucket_len > bp.bucket_offset)) ||
327 (bp.bucket_offset + bp.bucket_len > bps[i].bucket_offset))) {
328 struct printbuf buf = PRINTBUF;
330 prt_printf(&buf, "overlapping backpointer found when inserting ");
331 bch2_backpointer_to_text(&buf, &bp);
333 printbuf_indent_add(&buf, 2);
335 prt_printf(&buf, "into ");
336 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
339 prt_printf(&buf, "for ");
340 bch2_bkey_val_to_text(&buf, c, orig_k);
342 if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
343 bch_err(c, "%s", buf.buf);
345 bch2_trans_inconsistent(trans, "%s", buf.buf);
351 if (nr < BCH_ALLOC_V4_NR_BACKPOINTERS_MAX) {
352 array_insert_item(bps, nr, i, bp);
353 SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
354 set_alloc_v4_u64s(a);
358 /* Overflow: use backpointer btree */
359 bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
360 ret = PTR_ERR_OR_ZERO(bp_k);
364 ca = bch_dev_bkey_exists(c, a->k.p.inode);
366 bkey_backpointer_init(&bp_k->k_i);
367 bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);
370 bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_k->k.p,
373 BTREE_ITER_WITH_UPDATES);
374 k = bch2_btree_iter_peek_slot(&bp_iter);
380 struct printbuf buf = PRINTBUF;
382 prt_printf(&buf, "existing btree backpointer key found when inserting ");
383 bch2_backpointer_to_text(&buf, &bp);
385 printbuf_indent_add(&buf, 2);
387 prt_printf(&buf, "found ");
388 bch2_bkey_val_to_text(&buf, c, k);
391 prt_printf(&buf, "for ");
392 bch2_bkey_val_to_text(&buf, c, orig_k);
394 if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
395 bch_err(c, "%s", buf.buf);
397 bch2_trans_inconsistent(trans, "%s", buf.buf);
404 ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
406 bch2_trans_iter_exit(trans, &bp_iter);
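/*
 * Iterating over a bucket's backpointers uses a u64 cursor that addresses
 * both the inline and the btree copies (see BACKPOINTER_OFFSET_MAX above).
 * A minimal sketch of the pattern used by callers (the caller advances the
 * cursor between calls; U64_MAX means no more backpointers):
 *
 *   u64 bp_offset = 0;
 *   struct bch_backpointer bp;
 *   int ret;
 *
 *   while (!(ret = bch2_get_next_backpointer(trans, bucket, -1,
 *                                            &bp_offset, &bp)) &&
 *          bp_offset != U64_MAX) {
 *           ... use bp ...
 *           bp_offset++;
 *   }
 */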
411 * Find the next backpointer >= *bp_offset:
413 int bch2_get_next_backpointer(struct btree_trans *trans,
414 struct bpos bucket, int gen,
416 struct bch_backpointer *dst)
418 struct bch_fs *c = trans->c;
419 struct bpos bp_pos, bp_end_pos;
420 struct btree_iter alloc_iter, bp_iter = { NULL };
422 struct bkey_s_c_alloc_v4 a;
426 if (*bp_offset == U64_MAX)
429 bp_pos = bucket_pos_to_bp(c, bucket,
430 max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
431 bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
433 bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
434 bucket, BTREE_ITER_CACHED);
435 k = bch2_btree_iter_peek_slot(&alloc_iter);
440 if (k.k->type != KEY_TYPE_alloc_v4)
443 a = bkey_s_c_to_alloc_v4(k);
444 if (gen >= 0 && a.v->gen != gen)
447 for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++) {
448 if (alloc_v4_backpointers_c(a.v)[i].bucket_offset < *bp_offset)
451 *dst = alloc_v4_backpointers_c(a.v)[i];
452 *bp_offset = dst->bucket_offset;
456 for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
458 if (bpos_cmp(k.k->p, bp_end_pos) >= 0)
461 if (k.k->type != KEY_TYPE_backpointer)
464 *dst = *bkey_s_c_to_backpointer(k).v;
465 *bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
469 *bp_offset = U64_MAX;
471 bch2_trans_iter_exit(trans, &bp_iter);
472 bch2_trans_iter_exit(trans, &alloc_iter);
476 static void backpointer_not_found(struct btree_trans *trans,
479 struct bch_backpointer bp,
481 const char *thing_it_points_to)
483 struct bch_fs *c = trans->c;
484 struct printbuf buf = PRINTBUF;
486 prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
488 prt_printf(&buf, "bucket: ");
489 bch2_bpos_to_text(&buf, bucket);
490 prt_printf(&buf, "\n ");
492 if (bp_offset >= BACKPOINTER_OFFSET_MAX) {
494 bucket_pos_to_bp(c, bucket,
495 bp_offset - BACKPOINTER_OFFSET_MAX);
496 prt_printf(&buf, "backpointer pos: ");
497 bch2_bpos_to_text(&buf, bp_pos);
498 prt_printf(&buf, "\n ");
501 bch2_backpointer_to_text(&buf, &bp);
502 prt_printf(&buf, "\n ");
503 bch2_bkey_val_to_text(&buf, c, k);
504 if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
505 bch_err_ratelimited(c, "%s", buf.buf);
507 bch2_trans_inconsistent(trans, "%s", buf.buf);
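/*
 * Look up the extent a backpointer points to: returns the key if it still
 * matches the backpointer, bkey_s_c_null (after reporting the mismatch) if it
 * doesn't, or an error. Backpointers to btree nodes that can't be resolved
 * here are retried via bch2_backpointer_get_node(), since the node may have
 * been overwritten by one that hasn't been written out yet.
 */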
512 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
513 struct btree_iter *iter,
516 struct bch_backpointer bp)
518 struct bch_fs *c = trans->c;
521 bch2_trans_node_iter_init(trans, iter,
525 min(bp.level, c->btree_roots[bp.btree_id].level),
527 k = bch2_btree_iter_peek_slot(iter);
529 bch2_trans_iter_exit(trans, iter);
533 if (bp.level == c->btree_roots[bp.btree_id].level + 1)
534 k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);
536 if (extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
539 bch2_trans_iter_exit(trans, iter);
545 * If a backpointer for a btree node wasn't found, it may be
546 * because it was overwritten by a new btree node that hasn't
547 * been written out yet - backpointer_get_node() checks for
550 b = bch2_backpointer_get_node(trans, iter, bucket, bp_offset, bp);
551 if (!IS_ERR_OR_NULL(b))
552 return bkey_i_to_s_c(&b->key);
554 bch2_trans_iter_exit(trans, iter);
557 return bkey_s_c_err(PTR_ERR(b));
558 return bkey_s_c_null;
561 backpointer_not_found(trans, bucket, bp_offset, bp, k, "extent");
562 return bkey_s_c_null;
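/*
 * Look up the btree node a backpointer points to: returns the node if it
 * matches, NULL if it doesn't, or
 * ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node) if the node was
 * overwritten by one that hasn't been written out yet.
 */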
565 struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
566 struct btree_iter *iter,
569 struct bch_backpointer bp)
571 struct bch_fs *c = trans->c;
576 bch2_trans_node_iter_init(trans, iter,
582 b = bch2_btree_iter_peek_node(iter);
586 if (extent_matches_bp(c, bp.btree_id, bp.level,
587 bkey_i_to_s_c(&b->key),
591 if (btree_node_will_make_reachable(b)) {
592 b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
594 backpointer_not_found(trans, bucket, bp_offset, bp,
595 bkey_i_to_s_c(&b->key), "btree node");
599 bch2_trans_iter_exit(trans, iter);
603 static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
606 struct bch_fs *c = trans->c;
607 struct btree_iter alloc_iter = { NULL };
609 struct bkey_s_c alloc_k;
610 struct printbuf buf = PRINTBUF;
613 if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
614 "backpointer for mising device:\n%s",
615 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
616 ret = bch2_btree_delete_at(trans, bp_iter, 0);
620 ca = bch_dev_bkey_exists(c, k.k->p.inode);
622 bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
623 bp_pos_to_bucket(c, k.k->p), 0);
625 alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
626 ret = bkey_err(alloc_k);
630 if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
631 "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
632 alloc_iter.pos.inode, alloc_iter.pos.offset,
633 (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
634 ret = bch2_btree_delete_at(trans, bp_iter, 0);
639 bch2_trans_iter_exit(trans, &alloc_iter);
644 /* verify that every backpointer has a corresponding alloc key */
645 int bch2_check_btree_backpointers(struct bch_fs *c)
647 struct btree_iter iter;
650 return bch2_trans_run(c,
651 for_each_btree_key_commit(&trans, iter,
652 BTREE_ID_backpointers, POS_MIN, 0, k,
653 NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
654 bch2_check_btree_backpointer(&trans, &iter, k)));
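/*
 * Given a backpointer computed from an extent pointer, verify that it exists,
 * either inline in the bucket's alloc key or in the backpointers btree; if it
 * is missing (and fsck confirms, or alloc info is being reconstructed, or the
 * filesystem predates backpointers), recreate it via
 * bch2_bucket_backpointer_add().
 */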
657 static int check_bp_exists(struct btree_trans *trans,
658 struct bpos bucket_pos,
659 struct bch_backpointer bp,
660 struct bkey_s_c orig_k)
662 struct bch_fs *c = trans->c;
663 struct btree_iter alloc_iter, bp_iter = { NULL };
664 struct printbuf buf = PRINTBUF;
665 struct bkey_s_c alloc_k, bp_k;
668 bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
669 alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
670 ret = bkey_err(alloc_k);
674 if (alloc_k.k->type == KEY_TYPE_alloc_v4) {
675 struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(alloc_k);
676 const struct bch_backpointer *bps = alloc_v4_backpointers_c(a.v);
677 unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(a.v);
679 for (i = 0; i < nr; i++) {
680 int cmp = backpointer_cmp(bps[i], bp) ?:
681 memcmp(&bps[i], &bp, sizeof(bp));
691 bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
692 bucket_pos_to_bp(c, bucket_pos, bp.bucket_offset),
694 bp_k = bch2_btree_iter_peek_slot(&bp_iter);
695 ret = bkey_err(bp_k);
699 if (bp_k.k->type != KEY_TYPE_backpointer ||
700 memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp)))
705 bch2_trans_iter_exit(trans, &bp_iter);
706 bch2_trans_iter_exit(trans, &alloc_iter);
710 prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
711 bch2_btree_ids[bp.btree_id], bp.level);
712 bch2_bkey_val_to_text(&buf, c, orig_k);
713 prt_printf(&buf, "\nin alloc key ");
714 bch2_bkey_val_to_text(&buf, c, alloc_k);
716 if (c->sb.version < bcachefs_metadata_version_backpointers ||
717 c->opts.reconstruct_alloc ||
718 fsck_err(c, "%s", buf.buf)) {
719 struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, alloc_k);
721 ret = PTR_ERR_OR_ZERO(a) ?:
722 bch2_bucket_backpointer_add(trans, a, bp, orig_k) ?:
723 bch2_trans_update(trans, &alloc_iter, &a->k_i, 0);
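/*
 * For the key at @iter's current position (which may be at any btree level),
 * check that every pointer in it has a matching backpointer.
 */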
729 static int check_extent_to_backpointers(struct btree_trans *trans,
730 struct btree_iter *iter)
732 struct bch_fs *c = trans->c;
733 struct bkey_ptrs_c ptrs;
734 const union bch_extent_entry *entry;
735 struct extent_ptr_decoded p;
739 k = bch2_btree_iter_peek_all_levels(iter);
746 ptrs = bch2_bkey_ptrs_c(k);
747 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
748 struct bpos bucket_pos;
749 struct bch_backpointer bp;
754 bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
755 k, p, &bucket_pos, &bp);
757 ret = check_bp_exists(trans, bucket_pos, bp, k);
765 static int check_btree_root_to_backpointers(struct btree_trans *trans,
766 enum btree_id btree_id)
768 struct bch_fs *c = trans->c;
769 struct btree_iter iter;
772 struct bkey_ptrs_c ptrs;
773 struct extent_ptr_decoded p;
774 const union bch_extent_entry *entry;
777 bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
778 c->btree_roots[btree_id].level, 0);
779 b = bch2_btree_iter_peek_node(&iter);
780 ret = PTR_ERR_OR_ZERO(b);
784 BUG_ON(b != btree_node_root(c, b));
786 k = bkey_i_to_s_c(&b->key);
787 ptrs = bch2_bkey_ptrs_c(k);
788 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
789 struct bpos bucket_pos;
790 struct bch_backpointer bp;
795 bch2_extent_ptr_to_bp(c, iter.btree_id, iter.path->level + 1,
796 k, p, &bucket_pos, &bp);
798 ret = check_bp_exists(trans, bucket_pos, bp, k);
803 bch2_trans_iter_exit(trans, &iter);
812 static inline int bbpos_cmp(struct bbpos l, struct bbpos r)
814 return cmp_int(l.btree, r.btree) ?: bpos_cmp(l.pos, r.pos);
817 static inline struct bbpos bbpos_successor(struct bbpos pos)
819 if (bpos_cmp(pos.pos, SPOS_MAX)) {
820 pos.pos = bpos_successor(pos.pos);
824 if (pos.btree != BTREE_ID_NR) {
834 static void bbpos_to_text(struct printbuf *out, struct bbpos pos)
836 prt_str(out, bch2_btree_ids[pos.btree]);
838 bch2_bpos_to_text(out, pos.pos);
842 static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
844 return (struct bbpos) {
845 .btree = bp.btree_id,
850 int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
851 unsigned btree_leaf_mask,
852 unsigned btree_interior_mask,
853 struct bbpos start, struct bbpos *end)
855 struct btree_iter iter;
864 btree_nodes = (i.totalram >> 1) / btree_bytes(trans->c);
866 for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
867 unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;
869 if (!((1U << btree) & btree_leaf_mask) &&
870 !((1U << btree) & btree_interior_mask))
873 bch2_trans_node_iter_init(trans, &iter, btree,
874 btree == start.btree ? start.pos : POS_MIN,
877 * for_each_btree_key_continue() doesn't check the return value
878 * from bch2_btree_iter_advance(), which is needed when
879 * iterating over interior nodes where we'll see keys at
883 k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
892 bch2_trans_iter_exit(trans, &iter);
895 } while (bch2_btree_iter_advance(&iter));
896 bch2_trans_iter_exit(trans, &iter);
899 end->btree = BTREE_ID_NR;
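/*
 * The forwards pass: walk every btree that carries pointers, at every level,
 * plus the btree roots, and verify (creating if necessary) the backpointer
 * for each pointer found.
 */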
904 int bch2_check_extents_to_backpointers(struct bch_fs *c)
906 struct btree_trans trans;
907 struct btree_iter iter;
908 enum btree_id btree_id;
911 bch2_trans_init(&trans, c, 0, 0);
912 for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
913 unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
915 bch2_trans_node_iter_init(&trans, &iter, btree_id, POS_MIN, 0,
917 BTREE_ITER_ALL_LEVELS|
918 BTREE_ITER_PREFETCH);
921 ret = commit_do(&trans, NULL, NULL,
922 BTREE_INSERT_LAZY_RW|
924 check_extent_to_backpointers(&trans, &iter));
927 } while (!bch2_btree_iter_advance(&iter));
929 bch2_trans_iter_exit(&trans, &iter);
934 ret = commit_do(&trans, NULL, NULL,
935 BTREE_INSERT_LAZY_RW|
937 check_btree_root_to_backpointers(&trans, btree_id));
941 bch2_trans_exit(&trans);
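/*
 * Check the backpointer at or after *bp_offset for @bucket: if it falls
 * within the [start, end] range currently being checked and the extent or
 * btree node it points to is missing, delete it (after an fsck prompt).
 */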
945 static int check_one_backpointer(struct btree_trans *trans,
951 struct btree_iter iter;
952 struct bch_backpointer bp;
955 struct printbuf buf = PRINTBUF;
958 ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp);
959 if (ret || *bp_offset == U64_MAX)
962 pos = bp_to_bbpos(bp);
963 if (bbpos_cmp(pos, start) < 0 ||
964 bbpos_cmp(pos, end) > 0)
967 k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
969 if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
974 if (fsck_err_on(!k.k, trans->c,
975 "%s backpointer points to missing extent\n%s",
976 *bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
977 (bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
978 ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
980 bch_err(trans->c, "backpointer at %llu not found", *bp_offset);
983 bch2_trans_iter_exit(trans, &iter);
989 static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
993 struct btree_iter iter;
997 for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
998 BTREE_ITER_PREFETCH, k, ret) {
1001 while (!(ret = commit_do(trans, NULL, NULL,
1002 BTREE_INSERT_LAZY_RW|
1003 BTREE_INSERT_NOFAIL,
1004 check_one_backpointer(trans, iter.pos, &bp_offset, start, end))) &&
1005 bp_offset < U64_MAX)
1011 bch2_trans_iter_exit(trans, &iter);
1012 return ret < 0 ? ret : 0;
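/*
 * The backwards pass: for every bucket, walk its backpointers and verify the
 * extents they point to. To keep the extents being checked cached, the work
 * is split into chunks: bch2_get_btree_in_memory_pos() estimates how much of
 * the extents and reflink btrees fits in memory at once, a pass checks only
 * the backpointers that land in that range, and the loop below advances to
 * the next chunk until every btree has been covered.
 */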
1015 int bch2_check_backpointers_to_extents(struct bch_fs *c)
1017 struct btree_trans trans;
1018 struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
1021 bch2_trans_init(&trans, c, 0, 0);
1023 ret = bch2_get_btree_in_memory_pos(&trans,
1024 (1U << BTREE_ID_extents)|
1025 (1U << BTREE_ID_reflink),
1028 bch2_check_backpointers_to_extents_pass(&trans, start, end);
1029 if (ret || end.btree == BTREE_ID_NR)
1032 start = bbpos_successor(end);
1034 bch2_trans_exit(&trans);