// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "error.h"

#include <linux/mm.h>

/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
                                           struct bpos bp_pos)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
        u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

        return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}

/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
                                           struct bpos bucket,
                                           u64 bucket_offset)
{
        struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
        struct bpos ret;

        ret = POS(bucket.inode,
                  (bucket_to_sector(ca, bucket.offset) <<
                   MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);

        BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));

        return ret;
}

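/*
 * Worked example (illustrative, not from the original source): assuming
 * 512-sector buckets and MAX_EXTENT_COMPRESS_RATIO_SHIFT == 4, bucket 3 on
 * device 1 owns the backpointer btree window
 *
 *      POS(1, (3 * 512) << 4) .. POS(1, ((4 * 512) << 4) - 1)
 *
 * i.e. each bucket gets room for up to 1 << MAX_EXTENT_COMPRESS_RATIO_SHIFT
 * backpointers per sector, since compression can pack that many extents into
 * one sector. The BUG_ON() above checks that the mapping round-trips through
 * bp_pos_to_bucket().
 */
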
static bool extent_matches_bp(struct bch_fs *c,
                              enum btree_id btree_id, unsigned level,
                              struct bkey_s_c k,
                              struct bpos bucket,
                              struct bch_backpointer bp)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;

        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket2;
                struct bch_backpointer bp2;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
                                      &bucket2, &bp2);
                if (bpos_eq(bucket, bucket2) &&
                    !memcmp(&bp, &bp2, sizeof(bp)))
                        return true;
        }

        return false;
}

int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
                             int rw, struct printbuf *err)
{
        struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
        struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);

        if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
                prt_str(err, "incorrect value size");
                return -BCH_ERR_invalid_bkey;
        }

        if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
                prt_str(err, "backpointer at wrong pos");
                return -BCH_ERR_invalid_bkey;
        }

        return 0;
}

void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
        prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
                   bch2_btree_ids[bp->btree_id],
                   bp->level,
                   (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
                   (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
                   bp->bucket_len);
        bch2_bpos_to_text(out, bp->pos);
}

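/*
 * Example output (illustrative; exact bpos formatting depends on
 * bch2_bpos_to_text()):
 *
 *      btree=extents l=0 offset=24576:0 len=8 pos=4096:24:0
 *
 * offset is printed as sector:sub-sector, undoing the
 * MAX_EXTENT_COMPRESS_RATIO_SHIFT packing of bucket_offset.
 */
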
void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
        bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}

void bch2_backpointer_swab(struct bkey_s k)
{
        struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

        bp.v->bucket_offset = swab32(bp.v->bucket_offset);
        bp.v->bucket_len = swab32(bp.v->bucket_len);
        bch2_bpos_swab(&bp.v->pos);
}

#define BACKPOINTER_OFFSET_MAX  ((1ULL << 40) - 1)

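/*
 * A bucket's backpointers live in two places: a small number are stored
 * inline in the bucket's alloc key, and the rest overflow into the
 * backpointers btree. The helpers below address both through a single
 * bp_offset namespace: offsets below BACKPOINTER_OFFSET_MAX refer to inline
 * backpointers, offsets at or above it refer to btree backpointers, biased by
 * BACKPOINTER_OFFSET_MAX.
 */
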
static inline int backpointer_cmp(struct bch_backpointer l, struct bch_backpointer r)
{
        return cmp_int(l.bucket_offset, r.bucket_offset);
}

static int bch2_backpointer_del_by_offset(struct btree_trans *trans,
                                          struct bpos bucket,
                                          u64 bp_offset,
                                          struct bch_backpointer bp)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        if (bp_offset < BACKPOINTER_OFFSET_MAX) {
                struct bch_backpointer *bps;
                struct bkey_i_alloc_v4 *a;
                unsigned i, nr;

                bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
                                     bucket,
                                     BTREE_ITER_INTENT|
                                     BTREE_ITER_SLOTS|
                                     BTREE_ITER_WITH_UPDATES);
                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        goto err;

                if (k.k->type != KEY_TYPE_alloc_v4) {
                        ret = -ENOENT;
                        goto err;
                }

                a = bch2_alloc_to_v4_mut(trans, k);
                ret = PTR_ERR_OR_ZERO(a);
                if (ret)
                        goto err;

                bps = alloc_v4_backpointers(&a->v);
                nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);

                for (i = 0; i < nr; i++) {
                        if (bps[i].bucket_offset == bp_offset)
                                goto found;
                        if (bps[i].bucket_offset > bp_offset)
                                break;
                }

                ret = -ENOENT;
                goto err;
found:
                if (memcmp(&bps[i], &bp, sizeof(bp))) {
                        ret = -ENOENT;
                        goto err;
                }
                array_remove_item(bps, nr, i);
                SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
                set_alloc_v4_u64s(a);
                ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
        } else {
                bp_offset -= BACKPOINTER_OFFSET_MAX;

                bch2_trans_iter_init(trans, &iter, BTREE_ID_backpointers,
                                     bucket_pos_to_bp(c, bucket, bp_offset),
                                     BTREE_ITER_INTENT|
                                     BTREE_ITER_SLOTS|
                                     BTREE_ITER_WITH_UPDATES);
                k = bch2_btree_iter_peek_slot(&iter);
                ret = bkey_err(k);
                if (ret)
                        goto err;

                if (k.k->type != KEY_TYPE_backpointer ||
                    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
                        ret = -ENOENT;
                        goto err;
                }

                ret = bch2_btree_delete_at(trans, &iter, 0);
        }
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

int bch2_bucket_backpointer_del(struct btree_trans *trans,
                                struct bkey_i_alloc_v4 *a,
                                struct bch_backpointer bp,
                                struct bkey_s_c orig_k)
{
        struct bch_fs *c = trans->c;
        struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
        unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
        struct btree_iter bp_iter;
        struct bkey_s_c k;
        int ret;

        for (i = 0; i < nr; i++) {
                int cmp = backpointer_cmp(bps[i], bp) ?:
                        memcmp(&bps[i], &bp, sizeof(bp));
                if (!cmp)
                        goto found;
                if (cmp >= 0)
                        break;
        }

        goto btree;
found:
        array_remove_item(bps, nr, i);
        SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
        set_alloc_v4_u64s(a);
        return 0;
btree:
        bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                             bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
                             BTREE_ITER_INTENT|
                             BTREE_ITER_SLOTS|
                             BTREE_ITER_WITH_UPDATES);
        k = bch2_btree_iter_peek_slot(&bp_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, "backpointer not found when deleting");
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "searching for ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);

                prt_printf(&buf, "got ");
                bch2_bkey_val_to_text(&buf, c, k);
                prt_newline(&buf);

                prt_str(&buf, "alloc ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
                if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
                        bch2_inconsistent_error(c);
                        ret = -EIO;
                } else {
                        ret = 0;
                }
                printbuf_exit(&buf);
                goto err;
        }

        ret = bch2_btree_delete_at(trans, &bp_iter, 0);
err:
        bch2_trans_iter_exit(trans, &bp_iter);
        return ret;
}

int bch2_bucket_backpointer_add(struct btree_trans *trans,
                                struct bkey_i_alloc_v4 *a,
                                struct bch_backpointer bp,
                                struct bkey_s_c orig_k)
{
        struct bch_fs *c = trans->c;
        struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
        unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
        struct bkey_i_backpointer *bp_k;
        struct btree_iter bp_iter;
        struct bkey_s_c k;
        int ret;

        /* Check for duplicates: */
        for (i = 0; i < nr; i++) {
                int cmp = backpointer_cmp(bps[i], bp);
                if (cmp >= 0)
                        break;
        }

        if ((i &&
             (bps[i - 1].bucket_offset +
              bps[i - 1].bucket_len > bp.bucket_offset)) ||
            (i < nr &&
             (bp.bucket_offset + bp.bucket_len > bps[i].bucket_offset))) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, "overlapping backpointer found when inserting ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "into ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
                printbuf_exit(&buf);

                if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
                        bch2_inconsistent_error(c);
                        return -EIO;
                }
        }

        if (nr < BCH_ALLOC_V4_NR_BACKPOINTERS_MAX) {
                array_insert_item(bps, nr, i, bp);
                SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
                set_alloc_v4_u64s(a);
                return 0;
        }

        /* Overflow: use backpointer btree */

        bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                             bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
                             BTREE_ITER_INTENT|
                             BTREE_ITER_SLOTS|
                             BTREE_ITER_WITH_UPDATES);
        k = bch2_btree_iter_peek_slot(&bp_iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (k.k->type) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, "existing btree backpointer key found when inserting ");
                bch2_backpointer_to_text(&buf, &bp);
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                prt_printf(&buf, "found ");
                bch2_bkey_val_to_text(&buf, c, k);
                prt_newline(&buf);

                prt_printf(&buf, "for ");
                bch2_bkey_val_to_text(&buf, c, orig_k);

                bch_err(c, "%s", buf.buf);
                printbuf_exit(&buf);

                if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
                        bch2_inconsistent_error(c);
                        ret = -EIO;
                        goto err;
                }
        }

        bp_k = bch2_bkey_alloc(trans, &bp_iter, backpointer);
        ret = PTR_ERR_OR_ZERO(bp_k);
        if (ret)
                goto err;

        bp_k->v = bp;

        ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
        bch2_trans_iter_exit(trans, &bp_iter);
        return ret;
}

/*
 * Find the next backpointer >= *bp_offset:
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
                              struct bpos bucket, int gen,
                              u64 *bp_offset,
                              struct bch_backpointer *dst,
                              unsigned iter_flags)
{
        struct bch_fs *c = trans->c;
        struct bpos bp_pos, bp_end_pos;
        struct btree_iter alloc_iter, bp_iter = { NULL };
        struct bkey_s_c k;
        struct bkey_s_c_alloc_v4 a;
        unsigned i;
        int ret;

        if (*bp_offset == U64_MAX)
                return 0;

        bp_pos = bucket_pos_to_bp(c, bucket,
                                  max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
        bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);

        bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
                             bucket, BTREE_ITER_CACHED);
        k = bch2_btree_iter_peek_slot(&alloc_iter);
        ret = bkey_err(k);
        if (ret)
                goto out;

        if (k.k->type != KEY_TYPE_alloc_v4)
                goto done;

        a = bkey_s_c_to_alloc_v4(k);
        if (gen >= 0 && a.v->gen != gen)
                goto done;

        for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++) {
                if (alloc_v4_backpointers_c(a.v)[i].bucket_offset < *bp_offset)
                        continue;

                *dst = alloc_v4_backpointers_c(a.v)[i];
                *bp_offset = dst->bucket_offset;
                goto out;
        }

        for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
                                     bp_pos, iter_flags, k, ret) {
                if (bpos_ge(k.k->p, bp_end_pos))
                        break;

                if (k.k->type != KEY_TYPE_backpointer)
                        continue;

                *dst = *bkey_s_c_to_backpointer(k).v;
                *bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
                goto out;
        }
done:
        *bp_offset = U64_MAX;
out:
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        return ret;
}

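/*
 * Usage sketch (illustrative): walking every backpointer in a bucket, inline
 * and btree alike - this mirrors the loop in
 * bch2_check_backpointers_to_extents_pass() below:
 *
 *      u64 bp_offset = 0;
 *      struct bch_backpointer bp;
 *
 *      while (!(ret = bch2_get_next_backpointer(trans, bucket, -1,
 *                                               &bp_offset, &bp, 0)) &&
 *             bp_offset < U64_MAX) {
 *              // use bp here
 *              bp_offset++;
 *      }
 */
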
static void backpointer_not_found(struct btree_trans *trans,
                                  struct bpos bucket,
                                  u64 bp_offset,
                                  struct bch_backpointer bp,
                                  struct bkey_s_c k,
                                  const char *thing_it_points_to)
{
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;

        prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
                   thing_it_points_to);
        prt_printf(&buf, "bucket: ");
        bch2_bpos_to_text(&buf, bucket);
        prt_printf(&buf, "\n ");

        if (bp_offset >= BACKPOINTER_OFFSET_MAX) {
                struct bpos bp_pos =
                        bucket_pos_to_bp(c, bucket,
                                         bp_offset - BACKPOINTER_OFFSET_MAX);
                prt_printf(&buf, "backpointer pos: ");
                bch2_bpos_to_text(&buf, bp_pos);
                prt_printf(&buf, "\n ");
        }

        bch2_backpointer_to_text(&buf, &bp);
        prt_printf(&buf, "\n ");
        bch2_bkey_val_to_text(&buf, c, k);
        if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
                bch_err_ratelimited(c, "%s", buf.buf);
        else
                bch2_trans_inconsistent(trans, "%s", buf.buf);

        printbuf_exit(&buf);
}

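/*
 * bch2_backpointer_get_key() returns the key the backpointer points to, with
 * @iter pointing at it (the caller must exit @iter); it returns
 * bkey_s_c_null if the backpointer is stale, after reporting it via
 * backpointer_not_found(), and an error key on transaction restart or other
 * error.
 */
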
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
                                         struct btree_iter *iter,
                                         struct bpos bucket,
                                         u64 bp_offset,
                                         struct bch_backpointer bp)
{
        struct bch_fs *c = trans->c;
        struct bkey_s_c k;

        bch2_trans_node_iter_init(trans, iter,
                                  bp.btree_id,
                                  bp.pos,
                                  0,
                                  min(bp.level, c->btree_roots[bp.btree_id].level),
                                  0);
        k = bch2_btree_iter_peek_slot(iter);
        if (bkey_err(k)) {
                bch2_trans_iter_exit(trans, iter);
                return k;
        }

        if (bp.level == c->btree_roots[bp.btree_id].level + 1)
                k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);

        if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
                return k;

        bch2_trans_iter_exit(trans, iter);

        if (bp.level) {
                struct btree *b;

                /*
                 * If a backpointer for a btree node wasn't found, it may be
                 * because it was overwritten by a new btree node that hasn't
                 * been written out yet - backpointer_get_node() checks for
                 * this:
                 */
                b = bch2_backpointer_get_node(trans, iter, bucket, bp_offset, bp);
                if (!IS_ERR_OR_NULL(b))
                        return bkey_i_to_s_c(&b->key);

                bch2_trans_iter_exit(trans, iter);

                if (IS_ERR(b))
                        return bkey_s_c_err(PTR_ERR(b));
                return bkey_s_c_null;
        }

        backpointer_not_found(trans, bucket, bp_offset, bp, k, "extent");
        return bkey_s_c_null;
}

struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
                                        struct btree_iter *iter,
                                        struct bpos bucket,
                                        u64 bp_offset,
                                        struct bch_backpointer bp)
{
        struct bch_fs *c = trans->c;
        struct btree *b;

        BUG_ON(!bp.level);

        bch2_trans_node_iter_init(trans, iter,
                                  bp.btree_id,
                                  bp.pos,
                                  0,
                                  bp.level - 1,
                                  0);
        b = bch2_btree_iter_peek_node(iter);
        if (IS_ERR(b))
                goto err;

        if (b && extent_matches_bp(c, bp.btree_id, bp.level,
                                   bkey_i_to_s_c(&b->key),
                                   bucket, bp))
                return b;

        if (b && btree_node_will_make_reachable(b)) {
                b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
        } else {
                backpointer_not_found(trans, bucket, bp_offset, bp,
                                      bkey_i_to_s_c(&b->key), "btree node");
                b = NULL;
        }
err:
        bch2_trans_iter_exit(trans, iter);
        return b;
}

static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
                                        struct bkey_s_c k)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter = { NULL };
        struct bch_dev *ca;
        struct bkey_s_c alloc_k;
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
                        "backpointer for missing device:\n%s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
                goto out;
        }

        ca = bch_dev_bkey_exists(c, k.k->p.inode);

        bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
                             bp_pos_to_bucket(c, k.k->p), 0);

        alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
        ret = bkey_err(alloc_k);
        if (ret)
                goto out;

        if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
                        "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
                        alloc_iter.pos.inode, alloc_iter.pos.offset,
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
                goto out;
        }
out:
fsck_err:
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
}

/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        return bch2_trans_run(c,
                for_each_btree_key_commit(&trans, iter,
                                BTREE_ID_backpointers, POS_MIN, 0, k,
                                NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
                        bch2_check_btree_backpointer(&trans, &iter, k)));
}

static int check_bp_exists(struct btree_trans *trans,
                           struct bpos bucket_pos,
                           struct bch_backpointer bp,
                           struct bkey_s_c orig_k,
                           struct bpos bucket_start,
                           struct bpos bucket_end)
{
        struct bch_fs *c = trans->c;
        struct btree_iter alloc_iter, bp_iter = { NULL };
        struct printbuf buf = PRINTBUF;
        struct bkey_s_c alloc_k, bp_k;
        int ret;

        if (bpos_lt(bucket_pos, bucket_start) ||
            bpos_gt(bucket_pos, bucket_end))
                return 0;

        bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
        alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
        ret = bkey_err(alloc_k);
        if (ret)
                goto err;

        if (alloc_k.k->type == KEY_TYPE_alloc_v4) {
                struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(alloc_k);
                const struct bch_backpointer *bps = alloc_v4_backpointers_c(a.v);
                unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(a.v);

                for (i = 0; i < nr; i++) {
                        int cmp = backpointer_cmp(bps[i], bp) ?:
                                memcmp(&bps[i], &bp, sizeof(bp));
                        if (!cmp)
                                goto out;
                        if (cmp >= 0)
                                break;
                }
        }

        bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                             bucket_pos_to_bp(c, bucket_pos, bp.bucket_offset),
                             0);
        bp_k = bch2_btree_iter_peek_slot(&bp_iter);
        ret = bkey_err(bp_k);
        if (ret)
                goto err;

        if (bp_k.k->type != KEY_TYPE_backpointer ||
            memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp)))
                goto missing;
out:
err:
fsck_err:
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        printbuf_exit(&buf);
        return ret;
missing:
        prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
                   bch2_btree_ids[bp.btree_id], bp.level);
        bch2_bkey_val_to_text(&buf, c, orig_k);
        prt_printf(&buf, "\nin alloc key ");
        bch2_bkey_val_to_text(&buf, c, alloc_k);

        if (c->sb.version < bcachefs_metadata_version_backpointers ||
            c->opts.reconstruct_alloc ||
            fsck_err(c, "%s", buf.buf)) {
                struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, alloc_k);

                ret = PTR_ERR_OR_ZERO(a) ?:
                        bch2_bucket_backpointer_add(trans, a, bp, orig_k) ?:
                        bch2_trans_update(trans, &alloc_iter, &a->k_i, 0);
        }

        goto out;
}

static int check_extent_to_backpointers(struct btree_trans *trans,
                                        struct btree_iter *iter,
                                        struct bpos bucket_start,
                                        struct bpos bucket_end)
{
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs;
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
        struct bkey_s_c k;
        int ret;

        k = bch2_btree_iter_peek_all_levels(iter);
        ret = bkey_err(k);
        if (ret)
                return ret;
        if (!k.k)
                return 0;

        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k, bucket_start, bucket_end);
                if (ret)
                        return ret;
        }

        return 0;
}

static int check_btree_root_to_backpointers(struct btree_trans *trans,
                                            enum btree_id btree_id,
                                            struct bpos bucket_start,
                                            struct bpos bucket_end)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct btree *b;
        struct bkey_s_c k;
        struct bkey_ptrs_c ptrs;
        struct extent_ptr_decoded p;
        const union bch_extent_entry *entry;
        int ret;

        bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
                                  c->btree_roots[btree_id].level, 0);
        b = bch2_btree_iter_peek_node(&iter);
        ret = PTR_ERR_OR_ZERO(b);
        if (ret)
                goto err;

        BUG_ON(b != btree_node_root(c, b));

        k = bkey_i_to_s_c(&b->key);
        ptrs = bch2_bkey_ptrs_c(k);
        bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
                struct bpos bucket_pos;
                struct bch_backpointer bp;

                if (p.ptr.cached)
                        continue;

                bch2_extent_ptr_to_bp(c, iter.btree_id, iter.path->level + 1,
                                      k, p, &bucket_pos, &bp);

                ret = check_bp_exists(trans, bucket_pos, bp, k, bucket_start, bucket_end);
                if (ret)
                        goto err;
        }
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
        return (struct bbpos) {
                .btree  = bp.btree_id,
                .pos    = bp.pos,
        };
}

static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
        struct sysinfo i;
        u64 mem_bytes;

        si_meminfo(&i);
        mem_bytes = i.totalram * i.mem_unit;
        return (mem_bytes >> 1) / btree_bytes(c);
}

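/*
 * Worked example (illustrative; the sizes are assumptions): with 16GiB of
 * RAM and 256KiB btree nodes, budgeting half of RAM gives
 * 8GiB / 256KiB = 32768 nodes per pass; btree_bytes(c) is whatever node size
 * the filesystem was formatted with.
 */
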
int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
                                 unsigned btree_leaf_mask,
                                 unsigned btree_interior_mask,
                                 struct bbpos start, struct bbpos *end)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
        enum btree_id btree;
        int ret = 0;

        for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
                unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

                if (!((1U << btree) & btree_leaf_mask) &&
                    !((1U << btree) & btree_interior_mask))
                        continue;

                bch2_trans_node_iter_init(trans, &iter, btree,
                                          btree == start.btree ? start.pos : POS_MIN,
                                          0, depth, 0);
                /*
                 * for_each_btree_key_continue() doesn't check the return value
                 * from bch2_btree_iter_advance(), which is needed when
                 * iterating over interior nodes where we'll see keys at
                 * SPOS_MAX:
                 */
                do {
                        k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
                        ret = bkey_err(k);
                        if (!k.k || ret)
                                break;

                        --btree_nodes;
                        if (!btree_nodes) {
                                *end = BBPOS(btree, k.k->p);
                                bch2_trans_iter_exit(trans, &iter);
                                return 0;
                        }
                } while (bch2_btree_iter_advance(&iter));
                bch2_trans_iter_exit(trans, &iter);
        }

        *end = BBPOS_MAX;
        return ret;
}

static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
                                                   struct bpos bucket_start,
                                                   struct bpos bucket_end)
{
        struct btree_iter iter;
        enum btree_id btree_id;
        int ret = 0;

        for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
                unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

                bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
                                          depth,
                                          BTREE_ITER_ALL_LEVELS|
                                          BTREE_ITER_PREFETCH);

                do {
                        ret = commit_do(trans, NULL, NULL,
                                        BTREE_INSERT_LAZY_RW|
                                        BTREE_INSERT_NOFAIL,
                                        check_extent_to_backpointers(trans, &iter,
                                                        bucket_start, bucket_end));
                        if (ret)
                                break;
                } while (!bch2_btree_iter_advance(&iter));

                bch2_trans_iter_exit(trans, &iter);

                if (ret)
                        break;

                ret = commit_do(trans, NULL, NULL,
                                BTREE_INSERT_LAZY_RW|
                                BTREE_INSERT_NOFAIL,
                                check_btree_root_to_backpointers(trans, btree_id,
                                                        bucket_start, bucket_end));
                if (ret)
                        break;
        }
        return ret;
}

static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
                                         struct bpos bucket)
{
        return bch2_dev_exists2(c, bucket.inode)
                ? bucket_pos_to_bp(c, bucket, 0)
                : bucket;
}

int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
                                 struct bpos start, struct bpos *end)
{
        struct btree_iter alloc_iter;
        struct btree_iter bp_iter;
        struct bkey_s_c alloc_k, bp_k;
        size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
        bool alloc_end = false, bp_end = false;
        int ret = 0;

        bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
                                  start, 0, 1, 0);
        bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
                                  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
        while (1) {
                alloc_k = !alloc_end
                        ? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
                        : bkey_s_c_null;
                bp_k = !bp_end
                        ? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
                        : bkey_s_c_null;

                ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
                if ((!alloc_k.k && !bp_k.k) || ret) {
                        *end = SPOS_MAX;
                        break;
                }

                --btree_nodes;
                if (!btree_nodes) {
                        *end = alloc_k.k ? alloc_k.k->p : SPOS_MAX;
                        break;
                }

                if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
                    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
                        if (!bch2_btree_iter_advance(&alloc_iter))
                                alloc_end = true;
                } else {
                        if (!bch2_btree_iter_advance(&bp_iter))
                                bp_end = true;
                }
        }
        bch2_trans_iter_exit(trans, &bp_iter);
        bch2_trans_iter_exit(trans, &alloc_iter);
        return ret;
}

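/*
 * Note: both iterators above walk interior nodes (depth 1), so btree_nodes
 * counts child node pointers - i.e. leaf nodes - across the alloc and
 * backpointers btrees, sizing a window [start, *end) that should fit in half
 * of RAM for one check pass.
 */
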
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
        struct btree_trans trans;
        struct bpos start = POS_MIN, end;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);
        while (1) {
                ret = bch2_get_alloc_in_memory_pos(&trans, start, &end);
                if (ret)
                        break;

                if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
                        bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));

                if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "check_extents_to_backpointers(): ");
                        bch2_bpos_to_text(&buf, start);
                        prt_str(&buf, "-");
                        bch2_bpos_to_text(&buf, end);

                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }

                ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
                if (ret || bpos_eq(end, SPOS_MAX))
                        break;

                start = bpos_successor(end);
        }
        bch2_trans_exit(&trans);

        return ret;
}

static int check_one_backpointer(struct btree_trans *trans,
                                 struct bpos bucket,
                                 u64 *bp_offset,
                                 struct bbpos start,
                                 struct bbpos end)
{
        struct btree_iter iter;
        struct bch_backpointer bp;
        struct bbpos pos;
        struct bkey_s_c k;
        struct printbuf buf = PRINTBUF;
        int ret;

        ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
        if (ret || *bp_offset == U64_MAX)
                return ret;

        pos = bp_to_bbpos(bp);
        if (bbpos_cmp(pos, start) < 0 ||
            bbpos_cmp(pos, end) > 0)
                return 0;

        k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
        ret = bkey_err(k);
        if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
                return 0;
        if (ret)
                return ret;

        if (fsck_err_on(!k.k, trans->c,
                        "%s backpointer points to missing extent\n%s",
                        *bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
                        (bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
                ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
                if (ret == -ENOENT)
                        bch_err(trans->c, "backpointer at %llu not found", *bp_offset);
        }

        bch2_trans_iter_exit(trans, &iter);
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
                                                   struct bbpos start,
                                                   struct bbpos end)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret = 0;

        for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
                           BTREE_ITER_PREFETCH, k, ret) {
                u64 bp_offset = 0;

                while (!(ret = commit_do(trans, NULL, NULL,
                                         BTREE_INSERT_LAZY_RW|
                                         BTREE_INSERT_NOFAIL,
                                check_one_backpointer(trans, iter.pos, &bp_offset, start, end))) &&
                       bp_offset < U64_MAX)
                        bp_offset++;

                if (ret)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);
        return ret < 0 ? ret : 0;
}

int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
        struct btree_trans trans;
        struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
        int ret;

        bch2_trans_init(&trans, c, 0, 0);
        while (1) {
                ret = bch2_get_btree_in_memory_pos(&trans,
                                                   (1U << BTREE_ID_extents)|
                                                   (1U << BTREE_ID_reflink),
                                                   0,
                                                   start, &end);
                if (ret)
                        break;

                if (!bbpos_cmp(start, BBPOS_MIN) &&
                    bbpos_cmp(end, BBPOS_MAX))
                        bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
                                    __func__, btree_nodes_fit_in_ram(c));

                if (bbpos_cmp(start, BBPOS_MIN) ||
                    bbpos_cmp(end, BBPOS_MAX)) {
                        struct printbuf buf = PRINTBUF;

                        prt_str(&buf, "check_backpointers_to_extents(): ");
                        bch2_bbpos_to_text(&buf, start);
                        prt_str(&buf, "-");
                        bch2_bbpos_to_text(&buf, end);

                        bch_verbose(c, "%s", buf.buf);
                        printbuf_exit(&buf);
                }

                ret = bch2_check_backpointers_to_extents_pass(&trans, start, end);
                if (ret || !bbpos_cmp(end, BBPOS_MAX))
                        break;

                start = bbpos_successor(end);
        }
        bch2_trans_exit(&trans);

        return ret;
}