// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "error.h"

#include <linux/mm.h>
/*
 * Convert from pos in backpointer btree to pos of corresponding bucket in alloc
 * btree:
 */
static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;

	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
}
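/*
 * Note: the two conversions here must round-trip exactly (see the BUG_ON()
 * in bucket_pos_to_bp() below). Each bucket thus owns the range of
 * backpointer btree positions
 *
 *	[POS(dev, bucket_to_sector(ca, b)     << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
 *	 POS(dev, bucket_to_sector(ca, b + 1) << MAX_EXTENT_COMPRESS_RATIO_SHIFT))
 *
 * which leaves room for many backpointers per bucket sector, as needed when
 * compressed extents are packed into a bucket.
 */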
/*
 * Convert from pos in alloc btree + bucket offset to pos in backpointer btree:
 */
static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket, u64 bucket_offset)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
	struct bpos ret = POS(bucket.inode,
			      (bucket_to_sector(ca, bucket.offset) <<
			       MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);

	BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
	return ret;
}
static bool extent_matches_bp(struct bch_fs *c,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c k, struct bpos bucket,
			      struct bch_backpointer bp)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket2;
		struct bch_backpointer bp2;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
				      &bucket2, &bp2);
		if (bpos_eq(bucket, bucket2) &&
		    !memcmp(&bp, &bp2, sizeof(bp)))
			return true;
	}
	return false;
}
int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
			     int rw, struct printbuf *err)
{
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);

	if (bkey_val_bytes(bp.k) < sizeof(*bp.v)) {
		prt_str(err, "incorrect value size");
		return -BCH_ERR_invalid_bkey;
	}

	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
		prt_str(err, "backpointer at wrong pos");
		return -BCH_ERR_invalid_bkey;
	}

	return 0;
}
void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
	prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
	       bch2_btree_ids[bp->btree_id],
	       bp->level,
	       (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
	       (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
	       bp->bucket_len);
	bch2_bpos_to_text(out, bp->pos);
}

void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
}
void bch2_backpointer_swab(struct bkey_s k)
{
	struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);

	bp.v->bucket_offset = swab32(bp.v->bucket_offset);
	bp.v->bucket_len = swab32(bp.v->bucket_len);
	bch2_bpos_swab(&bp.v->pos);
}
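/*
 * Backpointers for a bucket live in two places: a small number are stored
 * inline in the alloc key itself (up to BCH_ALLOC_V4_NR_BACKPOINTERS_MAX),
 * and the rest overflow into the backpointers btree. Iteration uses a single
 * u64 cursor over both: values below BACKPOINTER_OFFSET_MAX are bucket
 * offsets of backpointers stored in the alloc key, values at or above it are
 * backpointer btree offsets biased by BACKPOINTER_OFFSET_MAX, and U64_MAX
 * means iteration is done. A typical iteration, sketched:
 *
 *	u64 cursor = 0;
 *	struct bch_backpointer bp;
 *
 *	while (!bch2_get_next_backpointer(trans, bucket, -1, &cursor, &bp, 0) &&
 *	       cursor != U64_MAX) {
 *		... use bp ...
 *		cursor++;
 *	}
 */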
#define BACKPOINTER_OFFSET_MAX	((1ULL << 40) - 1)

static inline int backpointer_cmp(struct bch_backpointer l, struct bch_backpointer r)
{
	return cmp_int(l.bucket_offset, r.bucket_offset);
}
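/*
 * Delete the backpointer at a given cursor position (as produced by
 * bch2_get_next_backpointer()), after verifying it still matches @bp;
 * returns -ENOENT if it's no longer there:
 */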
static int bch2_backpointer_del_by_offset(struct btree_trans *trans,
					  struct bpos bucket,
					  u64 bp_offset,
					  struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	if (bp_offset < BACKPOINTER_OFFSET_MAX) {
		struct bch_backpointer *bps;
		struct bkey_i_alloc_v4 *a;
		unsigned i, nr;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
				     bucket,
				     BTREE_ITER_INTENT|
				     BTREE_ITER_SLOTS|
				     BTREE_ITER_WITH_UPDATES);
		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_alloc_v4) {
			ret = -ENOENT;
			goto err;
		}

		a = bch2_alloc_to_v4_mut(trans, k);
		ret = PTR_ERR_OR_ZERO(a);
		if (ret)
			goto err;

		bps = alloc_v4_backpointers(&a->v);
		nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);

		for (i = 0; i < nr; i++) {
			if (bps[i].bucket_offset == bp_offset)
				goto found;
			if (bps[i].bucket_offset > bp_offset)
				break;
		}

		ret = -ENOENT;
		goto err;
found:
		if (memcmp(&bps[i], &bp, sizeof(bp))) {
			ret = -ENOENT;
			goto err;
		}
		array_remove_item(bps, nr, i);
		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
		set_alloc_v4_u64s(a);
		ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	} else {
		bp_offset -= BACKPOINTER_OFFSET_MAX;

		bch2_trans_iter_init(trans, &iter, BTREE_ID_backpointers,
				     bucket_pos_to_bp(c, bucket, bp_offset),
				     BTREE_ITER_INTENT|
				     BTREE_ITER_SLOTS|
				     BTREE_ITER_WITH_UPDATES);
		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		if (k.k->type != KEY_TYPE_backpointer ||
		    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
			ret = -ENOENT;
			goto err;
		}

		ret = bch2_btree_delete_at(trans, &iter, 0);
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
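/*
 * Delete a backpointer from @a's inline list if it's there, otherwise from
 * the backpointers btree; a missing entry is a filesystem inconsistency
 * (only logged as an error before backpointers fsck has run):
 */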
int bch2_bucket_backpointer_del(struct btree_trans *trans,
				struct bkey_i_alloc_v4 *a,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
	struct btree_iter bp_iter;
	struct bkey_s_c k;
	int ret;

	for (i = 0; i < nr; i++) {
		int cmp = backpointer_cmp(bps[i], bp) ?:
			memcmp(&bps[i], &bp, sizeof(bp));
		if (!cmp)
			goto found;
		if (cmp >= 0)
			break;
	}
	goto btree;
found:
	array_remove_item(bps, nr, i);
	SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
	set_alloc_v4_u64s(a);
	return 0;
btree:
	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
			     bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
			     BTREE_ITER_INTENT|
			     BTREE_ITER_SLOTS|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&bp_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp))) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "backpointer not found when deleting");
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "searching for ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);

		prt_printf(&buf, "got ");
		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		prt_str(&buf, "alloc ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
			bch_err(c, "%s", buf.buf);
		} else {
			ret = -EIO;
			bch2_trans_inconsistent(trans, "%s", buf.buf);
		}
		printbuf_exit(&buf);
		goto err;
	}

	ret = bch2_btree_delete_at(trans, &bp_iter, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}
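/*
 * Add a backpointer to @a's inline list if there's room, otherwise insert it
 * into the backpointers btree; overlapping or duplicate backpointers are a
 * filesystem inconsistency:
 */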
int bch2_bucket_backpointer_add(struct btree_trans *trans,
				struct bkey_i_alloc_v4 *a,
				struct bch_backpointer bp,
				struct bkey_s_c orig_k)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca;
	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
	struct bkey_i_backpointer *bp_k;
	struct btree_iter bp_iter;
	struct bkey_s_c k;
	int ret;

	/* Check for duplicates: */
	for (i = 0; i < nr; i++) {
		int cmp = backpointer_cmp(bps[i], bp);
		if (cmp >= 0)
			break;
	}

	if ((i &&
	     (bps[i - 1].bucket_offset +
	      bps[i - 1].bucket_len > bp.bucket_offset)) ||
	    (i < nr &&
	     (bp.bucket_offset + bp.bucket_len > bps[i].bucket_offset))) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "overlapping backpointer found when inserting ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "into ");
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
			bch_err(c, "%s", buf.buf);
		else {
			bch2_trans_inconsistent(trans, "%s", buf.buf);
			printbuf_exit(&buf);
			return -EIO;
		}
		printbuf_exit(&buf);
	}

	if (nr < BCH_ALLOC_V4_NR_BACKPOINTERS_MAX) {
		array_insert_item(bps, nr, i, bp);
		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v, nr);
		set_alloc_v4_u64s(a);
		return 0;
	}

	/* Overflow: use backpointer btree */
	bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
	ret = PTR_ERR_OR_ZERO(bp_k);
	if (ret)
		return ret;

	ca = bch_dev_bkey_exists(c, a->k.p.inode);

	bkey_backpointer_init(&bp_k->k_i);
	bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);
	bp_k->v = bp;

	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_k->k.p,
			     BTREE_ITER_INTENT|
			     BTREE_ITER_SLOTS|
			     BTREE_ITER_WITH_UPDATES);
	k = bch2_btree_iter_peek_slot(&bp_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (k.k->type) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "existing btree backpointer key found when inserting ");
		bch2_backpointer_to_text(&buf, &bp);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		prt_printf(&buf, "found ");
		bch2_bkey_val_to_text(&buf, c, k);
		prt_newline(&buf);

		prt_printf(&buf, "for ");
		bch2_bkey_val_to_text(&buf, c, orig_k);

		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
			bch_err(c, "%s", buf.buf);
		else {
			bch2_trans_inconsistent(trans, "%s", buf.buf);
			printbuf_exit(&buf);
			ret = -EIO;
			goto err;
		}
		printbuf_exit(&buf);
	}

	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &bp_iter);
	return ret;
}
/*
 * Find the next backpointer >= *bp_offset; a @gen of -1 matches any bucket
 * generation:
 */
int bch2_get_next_backpointer(struct btree_trans *trans,
			      struct bpos bucket, int gen,
			      u64 *bp_offset,
			      struct bch_backpointer *dst,
			      unsigned iter_flags)
{
	struct bch_fs *c = trans->c;
	struct bpos bp_pos, bp_end_pos;
	struct btree_iter alloc_iter, bp_iter = { NULL };
	struct bkey_s_c k;
	struct bkey_s_c_alloc_v4 a;
	unsigned i;
	int ret;

	if (*bp_offset == U64_MAX)
		return 0;

	bp_pos = bucket_pos_to_bp(c, bucket,
				  max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
	bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
			     bucket, BTREE_ITER_CACHED);
	k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(k);
	if (ret)
		goto out;

	if (k.k->type != KEY_TYPE_alloc_v4)
		goto done;

	a = bkey_s_c_to_alloc_v4(k);
	if (gen >= 0 && a.v->gen != gen)
		goto done;

	for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a.v); i++) {
		if (alloc_v4_backpointers_c(a.v)[i].bucket_offset < *bp_offset)
			continue;

		*dst = alloc_v4_backpointers_c(a.v)[i];
		*bp_offset = dst->bucket_offset;
		goto out;
	}

	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
				     bp_pos, iter_flags, k, ret) {
		if (bpos_ge(k.k->p, bp_end_pos))
			break;

		if (k.k->type != KEY_TYPE_backpointer)
			continue;

		*dst = *bkey_s_c_to_backpointer(k).v;
		*bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
		goto out;
	}
done:
	*bp_offset = U64_MAX;
out:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}
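/*
 * Error reporting helper for when a backpointer doesn't point at a valid
 * extent or btree node; before backpointers fsck has run this is only
 * logged, afterwards it marks the filesystem inconsistent:
 */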
static void backpointer_not_found(struct btree_trans *trans,
				  struct bpos bucket,
				  u64 bp_offset,
				  struct bch_backpointer bp,
				  struct bkey_s_c k,
				  const char *thing_it_points_to)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
		   thing_it_points_to);
	prt_printf(&buf, "bucket: ");
	bch2_bpos_to_text(&buf, bucket);
	prt_printf(&buf, "\n  ");

	if (bp_offset >= BACKPOINTER_OFFSET_MAX) {
		struct bpos bp_pos =
			bucket_pos_to_bp(c, bucket,
					 bp_offset - BACKPOINTER_OFFSET_MAX);
		prt_printf(&buf, "backpointer pos: ");
		bch2_bpos_to_text(&buf, bp_pos);
		prt_printf(&buf, "\n  ");
	}

	bch2_backpointer_to_text(&buf, &bp);
	prt_printf(&buf, "\n  ");
	bch2_bkey_val_to_text(&buf, c, k);
	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
		bch_err_ratelimited(c, "%s", buf.buf);
	else
		bch2_trans_inconsistent(trans, "%s", buf.buf);

	printbuf_exit(&buf);
}
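/*
 * Returns the extent or btree node key that @bp points to, with @iter
 * pointing at it, or bkey_s_c_null if it no longer exists. Note that
 * bp.level for a btree node backpointer is one greater than the level the
 * node's own bkey lives at:
 */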
struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bpos bucket,
					 u64 bp_offset,
					 struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;

	bch2_trans_node_iter_init(trans, iter, bp.btree_id, bp.pos, 0,
				  min(bp.level, c->btree_roots[bp.btree_id].level),
				  0);
	k = bch2_btree_iter_peek_slot(iter);
	if (bkey_err(k)) {
		bch2_trans_iter_exit(trans, iter);
		return k;
	}

	if (bp.level == c->btree_roots[bp.btree_id].level + 1)
		k = bkey_i_to_s_c(&c->btree_roots[bp.btree_id].key);

	if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
		return k;

	bch2_trans_iter_exit(trans, iter);

	if (bp.level) {
		struct btree *b;

		/*
		 * If a backpointer for a btree node wasn't found, it may be
		 * because it was overwritten by a new btree node that hasn't
		 * been written out yet - backpointer_get_node() checks for
		 * this:
		 */
		b = bch2_backpointer_get_node(trans, iter, bucket, bp_offset, bp);
		if (!IS_ERR_OR_NULL(b))
			return bkey_i_to_s_c(&b->key);

		bch2_trans_iter_exit(trans, iter);

		if (IS_ERR(b))
			return bkey_s_c_err(PTR_ERR(b));
		return bkey_s_c_null;
	}

	backpointer_not_found(trans, bucket, bp_offset, bp, k, "extent");
	return bkey_s_c_null;
}
struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bucket,
					u64 bp_offset,
					struct bch_backpointer bp)
{
	struct bch_fs *c = trans->c;
	struct btree *b;

	BUG_ON(!bp.level);

	bch2_trans_node_iter_init(trans, iter, bp.btree_id, bp.pos,
				  0, bp.level - 1, 0);
	b = bch2_btree_iter_peek_node(iter);
	if (IS_ERR(b))
		goto err;

	if (b && extent_matches_bp(c, bp.btree_id, bp.level,
				   bkey_i_to_s_c(&b->key),
				   bucket, bp))
		return b;

	if (b && btree_node_will_make_reachable(b)) {
		b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
	} else {
		backpointer_not_found(trans, bucket, bp_offset, bp,
				      bkey_i_to_s_c(&b->key), "btree node");
		b = NULL;
	}
err:
	bch2_trans_iter_exit(trans, iter);
	return b;
}
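/*
 * Check that a single backpointer key points at an existing device and an
 * existing alloc key; if not, delete it:
 */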
static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
					struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bch_dev *ca;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
			"backpointer for missing device:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}

	ca = bch_dev_bkey_exists(c, k.k->p.inode);

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
			     bp_pos_to_bucket(c, k.k->p), 0);

	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(alloc_k);
	if (ret)
		goto out;

	if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
			"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
			alloc_iter.pos.inode, alloc_iter.pos.offset,
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, bp_iter, 0);
		goto out;
	}
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}
/* verify that every backpointer has a corresponding alloc key */
int bch2_check_btree_backpointers(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter,
				BTREE_ID_backpointers, POS_MIN, 0, k,
				NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
			bch2_check_btree_backpointer(&trans, &iter, k)));
}
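/*
 * For a given extent pointer, check that a matching backpointer exists,
 * either in the alloc key's inline list or in the backpointers btree, and
 * (re)create it if it's missing. Buckets outside [bucket_start, bucket_end]
 * belong to another pass and are skipped:
 */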
static int check_bp_exists(struct btree_trans *trans,
			   struct bpos bucket_pos,
			   struct bch_backpointer bp,
			   struct bkey_s_c orig_k,
			   struct bpos bucket_start,
			   struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter, bp_iter = { NULL };
	struct printbuf buf = PRINTBUF;
	struct bkey_s_c alloc_k, bp_k;
	int ret;

	if (bpos_lt(bucket_pos, bucket_start) ||
	    bpos_gt(bucket_pos, bucket_end))
		return 0;

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
	alloc_k = bch2_btree_iter_peek_slot(&alloc_iter);
	ret = bkey_err(alloc_k);
	if (ret)
		goto err;

	if (alloc_k.k->type == KEY_TYPE_alloc_v4) {
		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(alloc_k);
		const struct bch_backpointer *bps = alloc_v4_backpointers_c(a.v);
		unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(a.v);

		for (i = 0; i < nr; i++) {
			int cmp = backpointer_cmp(bps[i], bp) ?:
				memcmp(&bps[i], &bp, sizeof(bp));
			if (!cmp)
				goto out;
			if (cmp >= 0)
				break;
		}
	}

	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
			     bucket_pos_to_bp(c, bucket_pos, bp.bucket_offset),
			     0);
	bp_k = bch2_btree_iter_peek_slot(&bp_iter);
	ret = bkey_err(bp_k);
	if (ret)
		goto err;

	if (bp_k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp)))
		goto missing;
out:
err:
fsck_err:
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
missing:
	prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
	       bch2_btree_ids[bp.btree_id], bp.level);
	bch2_bkey_val_to_text(&buf, c, orig_k);
	prt_printf(&buf, "\nin alloc key ");
	bch2_bkey_val_to_text(&buf, c, alloc_k);

	if (c->sb.version < bcachefs_metadata_version_backpointers ||
	    c->opts.reconstruct_alloc ||
	    fsck_err(c, "%s", buf.buf)) {
		struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, alloc_k);

		ret = PTR_ERR_OR_ZERO(a) ?:
			bch2_bucket_backpointer_add(trans, a, bp, orig_k) ?:
			bch2_trans_update(trans, &alloc_iter, &a->k_i, 0);
	}

	goto out;
}
static int check_extent_to_backpointers(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos bucket_start,
					struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs;
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bkey_s_c k;
	int ret;

	k = bch2_btree_iter_peek_all_levels(iter);
	ret = bkey_err(k);
	if (ret)
		return ret;
	if (!k.k)
		return 0;

	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, iter->btree_id, iter->path->level,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, bucket_pos, bp, k, bucket_start, bucket_end);
		if (ret)
			return ret;
	}
	return 0;
}
static int check_btree_root_to_backpointers(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos bucket_start,
					    struct bpos bucket_end)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
	struct bkey_s_c k;
	struct bkey_ptrs_c ptrs;
	struct extent_ptr_decoded p;
	const union bch_extent_entry *entry;
	int ret;

	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
				  c->btree_roots[btree_id].level, 0);
	b = bch2_btree_iter_peek_node(&iter);
	ret = PTR_ERR_OR_ZERO(b);
	if (ret)
		goto err;

	BUG_ON(b != btree_node_root(c, b));

	k = bkey_i_to_s_c(&b->key);
	ptrs = bch2_bkey_ptrs_c(k);
	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		struct bpos bucket_pos;
		struct bch_backpointer bp;

		if (p.ptr.cached)
			continue;

		bch2_extent_ptr_to_bp(c, iter.btree_id, iter.path->level + 1,
				      k, p, &bucket_pos, &bp);

		ret = check_bp_exists(trans, bucket_pos, bp, k, bucket_start, bucket_end);
		if (ret)
			goto err;
	}
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
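/*
 * The fsck passes below walk two btrees in tandem; to avoid thrashing the
 * btree node cache when neither fits in memory, they process the keyspace
 * in chunks sized by how many btree nodes fit in half of system RAM (see
 * btree_nodes_fit_in_ram() below):
 */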
static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
{
	return (struct bbpos) {
		.btree	= bp.btree_id,
		.pos	= bp.pos,
	};
}

static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
{
	struct sysinfo i;
	u64 mem_bytes;

	si_meminfo(&i);
	mem_bytes = i.totalram * i.mem_unit;
	return (mem_bytes >> 1) / btree_bytes(c);
}
int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
				 unsigned btree_leaf_mask,
				 unsigned btree_interior_mask,
				 struct bbpos start, struct bbpos *end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	enum btree_id btree;
	int ret = 0;

	for (btree = start.btree; btree < BTREE_ID_NR && !ret; btree++) {
		unsigned depth = ((1U << btree) & btree_leaf_mask) ? 1 : 2;

		if (!((1U << btree) & btree_leaf_mask) &&
		    !((1U << btree) & btree_interior_mask))
			continue;

		bch2_trans_node_iter_init(trans, &iter, btree,
					  btree == start.btree ? start.pos : POS_MIN,
					  0, depth, 0);
		/*
		 * for_each_btree_key_continue() doesn't check the return value
		 * from bch2_btree_iter_advance(), which is needed when
		 * iterating over interior nodes where we'll see keys at
		 * SPOS_MAX:
		 */
		do {
			k = __bch2_btree_iter_peek_and_restart(trans, &iter, 0);
			ret = bkey_err(k);
			if (!k.k || ret)
				break;

			--btree_nodes;
			if (!btree_nodes) {
				*end = BBPOS(btree, k.k->p);
				bch2_trans_iter_exit(trans, &iter);
				return 0;
			}
		} while (bch2_btree_iter_advance(&iter));
		bch2_trans_iter_exit(trans, &iter);
	}

	*end = BBPOS_MAX;
	return ret;
}
static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
						   struct bpos bucket_start,
						   struct bpos bucket_end)
{
	struct btree_iter iter;
	enum btree_id btree_id;
	int ret = 0;

	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
		unsigned depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

		bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
					  depth,
					  BTREE_ITER_ALL_LEVELS|
					  BTREE_ITER_PREFETCH);
		do {
			ret = commit_do(trans, NULL, NULL,
					BTREE_INSERT_LAZY_RW|
					BTREE_INSERT_NOFAIL,
					check_extent_to_backpointers(trans, &iter,
								     bucket_start, bucket_end));
			if (ret)
				break;
		} while (!bch2_btree_iter_advance(&iter));

		bch2_trans_iter_exit(trans, &iter);

		if (ret)
			break;

		ret = commit_do(trans, NULL, NULL,
				BTREE_INSERT_LAZY_RW|
				BTREE_INSERT_NOFAIL,
				check_btree_root_to_backpointers(trans, btree_id,
								 bucket_start, bucket_end));
		if (ret)
			break;
	}
	return ret;
}
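/*
 * bucket_pos_to_bp() asserts that the device exists; when walking the alloc
 * and backpointers btrees in tandem we can see positions for devices that no
 * longer exist, so fall back to the untranslated position:
 */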
static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
					 struct bpos bucket)
{
	return bch2_dev_exists2(c, bucket.inode)
		? bucket_pos_to_bp(c, bucket, 0)
		: bucket;
}
int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
				 struct bpos start, struct bpos *end)
{
	struct btree_iter alloc_iter;
	struct btree_iter bp_iter;
	struct bkey_s_c alloc_k, bp_k;
	size_t btree_nodes = btree_nodes_fit_in_ram(trans->c);
	bool alloc_end = false, bp_end = false;
	int ret = 0;

	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
				  start, 0, 1, 0);
	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
	while (1) {
		alloc_k = !alloc_end
			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
			: bkey_s_c_null;
		bp_k = !bp_end
			? __bch2_btree_iter_peek_and_restart(trans, &bp_iter, 0)
			: bkey_s_c_null;

		ret = bkey_err(alloc_k) ?: bkey_err(bp_k);
		if ((!alloc_k.k && !bp_k.k) || ret) {
			*end = SPOS_MAX;
			break;
		}

		--btree_nodes;
		if (!btree_nodes) {
			*end = alloc_k.k->p;
			break;
		}

		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
			if (!bch2_btree_iter_advance(&alloc_iter))
				alloc_end = true;
		} else {
			if (!bch2_btree_iter_advance(&bp_iter))
				bp_end = true;
		}
	}
	bch2_trans_iter_exit(trans, &bp_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	return ret;
}
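/*
 * Walk every extent pointer in the filesystem and check that a backpointer
 * exists for it, processing only as much of the alloc keyspace per pass as
 * fits in memory:
 */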
int bch2_check_extents_to_backpointers(struct bch_fs *c)
{
	struct btree_trans trans;
	struct bpos start = POS_MIN, end;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	while (1) {
		ret = bch2_get_alloc_in_memory_pos(&trans, start, &end);
		if (ret)
			break;

		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_extents_to_backpointers(): ");
			bch2_bpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
		if (ret || bpos_eq(end, SPOS_MAX))
			break;

		start = bpos_successor(end);
	}
	bch2_trans_exit(&trans);
	return ret;
}
static int check_one_backpointer(struct btree_trans *trans,
				 struct bpos bucket,
				 u64 *bp_offset,
				 struct bbpos start,
				 struct bbpos end)
{
	struct btree_iter iter;
	struct bch_backpointer bp;
	struct bbpos pos;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
	if (ret || *bp_offset == U64_MAX)
		return ret;

	pos = bp_to_bbpos(bp);
	if (bbpos_cmp(pos, start) < 0 ||
	    bbpos_cmp(pos, end) > 0)
		return 0;

	k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
	ret = bkey_err(k);
	if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
		return 0;
	if (ret)
		return ret;

	if (fsck_err_on(!k.k, trans->c,
			"%s backpointer points to missing extent\n%s",
			*bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
			(bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
		ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
		if (ret == -ENOENT)
			bch_err(trans->c, "backpointer at %llu not found", *bp_offset);
	}

	bch2_trans_iter_exit(trans, &iter);
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
						   struct bbpos start,
						   struct bbpos end)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
			   BTREE_ITER_PREFETCH, k, ret) {
		u64 bp_offset = 0;

		while (!(ret = commit_do(trans, NULL, NULL,
					 BTREE_INSERT_LAZY_RW|
					 BTREE_INSERT_NOFAIL,
				check_one_backpointer(trans, iter.pos, &bp_offset, start, end))) &&
		       bp_offset < U64_MAX)
			bp_offset++;

		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);
	return ret < 0 ? ret : 0;
}
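/*
 * The inverse pass: walk every backpointer and check that the extent or
 * btree node it points to still exists, chunking the extents/reflink
 * keyspace so that each pass fits in memory:
 */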
int bch2_check_backpointers_to_extents(struct bch_fs *c)
{
	struct btree_trans trans;
	struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	while (1) {
		ret = bch2_get_btree_in_memory_pos(&trans,
						   (1U << BTREE_ID_extents)|
						   (1U << BTREE_ID_reflink),
						   ~0,
						   start, &end);
		if (ret)
			break;

		if (!bbpos_cmp(start, BBPOS_MIN) &&
		    bbpos_cmp(end, BBPOS_MAX))
			bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

		if (bbpos_cmp(start, BBPOS_MIN) ||
		    bbpos_cmp(end, BBPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_backpointers_to_extents(): ");
			bch2_bbpos_to_text(&buf, start);
			prt_str(&buf, "-");
			bch2_bbpos_to_text(&buf, end);

			bch_verbose(c, "%s", buf.buf);
			printbuf_exit(&buf);
		}

		ret = bch2_check_backpointers_to_extents_pass(&trans, start, end);
		if (ret || !bbpos_cmp(end, BBPOS_MAX))
			break;

		start = bbpos_successor(end);
	}
	bch2_trans_exit(&trans);
	return ret;
}