1 // SPDX-License-Identifier: GPL-2.0
5 #include "btree_cache.h"
6 #include "btree_update.h"
10 #include "fs-common.h"
19 #include <linux/bsearch.h>
20 #include <linux/darray.h>
21 #include <linux/dcache.h> /* struct qstr */
24 * XXX: this is handling transaction restarts without returning
25 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
/*
 * Recount an inode's i_sectors: walk the extents btree for @inum in @snapshot
 * and sum the sizes of keys that represent real allocations.
 * Returns the sector total on success, else a negative error from iteration.
 */
27 static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
32 int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
33 SPOS(inum, 0, snapshot),
36 if (bkey_extent_is_allocation(k.k))
/* error takes precedence; otherwise the accumulated sector count */
41 return ret ?: sectors;
/*
 * Count subdirectories of directory @inum in @snapshot by scanning its
 * dirents for entries with d_type == DT_DIR (used to verify i_nlink).
 * Returns the count on success, else a negative error from iteration.
 */
44 static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
49 int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
50 SPOS(inum, 0, snapshot),
53 if (k.k->type == KEY_TYPE_dirent &&
54 bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
59 return ret ?: subdirs;
/*
 * Look up subvolume @subvol and return its snapshot ID and root inode
 * number via @snapshot/@inum. Returns 0 or a negative error from
 * bch2_subvolume_get().
 */
62 static int subvol_lookup(struct btree_trans *trans, u32 subvol,
63 u32 *snapshot, u64 *inum)
65 struct bch_subvolume s;
68 ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
/* on-disk fields are little-endian; convert before returning to caller */
70 *snapshot = le32_to_cpu(s.snapshot);
71 *inum = le64_to_cpu(s.inode);
/*
 * Find the first version of inode @inode_nr across all snapshots and unpack
 * it into @inode. Returns -BCH_ERR_ENOENT_inode if no key for that inode
 * number exists.
 */
75 static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
76 struct bch_inode_unpacked *inode)
78 struct btree_iter iter;
/* iterate all snapshots so we see whichever version comes first */
82 bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
84 BTREE_ITER_ALL_SNAPSHOTS);
85 k = bch2_btree_iter_peek(&iter);
/* peek may land past the inode number we asked for — treat that as ENOENT */
90 if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
91 ret = -BCH_ERR_ENOENT_inode;
95 ret = bch2_inode_unpack(k, inode);
97 bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
98 bch2_trans_iter_exit(trans, &iter);
/*
 * Look up inode @inode_nr at snapshot *@snapshot and unpack it into @inode.
 * *@snapshot is updated to the snapshot ID the key was actually found in
 * (may be an ancestor of the one requested).
 */
102 static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
103 struct bch_inode_unpacked *inode,
106 struct btree_iter iter;
110 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
111 SPOS(0, inode_nr, *snapshot), 0);
116 ret = bkey_is_inode(k.k)
117 ? bch2_inode_unpack(k, inode)
118 : -BCH_ERR_ENOENT_inode;
/* report back which snapshot the inode was found in */
120 *snapshot = iter.pos.snapshot;
122 bch2_trans_iter_exit(trans, &iter);
/*
 * Hash-lookup dirent @name in directory @dir within @snapshot.
 * On success fills *@target with the target inode number (and, per the
 * signature, presumably *@type with the dirent type — the assignment is not
 * visible in this excerpt; TODO confirm).
 */
126 static int lookup_dirent_in_snapshot(struct btree_trans *trans,
127 struct bch_hash_info hash_info,
128 subvol_inum dir, struct qstr *name,
129 u64 *target, unsigned *type, u32 snapshot)
131 struct btree_iter iter;
132 struct bkey_s_c_dirent d;
133 int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
134 &hash_info, dir, name, 0, snapshot);
138 d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
139 *target = le64_to_cpu(d.v->d_inum);
141 bch2_trans_iter_exit(trans, &iter);
/*
 * Delete the dirent at @pos. The directory's hash info is needed so
 * the hash-table delete can maintain the chain; we fetch the directory
 * inode (any snapshot) to derive it.
 */
145 static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
147 struct bch_fs *c = trans->c;
148 struct btree_iter iter;
149 struct bch_inode_unpacked dir_inode;
150 struct bch_hash_info dir_hash_info;
/* pos.inode is the directory inode number */
153 ret = lookup_first_inode(trans, pos.inode, &dir_inode);
157 dir_hash_info = bch2_hash_info_init(c, &dir_inode);
159 bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
/* fsck may delete in internal snapshot nodes, hence the flag */
161 ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
162 &dir_hash_info, &iter,
163 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
164 bch2_trans_iter_exit(trans, &iter);
170 /* Get lost+found, create if it doesn't exist: */
/*
 * Walks up from @snapshot to its snapshot tree's master subvolume, looks up
 * that subvolume's root inode, then looks up "lost+found" in the root
 * directory. If absent, creates the directory (mode 0700) and links it in.
 */
171 static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
172 struct bch_inode_unpacked *lostfound)
174 struct bch_fs *c = trans->c;
175 struct qstr lostfound_str = QSTR("lost+found");
180 struct bch_snapshot_tree st;
181 ret = bch2_snapshot_tree_lookup(trans,
182 bch2_snapshot_tree(c, snapshot), &st);
186 subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
189 ret = subvol_lookup(trans, le32_to_cpu(st.master_subvol),
190 &subvol_snapshot, &root_inum.inum);
191 bch_err_msg(c, ret, "looking up root subvol");
195 struct bch_inode_unpacked root_inode;
196 struct bch_hash_info root_hash_info;
197 u32 root_inode_snapshot = snapshot;
198 ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
199 bch_err_msg(c, ret, "looking up root inode");
203 root_hash_info = bch2_hash_info_init(c, &root_inode);
205 ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
206 &lostfound_str, &inum, &d_type, snapshot);
/* no lost+found yet — fall through to creation below */
207 if (bch2_err_matches(ret, ENOENT))
208 goto create_lostfound;
214 if (d_type != DT_DIR) {
215 bch_err(c, "error looking up lost+found: not a directory");
216 return -BCH_ERR_ENOENT_not_directory;
220 * The bch2_check_dirents pass has already run, dangling dirents
221 * shouldn't exist here:
223 ret = lookup_inode(trans, inum, lostfound, &snapshot);
224 bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
225 inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
230 * XXX: we could have a nicer log message here if we had a nice way to
231 * walk backpointers to print a path
233 bch_notice(c, "creating lost+found in snapshot %u", le32_to_cpu(st.root_snapshot));
235 u64 now = bch2_current_time(c);
236 struct btree_iter lostfound_iter = { NULL };
237 u64 cpu = raw_smp_processor_id();
/* initialize a fresh directory inode; parent is the subvolume root */
239 bch2_inode_init_early(c, lostfound);
240 bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
241 lostfound->bi_dir = root_inode.bi_inum;
/* new subdirectory bumps the root's link count */
243 root_inode.bi_nlink++;
245 ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
249 bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
250 ret = bch2_btree_iter_traverse(&lostfound_iter);
/* create the dirent and write the new inode in one transaction */
254 ret = bch2_dirent_create_snapshot(trans,
255 0, root_inode.bi_inum, snapshot, &root_hash_info,
256 mode_to_type(lostfound->bi_mode),
259 &lostfound->bi_dir_offset,
260 BCH_HASH_SET_MUST_CREATE) ?:
261 bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
262 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
264 bch_err_msg(c, ret, "creating lost+found");
265 bch2_trans_iter_exit(trans, &lostfound_iter);
/*
 * Reattach an unreachable inode into lost+found. Subvolume root inodes are
 * reattached as "subvol-<N>", ordinary inodes as their inode number.
 * Updates the inode's bi_dir/bi_dir_offset backpointer to the new dirent.
 */
269 static int reattach_inode(struct btree_trans *trans,
270 struct bch_inode_unpacked *inode,
273 struct bch_hash_info dir_hash;
274 struct bch_inode_unpacked lostfound;
278 u32 dirent_snapshot = inode_snapshot;
/* a subvolume root gets reparented to the root subvolume */
281 if (inode->bi_subvol) {
282 inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL;
285 ret = subvol_lookup(trans, inode->bi_parent_subvol,
286 &dirent_snapshot, &root_inum);
290 snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol);
292 snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
295 ret = lookup_lostfound(trans, dirent_snapshot, &lostfound);
/* reattaching a directory adds a ".." link to lost+found */
299 if (S_ISDIR(inode->bi_mode)) {
300 lostfound.bi_nlink++;
302 ret = __bch2_fsck_write_inode(trans, &lostfound, U32_MAX);
307 dir_hash = bch2_hash_info_init(trans->c, &lostfound);
309 name = (struct qstr) QSTR(name_buf);
311 ret = bch2_dirent_create_snapshot(trans,
312 inode->bi_parent_subvol, lostfound.bi_inum,
/* dirent target: the subvolume if set, else the inode itself */
317 inode->bi_subvol ?: inode->bi_inum,
319 BCH_HASH_SET_MUST_CREATE);
/* point the inode's backpointer at the dirent we just created */
323 inode->bi_dir = lostfound.bi_inum;
324 inode->bi_dir_offset = dir_offset;
326 return __bch2_fsck_write_inode(trans, inode, inode_snapshot);
/*
 * Delete the dirent named by @inode's bi_dir/bi_dir_offset backpointer
 * fields.
 */
329 static int remove_backpointer(struct btree_trans *trans,
330 struct bch_inode_unpacked *inode)
332 struct btree_iter iter;
333 struct bkey_s_c_dirent d;
336 d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
337 POS(inode->bi_dir, inode->bi_dir_offset), 0,
340 __remove_dirent(trans, d.k->p);
341 bch2_trans_iter_exit(trans, &iter);
/* one snapshot ID (plus its equivalence class) seen at the current key pos */
345 struct snapshots_seen_entry {
/* sorted list of snapshot IDs seen while scanning a btree in key order */
350 struct snapshots_seen {
352 DARRAY(struct snapshots_seen_entry) ids;
/* free the backing darray */
355 static inline void snapshots_seen_exit(struct snapshots_seen *s)
357 darray_exit(&s->ids);
/* zero-initialize; safe to call snapshots_seen_exit() afterwards */
360 static inline void snapshots_seen_init(struct snapshots_seen *s)
362 memset(s, 0, sizeof(*s));
/*
 * Insert snapshot @id (with its equivalence class) into @s, keeping the
 * list sorted. Returns -ENOMEM-class error if the darray insert fails.
 */
365 static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
367 struct snapshots_seen_entry *i, n = {
369 .equiv = bch2_snapshot_equiv(c, id),
/* find the sorted insert position */
373 __darray_for_each(s->ids, i) {
380 ret = darray_insert_item(&s->ids, i - s->ids.data, n);
382 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
/*
 * Record that we've seen a key at @pos while walking @btree_id in key order.
 * If this is a new position the seen list is reset (reset itself elided from
 * this excerpt). Duplicate equivalence classes mean snapshot deletion didn't
 * finish, so schedule the delete_dead_snapshots recovery pass.
 */
387 static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
388 enum btree_id btree_id, struct bpos pos)
390 struct snapshots_seen_entry n = {
392 .equiv = bch2_snapshot_equiv(c, pos.snapshot),
396 if (!bkey_eq(s->pos, pos))
/* track position by equivalence class */
400 s->pos.snapshot = n.equiv;
402 darray_for_each(s->ids, i) {
407 * We currently don't rigorously track for snapshot cleanup
408 * needing to be run, so it shouldn't be a fsck error yet:
410 if (i->equiv == n.equiv) {
411 bch_err(c, "snapshot deletion did not finish:\n"
412 "  duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
413 bch2_btree_id_str(btree_id),
414 pos.inode, pos.offset,
415 i->id, n.id, n.equiv);
416 set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
417 return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
421 ret = darray_push(&s->ids, n);
423 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
429 * key_visible_in_snapshot - returns true if @id is a descendant of @ancestor,
430 * and @ancestor hasn't been overwritten in @seen
432 * @c:          filesystem handle
433 * @seen:       list of snapshot ids already seen at current position
434 * @id:         descendant snapshot id
435 * @ancestor:   ancestor snapshot id
437 * Returns:     whether key in @ancestor snapshot is visible in @id snapshot
439 static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
440 u32 id, u32 ancestor)
/* snapshot IDs are allocated so that ancestors are numerically larger */
444 EBUG_ON(id > ancestor);
445 EBUG_ON(!bch2_snapshot_is_equiv(c, id));
446 EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
448 /* @ancestor should be the snapshot most recently added to @seen */
449 EBUG_ON(ancestor != seen->pos.snapshot);
450 EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
455 if (!bch2_snapshot_is_ancestor(c, id, ancestor))
459 * We know that @id is a descendant of @ancestor, we're checking if
460 * we've seen a key that overwrote @ancestor - i.e. also a descendant of
461 * @ancestor and with @id as a descendant.
463 * But we already know that we're scanning IDs between @id and @ancestor
464 * numerically, since snapshot ID lists are kept sorted, so if we find
465 * an id that's an ancestor of @id we're done:
/* scan backwards through the sorted seen list, newest entries first */
468 for (i = seen->ids.nr - 2;
469 i >= 0 && seen->ids.data[i].equiv >= id;
471 if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
478 * ref_visible - given a key with snapshot id @src that points to a key with
479 * snapshot id @dst, test whether there is some snapshot in which @dst is
482 * @c:          filesystem handle
483 * @s:          list of snapshot IDs already seen at @src
484 * @src:        snapshot ID of src key
485 * @dst:        snapshot ID of dst key
486 * Returns:     true if there is some snapshot in which @dst is visible
488 * Assumes we're visiting @src keys in natural key order
490 static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
/* direction matters: check visibility in whichever direction applies */
494 ? key_visible_in_snapshot(c, s, dst, src)
495 : bch2_snapshot_is_ancestor(c, src, dst);
/*
 * Like ref_visible(), but with a seen list for each side; normalizes both
 * IDs to their equivalence classes and swaps so the visibility check runs
 * in the correct direction.
 */
498 static int ref_visible2(struct bch_fs *c,
499 u32 src, struct snapshots_seen *src_seen,
500 u32 dst, struct snapshots_seen *dst_seen)
502 src = bch2_snapshot_equiv(c, src);
503 dst = bch2_snapshot_equiv(c, dst);
507 swap(dst_seen, src_seen);
509 return key_visible_in_snapshot(c, src_seen, dst, src);
/*
 * Iterate the inode_walker entries whose snapshot is visible from
 * @_snapshot, relying on the inodes darray being sorted by snapshot ID.
 */
512 #define for_each_visible_inode(_c, _s, _w, _snapshot, _i)				\
513 for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr &&	\
514 (_i)->snapshot <= (_snapshot); _i++)						\
515 if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
/* one inode version (per snapshot) tracked while walking a btree */
517 struct inode_walker_entry {
518 struct bch_inode_unpacked inode;
/* caches all snapshot versions of the inode at the current key position */
524 struct inode_walker {
525 bool first_this_inode;
526 bool recalculate_sums;
527 struct bpos last_pos;
529 DARRAY(struct inode_walker_entry) inodes;
/* free the backing darray */
532 static void inode_walker_exit(struct inode_walker *w)
534 darray_exit(&w->inodes);
/* zero-initialized walker; safe to pass to inode_walker_exit() */
537 static struct inode_walker inode_walker_init(void)
539 return (struct inode_walker) { 0, };
/*
 * Unpack @inode and append it to the walker's list, keyed by the snapshot's
 * equivalence class. The unpack is a BUG_ON because the key was already
 * validated as an inode by the caller.
 */
542 static int add_inode(struct bch_fs *c, struct inode_walker *w,
543 struct bkey_s_c inode)
545 struct bch_inode_unpacked u;
547 BUG_ON(bch2_inode_unpack(inode, &u));
549 return darray_push(&w->inodes, ((struct inode_walker_entry) {
551 .snapshot = bch2_snapshot_equiv(c, inode.k->p.snapshot),
/*
 * Repopulate the walker with every snapshot version of inode @inum
 * (the list is cleared elsewhere in elided lines — TODO confirm).
 */
555 static int get_inodes_all_snapshots(struct btree_trans *trans,
556 struct inode_walker *w, u64 inum)
558 struct bch_fs *c = trans->c;
559 struct btree_iter iter;
563 w->recalculate_sums = false;
566 for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
567 BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
/* stop once we're past the inode number we're collecting */
568 if (k.k->p.offset != inum)
571 if (bkey_is_inode(k.k))
574 bch2_trans_iter_exit(trans, &iter);
579 w->first_this_inode = true;
/*
 * Find the walker entry whose snapshot is visible from @k's snapshot.
 * If the key's snapshot is strictly newer than the inode version we have
 * (and the key isn't a whiteout), clone the inode into the key's snapshot
 * and insert it into the sorted list, so accounting is tracked per snapshot.
 */
583 static struct inode_walker_entry *
584 lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k)
586 bool is_whiteout = k.k->type == KEY_TYPE_whiteout;
587 u32 snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
589 struct inode_walker_entry *i;
590 __darray_for_each(w->inodes, i)
591 if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
596 BUG_ON(snapshot > i->snapshot);
598 if (snapshot != i->snapshot && !is_whiteout) {
599 struct inode_walker_entry new = *i;
601 new.snapshot = snapshot;
604 struct printbuf buf = PRINTBUF;
605 bch2_bkey_val_to_text(&buf, c, k);
607 bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n"
608 "unexpected because we should always update the inode when we update a key in that inode\n"
610 w->last_pos.inode, snapshot, i->snapshot, buf.buf);
/* walk back to the sorted insert position for the cloned entry */
613 while (i > w->inodes.data && i[-1].snapshot > snapshot)
616 size_t pos = i - w->inodes.data;
617 int ret = darray_insert_item(&w->inodes, pos, new);
/* darray insert may have reallocated — recompute the pointer */
621 i = w->inodes.data + pos;
/*
 * Advance the walker to key @k: reload the inode list when we move to a new
 * inode number, reset per-position state when we move within the same inode,
 * then return the entry matching @k's snapshot.
 */
627 static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
628 struct inode_walker *w,
631 if (w->last_pos.inode != k.k->p.inode) {
632 int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
635 } else if (bkey_cmp(w->last_pos, k.k->p)) {
636 darray_for_each(w->inodes, i)
637 i->seen_this_pos = false;
640 w->last_pos = k.k->p;
642 return lookup_inode_for_snapshot(trans->c, w, k);
/*
 * Collect the versions of inode @inum that are visible from the snapshot
 * recorded in @s->pos (i.e. reachable from the referring key's snapshot).
 */
645 static int __get_visible_inodes(struct btree_trans *trans,
646 struct inode_walker *w,
647 struct snapshots_seen *s,
650 struct bch_fs *c = trans->c;
651 struct btree_iter iter;
657 for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
658 BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
659 u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
661 if (k.k->p.offset != inum)
/* skip versions not visible from the referring snapshot */
664 if (!ref_visible(c, s, s->pos.snapshot, equiv))
667 if (bkey_is_inode(k.k))
/* snapshots are scanned in order; nothing past s->pos.snapshot is relevant */
670 if (equiv >= s->pos.snapshot)
673 bch2_trans_iter_exit(trans, &iter);
/*
 * Delete keys whose snapshot ID no longer exists (equivalence class 0).
 * Returns 1 when the key was deleted so the caller can skip further checks,
 * 0 if the key is fine, negative on error.
 */
678 static int check_key_has_snapshot(struct btree_trans *trans,
679 struct btree_iter *iter,
682 struct bch_fs *c = trans->c;
683 struct printbuf buf = PRINTBUF;
686 if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
687 bkey_in_missing_snapshot,
688 "key in missing snapshot: %s",
689 (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
690 ret = bch2_btree_delete_at(trans, iter,
691 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
/*
 * Repair a hash-table key at the wrong slot: delete it at its current
 * position and re-insert it via the hash code so it lands at the offset
 * its hash dictates, then commit.
 */
697 static int hash_redo_key(struct btree_trans *trans,
698 const struct bch_hash_desc desc,
699 struct bch_hash_info *hash_info,
700 struct btree_iter *k_iter, struct bkey_s_c k)
702 struct bkey_i *delete;
705 delete = bch2_trans_kmalloc(trans, sizeof(*delete));
707 return PTR_ERR(delete);
/* mutable copy of the key to reinsert at the correct offset */
709 tmp = bch2_bkey_make_mut_noupdate(trans, k);
713 bkey_init(&delete->k);
714 delete->k.p = k_iter->pos;
715 return  bch2_btree_iter_traverse(k_iter) ?:
716 bch2_trans_update(trans, k_iter, delete, 0) ?:
717 bch2_hash_set_in_snapshot(trans, desc, hash_info,
718 (subvol_inum) { 0, k.k->p.inode },
719 k.k->p.snapshot, tmp,
720 BCH_HASH_SET_MUST_CREATE,
721 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
722 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
/*
 * Verify @hash_k sits in the hash-table slot its hash dictates. Walks the
 * probe chain from the hashed slot; deletes exact duplicates, and rehashes
 * keys found at the wrong offset (then forces a nested transaction restart
 * since the btree was modified under the caller's iterator).
 */
725 static int hash_check_key(struct btree_trans *trans,
726 const struct bch_hash_desc desc,
727 struct bch_hash_info *hash_info,
728 struct btree_iter *k_iter, struct bkey_s_c hash_k)
730 struct bch_fs *c = trans->c;
731 struct btree_iter iter = { NULL };
732 struct printbuf buf = PRINTBUF;
737 if (hash_k.k->type != desc.key_type)
740 hash = desc.hash_bkey(hash_info, hash_k);
/* fast path: key is exactly where its hash says it should be */
742 if (likely(hash == hash_k.k->p.offset))
/* key sits before its hashed slot — that's never valid */
745 if (hash_k.k->p.offset < hash)
748 for_each_btree_key_norestart(trans, iter, desc.btree_id,
749 SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
750 BTREE_ITER_SLOTS, k, ret) {
751 if (bkey_eq(k.k->p, hash_k.k->p))
754 if (fsck_err_on(k.k->type == desc.key_type &&
755 !desc.cmp_bkey(k, hash_k), c,
756 hash_table_key_duplicate,
757 "duplicate hash table keys:\n%s",
758 (printbuf_reset(&buf),
759 bch2_bkey_val_to_text(&buf, c, hash_k),
761 ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
/* an empty slot ends the probe chain — key is unreachable by lookup */
765 if (bkey_deleted(k.k)) {
766 bch2_trans_iter_exit(trans, &iter);
771 bch2_trans_iter_exit(trans, &iter);
775 if (fsck_err(c, hash_table_key_wrong_offset,
776 "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
777 bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
778 (printbuf_reset(&buf),
779 bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
780 ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
/* we rewrote keys under the caller's iterator; force a restart */
784 ret = -BCH_ERR_transaction_restart_nested;
/* fetch the dirent at @pos as a typed key, with @iter left pointing at it */
790 static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
791 struct btree_iter *iter,
794 return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
/*
 * Fetch the dirent named by @inode's backpointer (bi_dir/bi_dir_offset).
 * For subvolume roots the parent dirent lives in the parent subvolume's
 * snapshot, so *@snapshot is resolved through subvol_lookup() first.
 * Errors are returned embedded in the .k pointer (check with bkey_err()).
 */
797 static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
798 struct btree_iter *iter,
799 struct bch_inode_unpacked *inode,
802 if (inode->bi_subvol) {
804 int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum);
806 return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) });
809 return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
/* does the inode's backpointer name exactly this dirent's position? */
812 static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
813 struct bkey_s_c_dirent d)
815 return  inode->bi_dir == d.k->p.inode &&
816 inode->bi_dir_offset == d.k->p.offset;
/*
 * Does this dirent target @inode? Subvolume dirents match on the child
 * subvolume ID; regular dirents match on the inode number.
 */
819 static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
820 struct bch_inode_unpacked *inode)
822 return d.v->d_type == DT_SUBVOL
823 ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
824 : le64_to_cpu(d.v->d_inum) == inode->bi_inum;
/*
 * Is inode position @p on the deleted-inodes list? Returns 1 if present,
 * 0 if not, negative on lookup error.
 */
827 static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
829 struct btree_iter iter;
830 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
831 int ret = bkey_err(k);
835 bch2_trans_iter_exit(trans, &iter);
836 return k.k->type == KEY_TYPE_set;
/*
 * Verify the inode's dirent backpointer: the dirent it names must exist and
 * must point back at this inode. On mismatch the backpointer fields are
 * cleared (and *write_inode presumably set in elided lines — TODO confirm);
 * check_dirents()/check_path() will later restore or reattach.
 */
839 static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c inode_k,
840 struct bch_inode_unpacked *inode,
841 u32 inode_snapshot, bool *write_inode)
843 struct bch_fs *c = trans->c;
844 struct printbuf buf = PRINTBUF;
846 struct btree_iter dirent_iter = {};
847 struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
848 int ret = bkey_err(d);
/* ENOENT is expected when the dirent is simply missing; handled below */
849 if (ret && !bch2_err_matches(ret, ENOENT))
853 c,  inode_points_to_missing_dirent,
854 "inode points to missing dirent\n%s",
855 (bch2_bkey_val_to_text(&buf, c, inode_k), buf.buf)) ||
856 fsck_err_on(!ret && !dirent_points_to_inode(d, inode),
857 c,  inode_points_to_wrong_dirent,
858 "inode points to dirent that does not point back:\n%s",
859 (bch2_bkey_val_to_text(&buf, c, inode_k),
861 bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
863 * We just clear the backpointer fields for now. If we find a
864 * dirent that points to this inode in check_dirents(), we'll
865 * update it then; then when we get to check_path() if the
866 * backpointer is still 0 we'll reattach it.
869 inode->bi_dir_offset = 0;
870 inode->bi_flags &= ~BCH_INODE_backptr_untrusted;
876 bch2_trans_iter_exit(trans, &dirent_iter);
/*
 * Per-inode fsck pass. For each inode key: validate its snapshot, cross-check
 * against the previous snapshot version (@prev), finish interrupted deletes
 * and truncates (i_size/i_sectors/unlinked dirty flags), verify deleted-list
 * membership, validate the dirent backpointer and subvolume linkage, and
 * rewrite the inode if any repair was made.
 */
882 static int check_inode(struct btree_trans *trans,
883 struct btree_iter *iter,
885 struct bch_inode_unpacked *prev,
886 struct snapshots_seen *s,
889 struct bch_fs *c = trans->c;
890 struct bch_inode_unpacked u;
891 bool do_update = false;
894 ret = check_key_has_snapshot(trans, iter, k);
900 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
904 if (!bkey_is_inode(k.k))
907 BUG_ON(bch2_inode_unpack(k, &u));
/* in non-full mode, clean inodes need no further checking */
910 !(u.bi_flags & (BCH_INODE_i_size_dirty|
911 BCH_INODE_i_sectors_dirty|
912 BCH_INODE_unlinked)))
915 if (prev->bi_inum != u.bi_inum)
/* hash seed and d_type must agree across snapshot versions of an inode */
918 if (fsck_err_on(prev->bi_hash_seed != u.bi_hash_seed ||
919 inode_d_type(prev) != inode_d_type(&u),
920 c,  inode_snapshot_mismatch,
921 "inodes in different snapshots don't match")) {
922 bch_err(c, "repair not implemented yet");
923 return -BCH_ERR_fsck_repair_unimplemented;
/* dirty flags must not leak into child snapshots; push clean copies down */
926 if ((u.bi_flags & (BCH_INODE_i_size_dirty|BCH_INODE_unlinked)) &&
927 bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
928 struct bpos new_min_pos;
930 ret = bch2_propagate_key_to_snapshot_leaves(trans, iter->btree_id, k, &new_min_pos);
934 u.bi_flags &= ~BCH_INODE_i_size_dirty|BCH_INODE_unlinked;
936 ret = __bch2_fsck_write_inode(trans, &u, iter->pos.snapshot);
938 bch_err_msg(c, ret, "in fsck updating inode");
/* propagation created keys before our position; rewind the iterator */
942 if (!bpos_eq(new_min_pos, POS_MIN))
943 bch2_btree_iter_set_pos(iter, bpos_predecessor(new_min_pos));
947 if (u.bi_flags & BCH_INODE_unlinked) {
948 ret = check_inode_deleted_list(trans, k.k->p);
952 fsck_err_on(ret, c, unlinked_inode_not_on_deleted_list,
953 "inode %llu:%u unlinked, but not on deleted list",
954 u.bi_inum, k.k->p.snapshot);
/* unlinked on a clean filesystem: finish the interrupted delete */
958 if (u.bi_flags & BCH_INODE_unlinked &&
960 fsck_err(c, inode_unlinked_but_clean,
961 "filesystem marked clean, but inode %llu unlinked",
963 ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
964 bch_err_msg(c, ret, "in fsck deleting inode");
/* i_size dirty on a clean filesystem: finish the interrupted truncate */
968 if (u.bi_flags & BCH_INODE_i_size_dirty &&
970 fsck_err(c, inode_i_size_dirty_but_clean,
971 "filesystem marked clean, but inode %llu has i_size dirty",
973 bch_verbose(c, "truncating inode %llu", u.bi_inum);
976 * XXX: need to truncate partial blocks too here - or ideally
977 * just switch units to bytes and that issue goes away
979 ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
980 SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
982 POS(u.bi_inum, U64_MAX),
984 bch_err_msg(c, ret, "in fsck truncating inode");
989 * We truncated without our normal sector accounting hook, just
990 * make sure we recalculate it:
992 u.bi_flags |= BCH_INODE_i_sectors_dirty;
994 u.bi_flags &= ~BCH_INODE_i_size_dirty;
/* i_sectors dirty on a clean filesystem: recount from the extents btree */
998 if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
1000 fsck_err(c, inode_i_sectors_dirty_but_clean,
1001 "filesystem marked clean, but inode %llu has i_sectors dirty",
1005 bch_verbose(c, "recounting sectors for inode %llu",
1008 sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
1010 bch_err_msg(c, sectors, "in fsck recounting inode sectors");
1014 u.bi_sectors = sectors;
1015 u.bi_flags &= ~BCH_INODE_i_sectors_dirty;
/* untrusted backpointer: clear it; check_dirents() will restore it */
1019 if (u.bi_flags & BCH_INODE_backptr_untrusted) {
1021 u.bi_dir_offset = 0;
1022 u.bi_flags &= ~BCH_INODE_backptr_untrusted;
1026 if (u.bi_dir || u.bi_dir_offset) {
1027 ret = check_inode_dirent_inode(trans, k, &u, k.k->p.snapshot, &do_update);
/* only subvolume roots may have a parent subvol recorded */
1032 if (fsck_err_on(u.bi_parent_subvol &&
1033 (u.bi_subvol == 0 ||
1034 u.bi_subvol == BCACHEFS_ROOT_SUBVOL),
1035 c,  inode_bi_parent_nonzero,
1036 "inode %llu:%u has subvol %u but nonzero parent subvol %u",
1037 u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) {
1038 u.bi_parent_subvol = 0;
1043 struct bch_subvolume s;
1045 ret = bch2_subvolume_get(trans, u.bi_subvol, false, 0, &s);
1046 if (ret && !bch2_err_matches(ret, ENOENT))
/* bi_subvol must name an existing subvolume that points back at us */
1049 if (fsck_err_on(ret,
1050 c,  inode_bi_subvol_missing,
1051 "inode %llu:%u bi_subvol points to missing subvolume %u",
1052 u.bi_inum, k.k->p.snapshot, u.bi_subvol) ||
1053 fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum ||
1054 !bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot),
1056 c,  inode_bi_subvol_wrong,
1057 "inode %llu:%u points to subvol %u, but subvol points to %llu:%u",
1058 u.bi_inum, k.k->p.snapshot, u.bi_subvol,
1059 le64_to_cpu(s.inode),
1060 le32_to_cpu(s.snapshot))) {
1062 u.bi_parent_subvol = 0;
1068 ret = __bch2_fsck_write_inode(trans, &u, iter->pos.snapshot);
1069 bch_err_msg(c, ret, "in fsck updating inode");
/*
 * Fsck entry point: walk every inode key (all snapshots) and run
 * check_inode() on each, committing per key. "full" mode (when fsck was
 * explicitly requested) checks clean inodes too.
 */
1079 int bch2_check_inodes(struct bch_fs *c)
1081 bool full = c->opts.fsck;
1082 struct bch_inode_unpacked prev = { 0 };
1083 struct snapshots_seen s;
1085 snapshots_seen_init(&s);
1087 int ret = bch2_trans_run(c,
1088 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
1090 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1091 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1092 check_inode(trans, &iter, k, &prev, &s, full)));
1094 snapshots_seen_exit(&s);
/*
 * After scanning all of an inode's extents, compare each snapshot version's
 * counted sectors against its bi_sectors; cross-check our own count against
 * a fresh recount, and repair i_sectors where it disagrees (unless the
 * i_sectors_dirty flag already excuses the mismatch).
 */
1099 static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
1101 struct bch_fs *c = trans->c;
1102 u32 restart_count = trans->restart_count;
1106 darray_for_each(w->inodes, i) {
1107 if (i->inode.bi_sectors == i->count)
/* sanity check our incremental count against a full recount */
1110 count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
1112 if (w->recalculate_sums)
1115 if (i->count != count2) {
1116 bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
1117 w->last_pos.inode, i->snapshot, i->count, count2);
1118 return -BCH_ERR_internal_fsck_err;
1121 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
1122 c,  inode_i_sectors_wrong,
1123 "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
1124 w->last_pos.inode, i->snapshot,
1125 i->inode.bi_sectors, i->count)) {
1126 i->inode.bi_sectors = i->count;
1127 ret = bch2_fsck_write_inode(trans, &i->inode, i->snapshot);
/* caller relies on us reporting restarts that happened during repair */
1134 return ret ?: trans_was_restarted(trans, restart_count);
/* seen-list snapshot at the time this extent end was recorded */
1140 struct snapshots_seen seen;
/* per-inode list of extent end positions, used to detect overlaps */
1143 struct extent_ends {
1144 struct bpos last_pos;
1145 DARRAY(struct extent_end) e;
/* drop all recorded ends (freeing each entry's seen list), keep the array */
1148 static void extent_ends_reset(struct extent_ends *extent_ends)
1150 darray_for_each(extent_ends->e, i)
1151 snapshots_seen_exit(&i->seen);
1152 extent_ends->e.nr = 0;
/* free everything */
1155 static void extent_ends_exit(struct extent_ends *extent_ends)
1157 extent_ends_reset(extent_ends);
1158 darray_exit(&extent_ends->e);
/* zero-initialize; pairs with extent_ends_exit() */
1161 static void extent_ends_init(struct extent_ends *extent_ends)
1163 memset(extent_ends, 0, sizeof(*extent_ends));
/*
 * Record the end position of extent @k along with a copy of the current
 * seen-snapshots list. Replaces an existing entry for the same snapshot,
 * otherwise inserts sorted by snapshot ID.
 */
1166 static int extent_ends_at(struct bch_fs *c,
1167 struct extent_ends *extent_ends,
1168 struct snapshots_seen *seen,
1171 struct extent_end *i, n = (struct extent_end) {
1172 .offset = k.k->p.offset,
1173 .snapshot = k.k->p.snapshot,
/* deep-copy the seen list: the caller's list keeps mutating as we scan */
1177 n.seen.ids.data = kmemdup(seen->ids.data,
1178 sizeof(seen->ids.data[0]) * seen->ids.size,
1180 if (!n.seen.ids.data)
1181 return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
1183 __darray_for_each(extent_ends->e, i) {
1184 if (i->snapshot == k.k->p.snapshot) {
1185 snapshots_seen_exit(&i->seen);
1190 if (i->snapshot >= k.k->p.snapshot)
1194 return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
/*
 * Repair a detected pair of overlapping extents. Re-looks both extents up
 * (positions may be stale after a transaction restart), decides which one to
 * overwrite (the one visible in fewer snapshots loses), trims it via the
 * extent-overwrite path, and commits. May return
 * -BCH_ERR_transaction_restart_nested to rerun check_extent() from the top.
 */
1197 static int overlapping_extents_found(struct btree_trans *trans,
1198 enum btree_id btree,
1199 struct bpos pos1, struct snapshots_seen *pos1_seen,
1202 struct extent_end *extent_end)
1204 struct bch_fs *c = trans->c;
1205 struct printbuf buf = PRINTBUF;
1206 struct btree_iter iter1, iter2 = { NULL };
1207 struct bkey_s_c k1, k2;
/* caller guarantees pos1 starts before pos2's start */
1210 BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
1212 bch2_trans_iter_init(trans, &iter1, btree, pos1,
1213 BTREE_ITER_ALL_SNAPSHOTS|
1214 BTREE_ITER_NOT_EXTENTS);
1215 k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
1220 prt_str(&buf, "\n  ");
1221 bch2_bkey_val_to_text(&buf, c, k1);
/* the first extent vanished or moved since detection — internal error */
1223 if (!bpos_eq(pos1, k1.k->p)) {
1224 prt_str(&buf, "\n  wanted\n  ");
1225 bch2_bpos_to_text(&buf, pos1);
1226 prt_str(&buf, "\n  ");
1227 bch2_bkey_to_text(&buf, &pos2);
1229 bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
1231 ret = -BCH_ERR_internal_fsck_err;
1235 bch2_trans_copy_iter(&iter2, &iter1);
/* scan forward from the first extent to re-find the second */
1238 bch2_btree_iter_advance(&iter2);
1240 k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX));
1245 if (bpos_ge(k2.k->p, pos2.p))
1249 prt_str(&buf, "\n  ");
1250 bch2_bkey_val_to_text(&buf, c, k2);
1252 if (bpos_gt(k2.k->p, pos2.p) ||
1253 pos2.size != k2.k->size) {
1254 bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
1256 ret = -BCH_ERR_internal_fsck_err;
1260 prt_printf(&buf, "\n  overwriting %s extent",
1261 pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
1263 if (fsck_err(c, extent_overlapping,
1264 "overlapping extents%s", buf.buf)) {
1265 struct btree_iter *old_iter = &iter1;
1266 struct disk_reservation res = { 0 };
1268 if (pos1.snapshot < pos2.p.snapshot) {
/* reserve for the compressed data the trimmed extent may be split into */
1273 trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
1275 ret =   bch2_trans_update_extent_overwrite(trans, old_iter,
1276 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
1278 bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
1279 bch2_disk_reservation_put(c, &res);
1286 if (pos1.snapshot == pos2.p.snapshot) {
1288 * We overwrote the first extent, and did the overwrite
1289 * in the same snapshot:
1291 extent_end->offset = bkey_start_offset(&pos2);
1292 } else if (pos1.snapshot > pos2.p.snapshot) {
1294 * We overwrote the first extent in pos2's snapshot:
1296 ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
1299 * We overwrote the second extent - restart
1300 * check_extent() from the top:
1302 ret = -BCH_ERR_transaction_restart_nested;
1307 bch2_trans_iter_exit(trans, &iter2);
1308 bch2_trans_iter_exit(trans, &iter1);
1309 printbuf_exit(&buf);
/*
 * Compare extent @k against the recorded extent ends for this inode: any
 * recorded end past @k's start, in a snapshot mutually visible with @k's,
 * is an overlap to repair. Then record @k's own end.
 */
1313 static int check_overlapping_extents(struct btree_trans *trans,
1314 struct snapshots_seen *seen,
1315 struct extent_ends *extent_ends,
1318 struct btree_iter *iter,
1321 struct bch_fs *c = trans->c;
1324 /* transaction restart, running again */
1325 if (bpos_eq(extent_ends->last_pos, k.k->p))
/* new inode: previously recorded ends no longer apply */
1328 if (extent_ends->last_pos.inode != k.k->p.inode)
1329 extent_ends_reset(extent_ends);
1331 darray_for_each(extent_ends->e, i) {
1332 if (i->offset <= bkey_start_offset(k.k))
1335 if (!ref_visible2(c,
1336 k.k->p.snapshot, seen,
1337 i->snapshot, &i->seen))
1340 ret = overlapping_extents_found(trans, iter->btree_id,
1341 SPOS(iter->pos.inode,
1350 ret = extent_ends_at(c, extent_ends, seen, k);
1354 extent_ends->last_pos = k.k->p;
/*
 * Warn about extents whose encoded (checksummed/compressed) unit exceeds
 * encoded_extent_max — such extents shouldn't be creatable; report only,
 * no repair here.
 */
1359 static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
1362 struct bch_fs *c = trans->c;
1363 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1364 struct bch_extent_crc_unpacked crc;
1365 const union bch_extent_entry *i;
/* option is in bytes; compare in 512-byte sectors */
1366 unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
1368 bkey_for_each_crc(k.k, ptrs, crc, i)
1369 if (crc_is_encoded(crc) &&
1370 crc.uncompressed_size > encoded_extent_max_sectors) {
1371 struct printbuf buf = PRINTBUF;
1373 bch2_bkey_val_to_text(&buf, c, k);
1374 bch_err(c, "overbig encoded extent, please report this:\n  %s", buf.buf);
1375 printbuf_exit(&buf);
/*
 * Per-extent fsck pass: verify the extent's snapshot exists, that an owning
 * inode exists and is a regular file or symlink, detect/repair overlapping
 * extents, delete extents past i_size, and accumulate per-snapshot sector
 * counts for the later i_sectors check.
 */
1381 static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
1383 struct inode_walker *inode,
1384 struct snapshots_seen *s,
1385 struct extent_ends *extent_ends)
1387 struct bch_fs *c = trans->c;
1388 struct inode_walker_entry *i;
1389 struct printbuf buf = PRINTBUF;
1390 struct bpos equiv = k.k->p;
1393 equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1395 ret = check_key_has_snapshot(trans, iter, k);
/* positive return means the key was deleted — nothing more to check */
1397 ret = ret < 0 ? ret : 0;
/* crossed into a new inode: settle the previous inode's sector counts */
1401 if (inode->last_pos.inode != k.k->p.inode) {
1402 ret = check_i_sectors(trans, inode);
1407 i = walk_inode(trans, inode, k);
1408 ret = PTR_ERR_OR_ZERO(i);
1412 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1416 if (k.k->type != KEY_TYPE_whiteout) {
1417 if (fsck_err_on(!i, c, extent_in_missing_inode,
1418 "extent in missing inode:\n  %s",
1419 (printbuf_reset(&buf),
1420 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1423 if (fsck_err_on(i &&
1424 !S_ISREG(i->inode.bi_mode) &&
1425 !S_ISLNK(i->inode.bi_mode),
1426 c,  extent_in_non_reg_inode,
1427 "extent in non regular inode mode %o:\n  %s",
1429 (printbuf_reset(&buf),
1430 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1433 ret = check_overlapping_extents(trans, s, extent_ends, k,
1434 equiv.snapshot, iter,
1435 &inode->recalculate_sums);
1441 * Check inodes in reverse order, from oldest snapshots to newest,
1442 * starting from the inode that matches this extent's snapshot. If we
1443 * didn't have one, iterate over all inodes:
1446 i = inode->inodes.data + inode->inodes.nr - 1;
1449 inode->inodes.data && i >= inode->inodes.data;
1451 if (i->snapshot > equiv.snapshot ||
1452 !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
1455 if (k.k->type != KEY_TYPE_whiteout) {
/* extent beyond i_size (rounded to a block) and not a reservation: drop it */
1456 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
1457 k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
1458 !bkey_extent_is_reservation(k),
1459 c,  extent_past_end_of_inode,
1460 "extent type past end of inode %llu:%u, i_size %llu\n  %s",
1461 i->inode.bi_inum, i->snapshot, i->inode.bi_size,
1462 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1463 struct btree_iter iter2;
1465 bch2_trans_copy_iter(&iter2, iter);
1466 bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
1467 ret =   bch2_btree_iter_traverse(&iter2) ?:
1468 bch2_btree_delete_at(trans, &iter2,
1469 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1470 bch2_trans_iter_exit(trans, &iter2);
/* treat the deleted key as a whiteout for the rest of this pass */
1474 iter->k.type = KEY_TYPE_whiteout;
1477 if (bkey_extent_is_allocation(k.k))
1478 i->count += k.k->size;
1481 i->seen_this_pos = true;
1486 printbuf_exit(&buf);
1490 ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1495 * Walk extents: verify that extents have a corresponding S_ISREG inode, and
1496 * that i_size and i_sectors are consistent
1498 int bch2_check_extents(struct bch_fs *c)
1500 struct inode_walker w = inode_walker_init();
1501 struct snapshots_seen s;
1502 struct extent_ends extent_ends;
1503 struct disk_reservation res = { 0 };
1505 snapshots_seen_init(&s);
1506 extent_ends_init(&extent_ends);
1508 int ret = bch2_trans_run(c,
1509 for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
1510 POS(BCACHEFS_ROOT_INO, 0),
1511 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1513 BCH_TRANS_COMMIT_no_enospc, ({
1514 bch2_disk_reservation_put(c, &res);
1515 check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
1516 check_extent_overbig(trans, &iter, k);
1518 check_i_sectors(trans, &w));
1520 bch2_disk_reservation_put(c, &res);
1521 extent_ends_exit(&extent_ends);
1522 inode_walker_exit(&w);
1523 snapshots_seen_exit(&s);
1529 int bch2_check_indirect_extents(struct bch_fs *c)
1531 struct disk_reservation res = { 0 };
1533 int ret = bch2_trans_run(c,
1534 for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
1536 BTREE_ITER_PREFETCH, k,
1538 BCH_TRANS_COMMIT_no_enospc, ({
1539 bch2_disk_reservation_put(c, &res);
1540 check_extent_overbig(trans, &iter, k);
1543 bch2_disk_reservation_put(c, &res);
1548 static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
1550 struct bch_fs *c = trans->c;
1551 u32 restart_count = trans->restart_count;
1555 darray_for_each(w->inodes, i) {
1556 if (i->inode.bi_nlink == i->count)
1559 count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
1563 if (i->count != count2) {
1564 bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
1567 if (i->inode.bi_nlink == i->count)
1571 if (fsck_err_on(i->inode.bi_nlink != i->count,
1572 c, inode_dir_wrong_nlink,
1573 "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
1574 w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
1575 i->inode.bi_nlink = i->count;
1576 ret = bch2_fsck_write_inode(trans, &i->inode, i->snapshot);
1583 return ret ?: trans_was_restarted(trans, restart_count);
1586 static int check_dirent_inode_dirent(struct btree_trans *trans,
1587 struct btree_iter *iter,
1588 struct bkey_s_c_dirent d,
1589 struct bch_inode_unpacked *target,
1590 u32 target_snapshot)
1592 struct bch_fs *c = trans->c;
1593 struct printbuf buf = PRINTBUF;
1596 if (inode_points_to_dirent(target, d))
1599 if (!target->bi_dir &&
1600 !target->bi_dir_offset) {
1601 target->bi_dir = d.k->p.inode;
1602 target->bi_dir_offset = d.k->p.offset;
1603 return __bch2_fsck_write_inode(trans, target, target_snapshot);
1606 struct btree_iter bp_iter = { NULL };
1607 struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
1608 SPOS(target->bi_dir, target->bi_dir_offset, target_snapshot));
1609 ret = bkey_err(bp_dirent);
1610 if (ret && !bch2_err_matches(ret, ENOENT))
1613 bool backpointer_exists = !ret;
1616 if (fsck_err_on(!backpointer_exists,
1617 c, inode_wrong_backpointer,
1618 "inode %llu:%u has wrong backpointer:\n"
1620 "should be %llu:%llu",
1621 target->bi_inum, target_snapshot,
1623 target->bi_dir_offset,
1626 target->bi_dir = d.k->p.inode;
1627 target->bi_dir_offset = d.k->p.offset;
1628 ret = __bch2_fsck_write_inode(trans, target, target_snapshot);
1632 bch2_bkey_val_to_text(&buf, c, d.s_c);
1634 if (backpointer_exists)
1635 bch2_bkey_val_to_text(&buf, c, bp_dirent.s_c);
1637 if (fsck_err_on(backpointer_exists &&
1638 (S_ISDIR(target->bi_mode) ||
1640 c, inode_dir_multiple_links,
1641 "%s %llu:%u with multiple links\n%s",
1642 S_ISDIR(target->bi_mode) ? "directory" : "subvolume",
1643 target->bi_inum, target_snapshot, buf.buf)) {
1644 ret = __remove_dirent(trans, d.k->p);
1649 * hardlinked file with nlink 0:
1650 * We're just adjusting nlink here so check_nlinks() will pick
1651 * it up, it ignores inodes with nlink 0
1653 if (fsck_err_on(backpointer_exists && !target->bi_nlink,
1654 c, inode_multiple_links_but_nlink_0,
1655 "inode %llu:%u type %s has multiple links but i_nlink 0\n%s",
1656 target->bi_inum, target_snapshot, bch2_d_types[d.v->d_type], buf.buf)) {
1658 target->bi_flags &= ~BCH_INODE_unlinked;
1659 ret = __bch2_fsck_write_inode(trans, target, target_snapshot);
1666 bch2_trans_iter_exit(trans, &bp_iter);
1667 printbuf_exit(&buf);
1672 static int check_dirent_target(struct btree_trans *trans,
1673 struct btree_iter *iter,
1674 struct bkey_s_c_dirent d,
1675 struct bch_inode_unpacked *target,
1676 u32 target_snapshot)
1678 struct bch_fs *c = trans->c;
1679 struct bkey_i_dirent *n;
1680 struct printbuf buf = PRINTBUF;
1683 ret = check_dirent_inode_dirent(trans, iter, d, target, target_snapshot);
1687 if (fsck_err_on(d.v->d_type != inode_d_type(target),
1688 c, dirent_d_type_wrong,
1689 "incorrect d_type: got %s, should be %s:\n%s",
1690 bch2_d_type_str(d.v->d_type),
1691 bch2_d_type_str(inode_d_type(target)),
1692 (printbuf_reset(&buf),
1693 bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1694 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1695 ret = PTR_ERR_OR_ZERO(n);
1699 bkey_reassemble(&n->k_i, d.s_c);
1700 n->v.d_type = inode_d_type(target);
1701 if (n->v.d_type == DT_SUBVOL) {
1702 n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
1703 n->v.d_child_subvol = cpu_to_le32(target->bi_subvol);
1705 n->v.d_inum = cpu_to_le64(target->bi_inum);
1708 ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1712 d = dirent_i_to_s_c(n);
1716 printbuf_exit(&buf);
1721 /* find a subvolume that's a descendant of @snapshot: */
1722 static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid)
1724 struct btree_iter iter;
1728 for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) {
1729 if (k.k->type != KEY_TYPE_subvolume)
1732 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
1733 if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) {
1734 bch2_trans_iter_exit(trans, &iter);
1735 *subvolid = k.k->p.offset;
1742 bch2_trans_iter_exit(trans, &iter);
1746 static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
1747 struct bkey_s_c_dirent d)
1749 struct bch_fs *c = trans->c;
1750 struct btree_iter subvol_iter = {};
1751 struct bch_inode_unpacked subvol_root;
1752 u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
1753 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
1754 u32 parent_snapshot;
1756 struct printbuf buf = PRINTBUF;
1759 ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum);
1760 if (ret && !bch2_err_matches(ret, ENOENT))
1763 if (fsck_err_on(ret, c, dirent_to_missing_parent_subvol,
1764 "dirent parent_subvol points to missing subvolume\n%s",
1765 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
1766 fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot),
1767 c, dirent_not_visible_in_parent_subvol,
1768 "dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
1770 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1771 u32 new_parent_subvol;
1772 ret = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
1776 struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
1777 ret = PTR_ERR_OR_ZERO(new_dirent);
1781 new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol);
1784 struct bkey_s_c_subvolume s =
1785 bch2_bkey_get_iter_typed(trans, &subvol_iter,
1786 BTREE_ID_subvolumes, POS(0, target_subvol),
1788 ret = bkey_err(s.s_c);
1789 if (ret && !bch2_err_matches(ret, ENOENT))
1793 if (fsck_err(c, dirent_to_missing_subvol,
1794 "dirent points to missing subvolume\n%s",
1795 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)))
1796 return __remove_dirent(trans, d.k->p);
1801 if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol,
1802 c, subvol_fs_path_parent_wrong,
1803 "subvol with wrong fs_path_parent, should be be %u\n%s",
1805 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
1806 struct bkey_i_subvolume *n =
1807 bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume);
1808 ret = PTR_ERR_OR_ZERO(n);
1812 n->v.fs_path_parent = cpu_to_le32(parent_subvol);
1815 u64 target_inum = le64_to_cpu(s.v->inode);
1816 u32 target_snapshot = le32_to_cpu(s.v->snapshot);
1818 ret = lookup_inode(trans, target_inum, &subvol_root, &target_snapshot);
1819 if (ret && !bch2_err_matches(ret, ENOENT))
1822 if (fsck_err_on(parent_subvol != subvol_root.bi_parent_subvol,
1823 c, inode_bi_parent_wrong,
1824 "subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
1826 subvol_root.bi_parent_subvol, parent_subvol)) {
1827 subvol_root.bi_parent_subvol = parent_subvol;
1828 ret = __bch2_fsck_write_inode(trans, &subvol_root, target_snapshot);
1833 ret = check_dirent_target(trans, iter, d, &subvol_root,
1840 bch2_trans_iter_exit(trans, &subvol_iter);
1841 printbuf_exit(&buf);
1845 static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
1847 struct bch_hash_info *hash_info,
1848 struct inode_walker *dir,
1849 struct inode_walker *target,
1850 struct snapshots_seen *s)
1852 struct bch_fs *c = trans->c;
1853 struct bkey_s_c_dirent d;
1854 struct inode_walker_entry *i;
1855 struct printbuf buf = PRINTBUF;
1859 ret = check_key_has_snapshot(trans, iter, k);
1861 ret = ret < 0 ? ret : 0;
1866 equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1868 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1872 if (k.k->type == KEY_TYPE_whiteout)
1875 if (dir->last_pos.inode != k.k->p.inode) {
1876 ret = check_subdir_count(trans, dir);
1881 BUG_ON(!btree_iter_path(trans, iter)->should_be_locked);
1883 i = walk_inode(trans, dir, k);
1884 ret = PTR_ERR_OR_ZERO(i);
1888 if (dir->first_this_inode && dir->inodes.nr)
1889 *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
1890 dir->first_this_inode = false;
1892 if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
1893 "dirent in nonexisting directory:\n%s",
1894 (printbuf_reset(&buf),
1895 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1896 ret = bch2_btree_delete_at(trans, iter,
1897 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1904 if (fsck_err_on(!S_ISDIR(i->inode.bi_mode),
1905 c, dirent_in_non_dir_inode,
1906 "dirent in non directory inode type %s:\n%s",
1907 bch2_d_type_str(inode_d_type(&i->inode)),
1908 (printbuf_reset(&buf),
1909 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1910 ret = bch2_btree_delete_at(trans, iter, 0);
1914 ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
1918 /* dirent has been deleted */
1923 if (k.k->type != KEY_TYPE_dirent)
1926 d = bkey_s_c_to_dirent(k);
1928 if (d.v->d_type == DT_SUBVOL) {
1929 ret = check_dirent_to_subvol(trans, iter, d);
1933 ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
1937 if (fsck_err_on(!target->inodes.nr,
1938 c, dirent_to_missing_inode,
1939 "dirent points to missing inode: (equiv %u)\n%s",
1941 (printbuf_reset(&buf),
1942 bch2_bkey_val_to_text(&buf, c, k),
1944 ret = __remove_dirent(trans, d.k->p);
1949 darray_for_each(target->inodes, i) {
1950 ret = check_dirent_target(trans, iter, d,
1951 &i->inode, i->snapshot);
1956 if (d.v->d_type == DT_DIR)
1957 for_each_visible_inode(c, s, dir, equiv.snapshot, i)
1963 printbuf_exit(&buf);
1969 * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
1972 int bch2_check_dirents(struct bch_fs *c)
1974 struct inode_walker dir = inode_walker_init();
1975 struct inode_walker target = inode_walker_init();
1976 struct snapshots_seen s;
1977 struct bch_hash_info hash_info;
1979 snapshots_seen_init(&s);
1981 int ret = bch2_trans_run(c,
1982 for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
1983 POS(BCACHEFS_ROOT_INO, 0),
1984 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
1987 BCH_TRANS_COMMIT_no_enospc,
1988 check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)));
1990 snapshots_seen_exit(&s);
1991 inode_walker_exit(&dir);
1992 inode_walker_exit(&target);
1997 static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
1999 struct bch_hash_info *hash_info,
2000 struct inode_walker *inode)
2002 struct bch_fs *c = trans->c;
2003 struct inode_walker_entry *i;
2006 ret = check_key_has_snapshot(trans, iter, k);
2010 i = walk_inode(trans, inode, k);
2011 ret = PTR_ERR_OR_ZERO(i);
2015 if (inode->first_this_inode && inode->inodes.nr)
2016 *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
2017 inode->first_this_inode = false;
2019 if (fsck_err_on(!i, c, xattr_in_missing_inode,
2020 "xattr for missing inode %llu",
2022 return bch2_btree_delete_at(trans, iter, 0);
2027 ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
2034 * Walk xattrs: verify that they all have a corresponding inode
2036 int bch2_check_xattrs(struct bch_fs *c)
2038 struct inode_walker inode = inode_walker_init();
2039 struct bch_hash_info hash_info;
2042 ret = bch2_trans_run(c,
2043 for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
2044 POS(BCACHEFS_ROOT_INO, 0),
2045 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
2048 BCH_TRANS_COMMIT_no_enospc,
2049 check_xattr(trans, &iter, k, &hash_info, &inode)));
2054 static int check_root_trans(struct btree_trans *trans)
2056 struct bch_fs *c = trans->c;
2057 struct bch_inode_unpacked root_inode;
2062 ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
2063 if (ret && !bch2_err_matches(ret, ENOENT))
2066 if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
2067 "root subvol missing")) {
2068 struct bkey_i_subvolume root_subvol;
2071 inum = BCACHEFS_ROOT_INO;
2073 bkey_subvolume_init(&root_subvol.k_i);
2074 root_subvol.k.p.offset = BCACHEFS_ROOT_SUBVOL;
2075 root_subvol.v.flags = 0;
2076 root_subvol.v.snapshot = cpu_to_le32(snapshot);
2077 root_subvol.v.inode = cpu_to_le64(inum);
2078 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol.k_i, 0);
2079 bch_err_msg(c, ret, "writing root subvol");
2084 ret = lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
2085 if (ret && !bch2_err_matches(ret, ENOENT))
2088 if (mustfix_fsck_err_on(ret, c, root_dir_missing,
2089 "root directory missing") ||
2090 mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
2091 c, root_inode_not_dir,
2092 "root inode not a directory")) {
2093 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
2095 root_inode.bi_inum = inum;
2097 ret = __bch2_fsck_write_inode(trans, &root_inode, snapshot);
2098 bch_err_msg(c, ret, "writing root inode");
2105 /* Get root directory, create if it doesn't exist: */
2106 int bch2_check_root(struct bch_fs *c)
2108 int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2109 check_root_trans(trans));
2114 struct pathbuf_entry {
2119 typedef DARRAY(struct pathbuf_entry) pathbuf;
2121 static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
2123 darray_for_each(*p, i)
2124 if (i->inum == inum &&
2125 i->snapshot == snapshot)
2130 static int path_down(struct bch_fs *c, pathbuf *p,
2131 u64 inum, u32 snapshot)
2133 int ret = darray_push(p, ((struct pathbuf_entry) {
2135 .snapshot = snapshot,
2139 bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
2145 * Check that a given inode is reachable from the root:
2147 * XXX: we should also be verifying that inodes are in the right subvolumes
2149 static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c inode_k)
2151 struct bch_fs *c = trans->c;
2152 struct btree_iter inode_iter = {};
2153 struct bch_inode_unpacked inode;
2154 struct printbuf buf = PRINTBUF;
2155 u32 snapshot = bch2_snapshot_equiv(c, inode_k.k->p.snapshot);
2160 BUG_ON(bch2_inode_unpack(inode_k, &inode));
2162 while (!(inode.bi_inum == BCACHEFS_ROOT_INO &&
2163 inode.bi_subvol == BCACHEFS_ROOT_SUBVOL)) {
2164 struct btree_iter dirent_iter;
2165 struct bkey_s_c_dirent d;
2166 u32 parent_snapshot = snapshot;
2168 d = inode_get_dirent(trans, &dirent_iter, &inode, &parent_snapshot);
2169 ret = bkey_err(d.s_c);
2170 if (ret && !bch2_err_matches(ret, ENOENT))
2173 if (!ret && !dirent_points_to_inode(d, &inode)) {
2174 bch2_trans_iter_exit(trans, &dirent_iter);
2175 ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
2178 if (bch2_err_matches(ret, ENOENT)) {
2180 if (fsck_err(c, inode_unreachable,
2181 "unreachable inode\n%s",
2182 (printbuf_reset(&buf),
2183 bch2_bkey_val_to_text(&buf, c, inode_k),
2185 ret = reattach_inode(trans, &inode, snapshot);
2189 bch2_trans_iter_exit(trans, &dirent_iter);
2191 if (!S_ISDIR(inode.bi_mode))
2194 ret = path_down(c, p, inode.bi_inum, snapshot);
2196 bch_err(c, "memory allocation failure");
2200 snapshot = parent_snapshot;
2202 bch2_trans_iter_exit(trans, &inode_iter);
2203 inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
2204 SPOS(0, inode.bi_dir, snapshot), 0);
2205 ret = bkey_err(inode_k) ?:
2206 !bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode
2207 : bch2_inode_unpack(inode_k, &inode);
2209 /* Should have been caught in dirents pass */
2210 if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
2211 bch_err(c, "error looking up parent directory: %i", ret);
2215 snapshot = inode_k.k->p.snapshot;
2217 if (path_is_dup(p, inode.bi_inum, snapshot)) {
2218 /* XXX print path */
2219 bch_err(c, "directory structure loop");
2221 darray_for_each(*p, i)
2222 pr_err("%llu:%u", i->inum, i->snapshot);
2223 pr_err("%llu:%u", inode.bi_inum, snapshot);
2225 if (!fsck_err(c, dir_loop, "directory structure loop"))
2228 ret = remove_backpointer(trans, &inode);
2229 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
2230 bch_err_msg(c, ret, "removing dirent");
2234 ret = reattach_inode(trans, &inode, snapshot);
2235 if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
2236 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
2242 bch2_trans_iter_exit(trans, &inode_iter);
2243 printbuf_exit(&buf);
2249 * Check for unreachable inodes, as well as loops in the directory structure:
2250 * After bch2_check_dirents(), if an inode backpointer doesn't exist that means it's
2253 int bch2_check_directory_structure(struct bch_fs *c)
2255 pathbuf path = { 0, };
2258 ret = bch2_trans_run(c,
2259 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
2261 BTREE_ITER_PREFETCH|
2262 BTREE_ITER_ALL_SNAPSHOTS, k,
2263 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
2264 if (!bkey_is_inode(k.k))
2267 if (bch2_inode_flags(k) & BCH_INODE_unlinked)
2270 check_path(trans, &path, k);
2278 struct nlink_table {
2289 static int add_nlink(struct bch_fs *c, struct nlink_table *t,
2290 u64 inum, u32 snapshot)
2292 if (t->nr == t->size) {
2293 size_t new_size = max_t(size_t, 128UL, t->size * 2);
2294 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
2297 bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
2299 return -BCH_ERR_ENOMEM_fsck_add_nlink;
2303 memcpy(d, t->d, t->size * sizeof(t->d[0]));
2311 t->d[t->nr++] = (struct nlink) {
2313 .snapshot = snapshot,
2319 static int nlink_cmp(const void *_l, const void *_r)
2321 const struct nlink *l = _l;
2322 const struct nlink *r = _r;
2324 return cmp_int(l->inum, r->inum);
2327 static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
2328 struct nlink_table *links,
2329 u64 range_start, u64 range_end, u64 inum, u32 snapshot)
2331 struct nlink *link, key = {
2332 .inum = inum, .snapshot = U32_MAX,
2335 if (inum < range_start || inum >= range_end)
2338 link = __inline_bsearch(&key, links->d, links->nr,
2339 sizeof(links->d[0]), nlink_cmp);
2343 while (link > links->d && link[0].inum == link[-1].inum)
2346 for (; link < links->d + links->nr && link->inum == inum; link++)
2347 if (ref_visible(c, s, snapshot, link->snapshot)) {
2349 if (link->snapshot >= snapshot)
2355 static int check_nlinks_find_hardlinks(struct bch_fs *c,
2356 struct nlink_table *t,
2357 u64 start, u64 *end)
2359 int ret = bch2_trans_run(c,
2360 for_each_btree_key(trans, iter, BTREE_ID_inodes,
2363 BTREE_ITER_PREFETCH|
2364 BTREE_ITER_ALL_SNAPSHOTS, k, ({
2365 if (!bkey_is_inode(k.k))
2368 /* Should never fail, checked by bch2_inode_invalid: */
2369 struct bch_inode_unpacked u;
2370 BUG_ON(bch2_inode_unpack(k, &u));
2373 * Backpointer and directory structure checks are sufficient for
2374 * directories, since they can't have hardlinks:
2376 if (S_ISDIR(u.bi_mode))
2382 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
2384 *end = k.k->p.offset;
2396 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
2397 u64 range_start, u64 range_end)
2399 struct snapshots_seen s;
2401 snapshots_seen_init(&s);
2403 int ret = bch2_trans_run(c,
2404 for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
2406 BTREE_ITER_PREFETCH|
2407 BTREE_ITER_ALL_SNAPSHOTS, k, ({
2408 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
2412 if (k.k->type == KEY_TYPE_dirent) {
2413 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
2415 if (d.v->d_type != DT_DIR &&
2416 d.v->d_type != DT_SUBVOL)
2417 inc_link(c, &s, links, range_start, range_end,
2418 le64_to_cpu(d.v->d_inum),
2419 bch2_snapshot_equiv(c, d.k->p.snapshot));
2424 snapshots_seen_exit(&s);
2430 static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
2432 struct nlink_table *links,
2433 size_t *idx, u64 range_end)
2435 struct bch_fs *c = trans->c;
2436 struct bch_inode_unpacked u;
2437 struct nlink *link = &links->d[*idx];
2440 if (k.k->p.offset >= range_end)
2443 if (!bkey_is_inode(k.k))
2446 BUG_ON(bch2_inode_unpack(k, &u));
2448 if (S_ISDIR(u.bi_mode))
2454 while ((cmp_int(link->inum, k.k->p.offset) ?:
2455 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
2456 BUG_ON(*idx == links->nr);
2457 link = &links->d[++*idx];
2460 if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
2461 c, inode_wrong_nlink,
2462 "inode %llu type %s has wrong i_nlink (%u, should be %u)",
2463 u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
2464 bch2_inode_nlink_get(&u), link->count)) {
2465 bch2_inode_nlink_set(&u, link->count);
2466 ret = __bch2_fsck_write_inode(trans, &u, k.k->p.snapshot);
2473 static int check_nlinks_update_hardlinks(struct bch_fs *c,
2474 struct nlink_table *links,
2475 u64 range_start, u64 range_end)
2479 int ret = bch2_trans_run(c,
2480 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
2481 POS(0, range_start),
2482 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
2483 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2484 check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
2486 bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret));
2493 int bch2_check_nlinks(struct bch_fs *c)
2495 struct nlink_table links = { 0 };
2496 u64 this_iter_range_start, next_iter_range_start = 0;
2500 this_iter_range_start = next_iter_range_start;
2501 next_iter_range_start = U64_MAX;
2503 ret = check_nlinks_find_hardlinks(c, &links,
2504 this_iter_range_start,
2505 &next_iter_range_start);
2507 ret = check_nlinks_walk_dirents(c, &links,
2508 this_iter_range_start,
2509 next_iter_range_start);
2513 ret = check_nlinks_update_hardlinks(c, &links,
2514 this_iter_range_start,
2515 next_iter_range_start);
2520 } while (next_iter_range_start != U64_MAX);
2527 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
2530 struct bkey_s_c_reflink_p p;
2531 struct bkey_i_reflink_p *u;
2533 if (k.k->type != KEY_TYPE_reflink_p)
2536 p = bkey_s_c_to_reflink_p(k);
2538 if (!p.v->front_pad && !p.v->back_pad)
2541 u = bch2_trans_kmalloc(trans, sizeof(*u));
2542 int ret = PTR_ERR_OR_ZERO(u);
2546 bkey_reassemble(&u->k_i, k);
2550 return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
2553 int bch2_fix_reflink_p(struct bch_fs *c)
2555 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
2558 int ret = bch2_trans_run(c,
2559 for_each_btree_key_commit(trans, iter,
2560 BTREE_ID_extents, POS_MIN,
2561 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
2562 BTREE_ITER_ALL_SNAPSHOTS, k,
2563 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2564 fix_reflink_p_key(trans, &iter, k)));