+/*
+ * inode_walker: collects every snapshot version of a single inode number
+ * (cur_inum) into the dynamic array @d, so fsck passes can accumulate and
+ * check per-snapshot state (sector counts, subdirectory counts).
+ */
+struct inode_walker {
+	bool first_this_inode;	/* set when cur_inum changes, cleared on repeat lookups */
+	u64 cur_inum;		/* inode number the entries in @d belong to */
+
+	size_t nr;		/* entries in use */
+	size_t size;		/* allocated capacity of @d */
+	struct inode_walker_entry {
+		struct bch_inode_unpacked inode;
+		u32 snapshot;	/* equivalence class of this version's snapshot id */
+		u64 count;	/* per-snapshot accumulator (sectors or subdirs) */
+	} *d;
+};
+
+/* Free the entry array; leaves @w safe to reuse or to exit again. */
+static void inode_walker_exit(struct inode_walker *w)
+{
+	struct inode_walker_entry *d = w->d;
+
+	w->d = NULL;
+	kfree(d);
+}
+
+/* Return a zeroed walker: no current inode, empty entry array. */
+static struct inode_walker inode_walker_init(void)
+{
+	struct inode_walker w = { 0, };
+
+	return w;
+}
+
+/*
+ * Ensure there is room to append one more entry to @w->d, doubling the
+ * allocation (minimum 8 entries) when full.  Returns 0 or -ENOMEM.
+ */
+static int inode_walker_realloc(struct inode_walker *w)
+{
+	size_t new_size;
+	void *d;
+
+	if (w->nr != w->size)
+		return 0;
+
+	new_size = max_t(size_t, 8UL, w->size * 2);
+	d = krealloc(w->d, new_size * sizeof(w->d[0]), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+
+	w->d	= d;
+	w->size	= new_size;
+	return 0;
+}
+
+/*
+ * Append @inode to the walker's entry array, recording the equivalence
+ * class of its snapshot id; the per-snapshot count starts at zero.
+ */
+static int add_inode(struct bch_fs *c, struct inode_walker *w,
+		     struct bkey_s_c_inode inode)
+{
+	struct bch_inode_unpacked u;
+	struct inode_walker_entry *entry;
+	int ret = inode_walker_realloc(w);
+
+	if (ret)
+		return ret;
+
+	/* unpacking an existing on-disk inode key is not expected to fail */
+	BUG_ON(bch2_inode_unpack(inode, &u));
+
+	entry = &w->d[w->nr++];
+	entry->inode	= u;
+	entry->snapshot	= snapshot_t(c, inode.k->p.snapshot)->equiv;
+	entry->count	= 0;
+
+	return 0;
+}
+
+/*
+ * Position the walker at @pos: when @pos refers to a new inode number,
+ * reload all snapshot versions of that inode from the inodes btree, then
+ * find the entry whose snapshot is an ancestor of pos.snapshot.
+ *
+ * Returns the index of the matching entry, INT_MAX if no inode version is
+ * visible in this snapshot, or a negative error code.
+ *
+ * If the key's snapshot is strictly older than the matching inode's, a new
+ * entry is inserted (keeping the array sorted by snapshot id) so counts are
+ * tracked per leaf snapshot.
+ */
+static int __walk_inode(struct btree_trans *trans,
+			struct inode_walker *w, struct bpos pos)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	unsigned i, ancestor_pos;
+	int ret;
+
+	pos.snapshot = snapshot_t(c, pos.snapshot)->equiv;
+
+	if (pos.inode == w->cur_inum) {
+		w->first_this_inode = false;
+		goto lookup_snapshot;
+	}
+
+	w->nr = 0;
+
+	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, pos.inode),
+			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+		if (k.k->p.offset != pos.inode)
+			break;
+
+		if (k.k->type == KEY_TYPE_inode) {
+			/* previously ignored: add_inode() can fail with -ENOMEM */
+			ret = add_inode(c, w, bkey_s_c_to_inode(k));
+			if (ret)
+				break;
+		}
+	}
+	bch2_trans_iter_exit(trans, &iter);
+
+	if (ret)
+		return ret;
+
+	w->cur_inum		= pos.inode;
+	w->first_this_inode	= true;
+lookup_snapshot:
+	for (i = 0; i < w->nr; i++)
+		if (bch2_snapshot_is_ancestor(c, pos.snapshot, w->d[i].snapshot))
+			goto found;
+	return INT_MAX;
+found:
+	BUG_ON(pos.snapshot > w->d[i].snapshot);
+
+	if (pos.snapshot != w->d[i].snapshot) {
+		ancestor_pos = i;
+
+		/* entries are kept sorted by snapshot id */
+		while (i && w->d[i - 1].snapshot > pos.snapshot)
+			--i;
+
+		ret = inode_walker_realloc(w);
+		if (ret)
+			return ret;
+
+		array_insert_item(w->d, w->nr, i, w->d[ancestor_pos]);
+		w->d[i].snapshot = pos.snapshot;
+		w->d[i].count	= 0;
+	}
+
+	return i;
+}
+
+/* __walk_inode(), wrapped so transaction restarts are retried. */
+static int walk_inode(struct btree_trans *trans,
+		      struct inode_walker *w, struct bpos pos)
+{
+	int ret = lockrestart_do(trans, __walk_inode(trans, w, pos));
+
+	return ret;
+}
+
+/*
+ * Collect into @w all versions of inode @inum whose snapshot is visible
+ * from the position recorded in @s.  Returns 0 or a negative error.
+ */
+static int __get_visible_inodes(struct btree_trans *trans,
+				struct inode_walker *w,
+				struct snapshots_seen *s,
+				u64 inum)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	int ret;
+
+	w->nr = 0;
+
+	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
+			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+		if (k.k->p.offset != inum)
+			break;
+
+		if (k.k->type != KEY_TYPE_inode)
+			continue;
+
+		if (ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot)) {
+			/* previously ignored: add_inode() can fail with -ENOMEM */
+			ret = add_inode(c, w, bkey_s_c_to_inode(k));
+			if (ret)
+				break;
+
+			if (k.k->p.snapshot >= s->pos.snapshot)
+				break;
+		}
+	}
+	bch2_trans_iter_exit(trans, &iter);
+
+	return ret;
+}
+
+/*
+ * Delete @k if it is in a snapshot that no longer exists.  Returns 0 if the
+ * key is fine, -EINTR after a successful delete (so the caller restarts),
+ * or a negative error.
+ */
+static int check_key_has_snapshot(struct btree_trans *trans,
+				  struct btree_iter *iter,
+				  struct bkey_s_c k)
+{
+	struct bch_fs *c = trans->c;
+	char buf[200];
+	int ret = 0;
+
+	/* an equiv class of 0 means the snapshot id is no longer valid */
+	if (fsck_err_on(!snapshot_t(c, k.k->p.snapshot)->equiv, c,
+			"key in missing snapshot: %s",
+			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf))) {
+		ret = __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_LAZY_RW,
+			bch2_btree_delete_at(trans, iter,
+					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+		return ret ?: -EINTR;
+	}
+fsck_err:
+	return ret;
+}
+
+/*
+ * Re-insert a hash table key (dirent/xattr) at the position its hash says
+ * it should occupy.  Currently stubbed out and returns -EINVAL; the
+ * previous implementation is kept under #if 0 for reference.
+ */
+static int hash_redo_key(struct btree_trans *trans,
+			 const struct bch_hash_desc desc,
+			 struct bch_hash_info *hash_info,
+			 struct btree_iter *k_iter, struct bkey_s_c k)
+{
+	bch_err(trans->c, "hash_redo_key() not implemented yet");
+	return -EINVAL;
+#if 0
+	struct bkey_i *delete;
+	struct bkey_i *tmp;
+
+	delete = bch2_trans_kmalloc(trans, sizeof(*delete));
+	if (IS_ERR(delete))
+		return PTR_ERR(delete);
+
+	tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	if (IS_ERR(tmp))
+		return PTR_ERR(tmp);
+
+	bkey_reassemble(tmp, k);
+
+	/* whiteout the key at its old (wrong) position, then re-hash it */
+	bkey_init(&delete->k);
+	delete->k.p = k_iter->pos;
+	return  bch2_btree_iter_traverse(k_iter) ?:
+		bch2_trans_update(trans, k_iter, delete, 0) ?:
+		bch2_hash_set(trans, desc, hash_info, k_iter->pos.inode, tmp, 0);
+#endif
+}
+
+/*
+ * Delete a hash table key and commit, retrying the whole operation after
+ * transaction restarts (-EINTR) once the iterator re-traverses cleanly.
+ */
+static int fsck_hash_delete_at(struct btree_trans *trans,
+			       const struct bch_hash_desc desc,
+			       struct bch_hash_info *info,
+			       struct btree_iter *iter)
+{
+	int ret;
+
+	while (1) {
+		ret = bch2_hash_delete_at(trans, desc, info, iter, 0) ?:
+			bch2_trans_commit(trans, NULL, NULL,
+					  BTREE_INSERT_NOFAIL|
+					  BTREE_INSERT_LAZY_RW);
+		if (ret != -EINTR)
+			break;
+
+		ret = bch2_btree_iter_traverse(iter);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/*
+ * Verify that hash table key @hash_k (dirent or xattr) sits at the offset
+ * its hash says it should, scanning for duplicates between the hashed slot
+ * and its actual position.  Returns 0 if ok, 1 if a duplicate was deleted,
+ * -EINTR after a repair requiring restart, or a negative error.
+ */
+static int hash_check_key(struct btree_trans *trans,
+			  const struct bch_hash_desc desc,
+			  struct bch_hash_info *hash_info,
+			  struct btree_iter *k_iter, struct bkey_s_c hash_k)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter = { NULL };
+	char buf[200];
+	struct bkey_s_c k;
+	u64 hash;
+	int ret = 0;
+
+	if (hash_k.k->type != desc.key_type)
+		return 0;
+
+	hash = desc.hash_bkey(hash_info, hash_k);
+
+	/* common case: key is exactly where its hash puts it */
+	if (likely(hash == hash_k.k->p.offset))
+		return 0;
+
+	/* key sits before its hashed slot: can't be a linear-probe result */
+	if (hash_k.k->p.offset < hash)
+		goto bad_hash;
+
+	/* walk the probe sequence from the hashed slot to the key itself */
+	for_each_btree_key(trans, iter, desc.btree_id, POS(hash_k.k->p.inode, hash),
+			   BTREE_ITER_SLOTS, k, ret) {
+		if (!bkey_cmp(k.k->p, hash_k.k->p))
+			break;
+
+		if (fsck_err_on(k.k->type == desc.key_type &&
+				!desc.cmp_bkey(k, hash_k), c,
+				"duplicate hash table keys:\n%s",
+				(bch2_bkey_val_to_text(&PBUF(buf), c,
+						       hash_k), buf))) {
+			ret = fsck_hash_delete_at(trans, desc, hash_info, k_iter);
+			if (ret)
+				return ret;
+			ret = 1;
+			break;
+		}
+
+		/* a hole before reaching the key means the probe chain is broken */
+		if (bkey_deleted(k.k)) {
+			bch2_trans_iter_exit(trans, &iter);
+			goto bad_hash;
+		}
+
+	}
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+bad_hash:
+	if (fsck_err(c, "hash table key at wrong offset: btree %u inode %llu offset %llu, "
+		     "hashed to %llu\n%s",
+		     desc.btree_id, hash_k.k->p.inode, hash_k.k->p.offset, hash,
+		     (bch2_bkey_val_to_text(&PBUF(buf), c, hash_k), buf)) == FSCK_ERR_IGNORE)
+		return 0;
+
+	ret = __bch2_trans_do(trans, NULL, NULL,
+			      BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+		hash_redo_key(trans, desc, hash_info, k_iter, hash_k));
+	if (ret) {
+		bch_err(c, "hash_redo_key err %i", ret);
+		return ret;
+	}
+	return -EINTR;
+fsck_err:
+	return ret;
+}
+
+/*
+ * Check one unpacked inode: verify consistency with the previous snapshot
+ * version (@prev, may be NULL), then repair state left over from an
+ * unclean shutdown - the UNLINKED, I_SIZE_DIRTY, I_SECTORS_DIRTY and
+ * BACKPTR_UNTRUSTED flags.  Rewrites the inode if anything changed.
+ */
+static int check_inode(struct btree_trans *trans,
+		       struct btree_iter *iter,
+		       struct bch_inode_unpacked *prev,
+		       struct bch_inode_unpacked u)
+{
+	struct bch_fs *c = trans->c;
+	bool do_update = false;
+	int ret = 0;
+
+	/* snapshot versions of an inode must agree on hash seed and file type */
+	if (fsck_err_on(prev &&
+			(prev->bi_hash_seed != u.bi_hash_seed ||
+			 mode_to_type(prev->bi_mode) != mode_to_type(u.bi_mode)), c,
+			"inodes in different snapshots don't match")) {
+		bch_err(c, "repair not implemented yet");
+		return -EINVAL;
+	}
+
+	/* unlinked inode: delete was in flight at shutdown, finish it */
+	if (u.bi_flags & BCH_INODE_UNLINKED &&
+	    (!c->sb.clean ||
+	     fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
+		      u.bi_inum))) {
+		bch2_trans_unlock(trans);
+		bch2_fs_lazy_rw(c);
+
+		ret = fsck_inode_rm(trans, u.bi_inum, iter->pos.snapshot);
+		if (ret)
+			bch_err(c, "error in fsck: error %i while deleting inode", ret);
+		return ret;
+	}
+
+	/* i_size dirty: truncate was in flight at shutdown, finish it */
+	if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
+	    (!c->sb.clean ||
+	     fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
+		      u.bi_inum))) {
+		bch_verbose(c, "truncating inode %llu", u.bi_inum);
+
+		bch2_trans_unlock(trans);
+		bch2_fs_lazy_rw(c);
+
+		/*
+		 * XXX: need to truncate partial blocks too here - or ideally
+		 * just switch units to bytes and that issue goes away
+		 */
+		ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+				SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
+				     iter->pos.snapshot),
+				POS(u.bi_inum, U64_MAX),
+				0, NULL);
+		if (ret) {
+			bch_err(c, "error in fsck: error %i truncating inode", ret);
+			return ret;
+		}
+
+		/*
+		 * We truncated without our normal sector accounting hook, just
+		 * make sure we recalculate it:
+		 */
+		u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
+
+		u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
+		do_update = true;
+	}
+
+	/* i_sectors dirty: recount sectors from the extents btree */
+	if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
+	    (!c->sb.clean ||
+	     fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
+		      u.bi_inum))) {
+		s64 sectors;
+
+		bch_verbose(c, "recounting sectors for inode %llu",
+			    u.bi_inum);
+
+		sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
+		if (sectors < 0) {
+			bch_err(c, "error in fsck: error %i recounting inode sectors",
+				(int) sectors);
+			return sectors;
+		}
+
+		u.bi_sectors = sectors;
+		u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
+		do_update = true;
+	}
+
+	/* untrusted backpointer: clear it; later passes will recreate it */
+	if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
+		u.bi_dir = 0;
+		u.bi_dir_offset = 0;
+		u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
+		do_update = true;
+	}
+
+	if (do_update) {
+		ret = write_inode(trans, &u, iter->pos.snapshot);
+		if (ret)
+			bch_err(c, "error in fsck: error %i "
+				"updating inode", ret);
+	}
+fsck_err:
+	return ret;
+}
+
+/*
+ * Pass over every key in the inodes btree, running check_inode() on each.
+ * When @full is false, only inodes with dirty flags (I_SIZE_DIRTY,
+ * I_SECTORS_DIRTY, UNLINKED) are checked.
+ */
+noinline_for_stack
+static int check_inodes(struct bch_fs *c, bool full)
+{
+	struct btree_trans trans;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct bkey_s_c_inode inode;
+	struct bch_inode_unpacked prev, u;
+	int ret;
+
+	memset(&prev, 0, sizeof(prev));
+
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+
+	for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN,
+			   BTREE_ITER_INTENT|
+			   BTREE_ITER_PREFETCH|
+			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+		ret = check_key_has_snapshot(&trans, &iter, k);
+		if (ret)
+			break;
+
+		/*
+		 * if snapshot id isn't a leaf node, skip it - deletion in
+		 * particular is not atomic, so on the internal snapshot nodes
+		 * we can see inodes marked for deletion after a clean shutdown
+		 */
+		if (bch2_snapshot_internal_node(c, k.k->p.snapshot))
+			continue;
+
+		if (k.k->type != KEY_TYPE_inode)
+			continue;
+
+		inode = bkey_s_c_to_inode(k);
+
+		if (!full &&
+		    !(inode.v->bi_flags & (BCH_INODE_I_SIZE_DIRTY|
+					   BCH_INODE_I_SECTORS_DIRTY|
+					   BCH_INODE_UNLINKED)))
+			continue;
+
+		BUG_ON(bch2_inode_unpack(inode, &u));
+
+		/* pass the previous snapshot version only if it's the same inum */
+		ret = check_inode(&trans, &iter,
+				  full && prev.bi_inum == u.bi_inum
+				  ? &prev : NULL, u);
+		if (ret)
+			break;
+
+		prev = u;
+	}
+	bch2_trans_iter_exit(&trans, &iter);
+
+	/* restarts are handled inside the loop; -EINTR must not escape */
+	BUG_ON(ret == -EINTR);
+
+	return bch2_trans_exit(&trans) ?: ret;
+}
+
+/*
+ * Pass over the subvolumes btree.  Placeholder: no checks are implemented
+ * yet - the loop body is intentionally empty, so this only surfaces errors
+ * from iterating the btree itself.
+ */
+noinline_for_stack
+static int check_subvols(struct bch_fs *c)
+{
+	struct btree_trans trans;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	int ret;
+
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+
+	for_each_btree_key(&trans, iter, BTREE_ID_subvolumes, POS_MIN,
+			   0, k, ret) {
+	}
+	bch2_trans_iter_exit(&trans, &iter);
+
+	bch2_trans_exit(&trans);
+	return ret;
+}
+
+/*
+ * Checking for overlapping extents needs to be reimplemented
+ */
+#if 0
+/*
+ * Cut the front off extent @k at @cut_at and rewrite it in place,
+ * deliberately bypassing the normal extent-overwrite path (see the XXX
+ * below about disk accounting).  Disabled pending reimplementation.
+ */
+static int fix_overlapping_extent(struct btree_trans *trans,
+				  struct bkey_s_c k, struct bpos cut_at)
+{
+	struct btree_iter iter;
+	struct bkey_i *u;
+	int ret;
+
+	u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	ret = PTR_ERR_OR_ZERO(u);
+	if (ret)
+		return ret;
+
+	bkey_reassemble(u, k);
+	bch2_cut_front(cut_at, u);
+
+
+	/*
+	 * We don't want to go through the extent_handle_overwrites path:
+	 *
+	 * XXX: this is going to screw up disk accounting, extent triggers
+	 * assume things about extent overwrites - we should be running the
+	 * triggers manually here
+	 */
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, u->k.p,
+			     BTREE_ITER_INTENT|BTREE_ITER_NOT_EXTENTS);
+
+	BUG_ON(iter.flags & BTREE_ITER_IS_EXTENTS);
+	ret   = bch2_btree_iter_traverse(&iter) ?:
+		bch2_trans_update(trans, &iter, u, BTREE_TRIGGER_NORUN) ?:
+		bch2_trans_commit(trans, NULL, NULL,
+				  BTREE_INSERT_NOFAIL|
+				  BTREE_INSERT_LAZY_RW);
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+}
+#endif
+
+/*
+ * Does the dirent that @inode's bi_dir/bi_dir_offset backpointer names
+ * exist, and does its target resolve back to this inode?  Returns 1 if so,
+ * 0 if not, or a negative error code.
+ */
+static int inode_backpointer_exists(struct btree_trans *trans,
+				    struct bch_inode_unpacked *inode,
+				    u32 snapshot)
+{
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	u32 target_subvol, target_snapshot;
+	u64 target_inum;
+	int ret;
+
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents,
+			     SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot), 0);
+	k = bch2_btree_iter_peek_slot(&iter);
+	ret = bkey_err(k);
+	if (ret)
+		goto out;
+	/* slot is empty or holds something else: no backpointer (ret == 0) */
+	if (k.k->type != KEY_TYPE_dirent)
+		goto out;
+
+	ret = __bch2_dirent_read_target(trans, bkey_s_c_to_dirent(k),
+					&target_subvol,
+					&target_snapshot,
+					&target_inum,
+					true);
+	if (ret)
+		goto out;
+
+	/* NOTE(review): only the inum is compared, not the snapshot - confirm */
+	ret = target_inum == inode->bi_inum;
+out:
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+}
+
+/* Does dirent @d sit at the position @inode's backpointer records? */
+static bool inode_backpointer_matches(struct bkey_s_c_dirent d,
+				      struct bch_inode_unpacked *inode)
+{
+	if (d.k->p.inode != inode->bi_dir)
+		return false;
+
+	return d.k->p.offset == inode->bi_dir_offset;
+}
+
+/*
+ * After walking all extents of an inode number, compare each snapshot
+ * version's accumulated sector count against its bi_sectors and repair
+ * mismatches.  Returns 0, -EINTR if an inode was rewritten (caller should
+ * restart), or a negative error.
+ */
+static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
+{
+	struct bch_fs *c = trans->c;
+	struct inode_walker_entry *i;
+	int ret = 0, ret2 = 0;
+	s64 count2;
+
+	for (i = w->d; i < w->d + w->nr; i++) {
+		if (i->inode.bi_sectors == i->count)
+			continue;
+
+		count2 = lockrestart_do(trans,
+			bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot));
+		/* previously a negative error here was stored as a huge u64 count */
+		if (count2 < 0) {
+			bch_err(c, "error in fsck: error %i recounting inode sectors",
+				(int) count2);
+			ret = count2;
+			break;
+		}
+
+		if (i->count != count2) {
+			/* %lli: count2 is s64 (was printed with %llu) */
+			bch_err(c, "fsck counted i_sectors wrong: got %llu should be %lli",
+				i->count, count2);
+			i->count = count2;
+			if (i->inode.bi_sectors == i->count)
+				continue;
+		}
+
+		if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
+			    "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
+			    w->cur_inum, i->snapshot,
+			    i->inode.bi_sectors, i->count) == FSCK_ERR_IGNORE)
+			continue;
+
+		i->inode.bi_sectors = i->count;
+		ret = write_inode(trans, &i->inode, i->snapshot);
+		if (ret)
+			break;
+		ret2 = -EINTR;
+	}
+fsck_err:
+	return ret ?: ret2;
+}
+
+/*
+ * Check a single extent key: make sure its snapshot exists, that it
+ * belongs to a visible regular-file (or symlink) inode, that it isn't past
+ * i_size, and accumulate its size into the per-snapshot counts in @inode.
+ */
+static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
+			struct inode_walker *inode,
+			struct snapshots_seen *s)
+{
+	struct bch_fs *c = trans->c;
+	struct bkey_s_c k;
+	struct inode_walker_entry *i;
+	char buf[200];
+	int ret = 0;
+
+	k = bch2_btree_iter_peek(iter);
+	if (!k.k)
+		return 0;
+
+	ret = bkey_err(k);
+	if (ret)
+		return ret;
+
+	ret = check_key_has_snapshot(trans, iter, k);
+	if (ret)
+		return ret;
+
+	ret = snapshots_seen_update(c, s, k.k->p);
+	if (ret)
+		return ret;
+
+	if (k.k->type == KEY_TYPE_whiteout)
+		return 0;
+
+	/* moving to a new inode: finish i_sectors checks for the previous one */
+	if (inode->cur_inum != k.k->p.inode) {
+		ret = check_i_sectors(trans, inode);
+		if (ret)
+			return ret;
+	}
+#if 0
+	if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+		char buf1[200];
+		char buf2[200];
+
+		bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev.k));
+		bch2_bkey_val_to_text(&PBUF(buf2), c, k);
+
+		if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2))
+			return fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
+	}
+#endif
+	/* returns entry index, INT_MAX if no visible inode, or negative error */
+	ret = __walk_inode(trans, inode, k.k->p);
+	if (ret < 0)
+		return ret;
+
+	if (fsck_err_on(ret == INT_MAX, c,
+			"extent in missing inode:\n  %s",
+			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
+		return __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_LAZY_RW,
+			bch2_btree_delete_at(trans, iter,
+					     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+
+	if (ret == INT_MAX)
+		return 0;
+
+	i = inode->d + ret;
+	ret = 0;
+
+	if (fsck_err_on(!S_ISREG(i->inode.bi_mode) &&
+			!S_ISLNK(i->inode.bi_mode), c,
+			"extent in non regular inode mode %o:\n  %s",
+			i->inode.bi_mode,
+			(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
+		return __bch2_trans_do(trans, NULL, NULL, BTREE_INSERT_LAZY_RW,
+			 bch2_btree_delete_at(trans, iter,
+					      BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
+
+	/* check the extent against i_size in every inode version that sees it */
+	if (!bch2_snapshot_internal_node(c, k.k->p.snapshot)) {
+		for_each_visible_inode(c, s, inode, k.k->p.snapshot, i) {
+			if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+					k.k->type != KEY_TYPE_reservation &&
+					k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9, c,
+					"extent type %u offset %llu past end of inode %llu, i_size %llu",
+					k.k->type, k.k->p.offset, k.k->p.inode, i->inode.bi_size)) {
+				bch2_fs_lazy_rw(c);
+				return bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+						SPOS(k.k->p.inode, round_up(i->inode.bi_size, block_bytes(c)) >> 9,
+						     k.k->p.snapshot),
+						POS(k.k->p.inode, U64_MAX),
+						0, NULL) ?: -EINTR;
+			}
+		}
+	}
+
+	/* accumulate sectors for the later check_i_sectors() pass */
+	if (bkey_extent_is_allocation(k.k))
+		for_each_visible_inode(c, s, inode, k.k->p.snapshot, i)
+			i->count += k.k->size;
+#if 0
+	bch2_bkey_buf_reassemble(&prev, c, k);
+#endif
+
+fsck_err:
+	return ret;
+}
+
+/*
+ * Walk extents: verify that extents have a corresponding S_ISREG inode, and
+ * that i_size and i_sectors are consistent
+ */
+noinline_for_stack
+static int check_extents(struct bch_fs *c)
+{
+	struct inode_walker w = inode_walker_init();
+	struct snapshots_seen s;
+	struct btree_trans trans;
+	struct btree_iter iter;
+	int ret = 0;
+
+#if 0
+	struct bkey_buf prev;
+	bch2_bkey_buf_init(&prev);
+	prev.k->k = KEY(0, 0, 0);
+#endif
+	snapshots_seen_init(&s);
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+
+	bch_verbose(c, "checking extents");
+
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+			     POS(BCACHEFS_ROOT_INO, 0),
+			     BTREE_ITER_INTENT|
+			     BTREE_ITER_PREFETCH|
+			     BTREE_ITER_ALL_SNAPSHOTS);
+
+	/* check one key at a time so each check runs under lockrestart_do */
+	do {
+		ret = lockrestart_do(&trans,
+			check_extent(&trans, &iter, &w, &s));
+		if (ret)
+			break;
+	} while (bch2_btree_iter_advance(&iter));
+	bch2_trans_iter_exit(&trans, &iter);
+#if 0
+	bch2_bkey_buf_exit(&prev, c);
+#endif
+	inode_walker_exit(&w);
+	bch2_trans_exit(&trans);
+	snapshots_seen_exit(&s);
+
+	return ret;
+}
+
+/*
+ * Compare each snapshot version's counted subdirectories against bi_nlink
+ * and repair mismatches.  Returns 0, -EINTR if an inode was rewritten
+ * (caller should restart), or a negative error.
+ */
+static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
+{
+	struct bch_fs *c = trans->c;
+	struct inode_walker_entry *i;
+	int ret = 0, ret2 = 0;
+	s64 count2;
+
+	for (i = w->d; i < w->d + w->nr; i++) {
+		if (i->inode.bi_nlink == i->count)
+			continue;
+
+		count2 = lockrestart_do(trans,
+				bch2_count_subdirs(trans, w->cur_inum, i->snapshot));
+		/* previously a negative error here was stored as a huge u64 count */
+		if (count2 < 0) {
+			bch_err(c, "error in fsck: error %i recounting subdirectories",
+				(int) count2);
+			ret = count2;
+			break;
+		}
+
+		if (i->count != count2) {
+			/* %lli: count2 is s64 (was printed with %llu) */
+			bch_err(c, "fsck counted subdirectories wrong: got %llu should be %lli",
+				i->count, count2);
+			i->count = count2;
+			if (i->inode.bi_nlink == i->count)
+				continue;
+		}
+
+		if (fsck_err_on(i->inode.bi_nlink != i->count, c,
+				"directory %llu:%u with wrong i_nlink: got %u, should be %llu",
+				w->cur_inum, i->snapshot, i->inode.bi_nlink, i->count)) {
+			i->inode.bi_nlink = i->count;
+			ret = write_inode(trans, &i->inode, i->snapshot);
+			if (ret)
+				break;
+			ret2 = -EINTR;
+		}
+	}
+fsck_err:
+	return ret ?: ret2;
+}
+
+static int check_dirent_target(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c_dirent d,
+ struct bch_inode_unpacked *target,
+ u32 target_snapshot)
+{
+ struct bch_fs *c = trans->c;
+ bool backpointer_exists = true;
+ char buf[200];
+ int ret = 0;
+
+ if (!target->bi_dir &&
+ !target->bi_dir_offset) {
+ target->bi_dir = d.k->p.inode;
+ target->bi_dir_offset = d.k->p.offset;
+
+ ret = write_inode(trans, target, target_snapshot);
+ if (ret)
+ goto err;
+ }
+
+ if (!inode_backpointer_matches(d, target)) {
+ ret = inode_backpointer_exists(trans, target, d.k->p.snapshot);
+ if (ret < 0)
+ goto err;
+
+ backpointer_exists = ret;
+ ret = 0;