+ struct bch_fs *c = trans->c;
+ struct inode_walker_entry *i;
+ int ret = 0, ret2 = 0;
+ s64 count2;
+
+ /*
+  * For every snapshot version of the current inode that we walked,
+  * check the accumulated extent-sector count against the inode's
+  * cached bi_sectors, and rewrite the inode if they disagree:
+  */
+ darray_for_each(w->inodes, i) {
+ if (i->inode.bi_sectors == i->count)
+ continue;
+
+ /* double check by rescanning on disk; retries on transaction restart: */
+ count2 = lockrestart_do(trans,
+ bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot));
+
+ if (i->count != count2) {
+ /* our running count disagrees with the fresh scan - trust the scan: */
+ bch_err(c, "fsck counted i_sectors wrong: got %llu should be %llu",
+ i->count, count2);
+ i->count = count2;
+ if (i->inode.bi_sectors == i->count)
+ continue;
+ }
+
+ /* a mismatch is only an error if I_SECTORS_DIRTY isn't set: */
+ if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
+ "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
+ w->cur_inum, i->snapshot,
+ i->inode.bi_sectors, i->count) == FSCK_ERR_IGNORE)
+ continue;
+
+ i->inode.bi_sectors = i->count;
+ ret = write_inode(trans, &i->inode, i->snapshot);
+ if (ret)
+ break;
+ /* an inode was updated: signal the caller to restart the transaction */
+ ret2 = -EINTR;
+ }
+fsck_err:
+ /* real errors take precedence over the restart request in ret2 */
+ return ret ?: ret2;
+}
+
+/*
+ * check_extent() - fsck one extent key at @iter's position:
+ * verify it has a valid snapshot, that it belongs to an existing
+ * regular file or symlink, that it doesn't extend past the inode's
+ * i_size, and accumulate its size into the per-snapshot i_sectors
+ * counts tracked in @inode.
+ *
+ * Returns 0 on success, or a negative error code (-EINTR requests a
+ * transaction restart from the caller).
+ */
+static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
+ struct inode_walker *inode,
+ struct snapshots_seen *s)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_s_c k;
+ struct inode_walker_entry *i;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+peek:
+ k = bch2_btree_iter_peek(iter);
+ if (!k.k)
+ goto out;
+
+ ret = bkey_err(k);
+ if (ret)
+ goto err;
+
+ ret = check_key_has_snapshot(trans, iter, k);
+ if (ret) {
+ /* a positive return means the key was handled; not an error */
+ ret = ret < 0 ? ret : 0;
+ goto out;
+ }
+
+ ret = snapshots_seen_update(c, s, k.k->p);
+ if (ret)
+ goto err;
+
+ if (k.k->type == KEY_TYPE_whiteout)
+ goto out;
+
+ /*
+ * crossed into a new inode number: verify/fix the i_sectors counts
+ * accumulated for the previous inode before moving on:
+ */
+ if (inode->cur_inum != k.k->p.inode) {
+ ret = check_i_sectors(trans, inode);
+ if (ret)
+ goto err;
+ }
+
+ if (!iter->path->should_be_locked) {
+ /*
+ * hack: check_i_sectors may have handled a transaction restart,
+ * it shouldn't be but we need to fix the new i_sectors check
+ * code and delete the old bch2_count_inode_sectors() first
+ */
+ goto peek;
+ }
+#if 0
+ if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
+ char buf1[200];
+ char buf2[200];
+
+ bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev.k));
+ bch2_bkey_val_to_text(&PBUF(buf2), c, k);
+
+ if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2)) {
+ ret = fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
+ goto out;
+ }
+ }
+#endif
+ /*
+ * look up this key's inode: on success ret is an index into
+ * inode->inodes; INT_MAX means no matching inode was found
+ */
+ ret = __walk_inode(trans, inode, k.k->p);
+ if (ret < 0)
+ goto err;
+
+ /* extent with no owning inode - delete it if the user confirms: */
+ if (fsck_err_on(ret == INT_MAX, c,
+ "extent in missing inode:\n %s",
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ goto out;
+ }
+
+ /* missing inode, but the error was ignored: skip this extent */
+ if (ret == INT_MAX) {
+ ret = 0;
+ goto out;
+ }
+
+ i = inode->inodes.data + ret;
+ ret = 0;
+
+ /* only regular files and symlinks may own extents: */
+ if (fsck_err_on(!S_ISREG(i->inode.bi_mode) &&
+ !S_ISLNK(i->inode.bi_mode), c,
+ "extent in non regular inode mode %o:\n %s",
+ i->inode.bi_mode,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ ret = bch2_btree_delete_at(trans, iter,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+ goto out;
+ }
+
+ if (!bch2_snapshot_internal_node(c, k.k->p.snapshot)) {
+ for_each_visible_inode(c, s, inode, k.k->p.snapshot, i) {
+ /*
+ * an extent past i_size (rounded up to a block) is an
+ * error unless I_SIZE_DIRTY is set or it's a
+ * reservation; fix by truncating everything past
+ * i_size, then request a transaction restart:
+ */
+ if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+ k.k->type != KEY_TYPE_reservation &&
+ k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9, c,
+ "extent type %u offset %llu past end of inode %llu, i_size %llu",
+ k.k->type, k.k->p.offset, k.k->p.inode, i->inode.bi_size)) {
+ bch2_fs_lazy_rw(c);
+ ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+ SPOS(k.k->p.inode, round_up(i->inode.bi_size, block_bytes(c)) >> 9,
+ k.k->p.snapshot),
+ POS(k.k->p.inode, U64_MAX),
+ 0, NULL) ?: -EINTR;
+ goto out;
+ }
+ }
+ }
+
+ /* accumulate sectors for the later i_sectors consistency check: */
+ if (bkey_extent_is_allocation(k.k))
+ for_each_visible_inode(c, s, inode, k.k->p.snapshot, i)
+ i->count += k.k->size;
+#if 0
+ bch2_bkey_buf_reassemble(&prev, c, k);
+#endif
+
+out:
+err:
+fsck_err:
+ printbuf_exit(&buf);
+ return ret;