#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
+/*
+ * XXX: this is handling transaction restarts without returning
+ * -BCH_ERR_transaction_restart_nested; this is not how we do things anymore:
+ */
static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
u32 snapshot)
{
u64 sectors = 0;
int ret;
- for_each_btree_key(trans, iter, BTREE_ID_extents,
- SPOS(inum, 0, snapshot), 0, k, ret) {
- if (k.k->p.inode != inum)
- break;
-
+ for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
+ SPOS(inum, 0, snapshot),
+ POS(inum, U64_MAX),
+ 0, k, ret)
if (bkey_extent_is_allocation(k.k))
sectors += k.k->size;
- }
bch2_trans_iter_exit(trans, &iter);
u64 subdirs = 0;
int ret;
- for_each_btree_key(trans, iter, BTREE_ID_dirents,
- SPOS(inum, 0, snapshot), 0, k, ret) {
- if (k.k->p.inode != inum)
- break;
-
+ for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
+ SPOS(inum, 0, snapshot),
+ POS(inum, U64_MAX),
+ 0, k, ret) {
if (k.k->type != KEY_TYPE_dirent)
continue;
d = bkey_s_c_to_dirent(k);
if (d.v->d_type == DT_DIR)
subdirs++;
}
-
bch2_trans_iter_exit(trans, &iter);
return ret ?: subdirs;
if (ret)
goto err;
- if (!k.k || bkey_cmp(k.k->p, POS(0, inode_nr))) {
+ if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
ret = -ENOENT;
goto err;
}
ret = bch2_inode_unpack(k, inode);
err:
- if (ret && ret != -EINTR)
- bch_err(trans->c, "error %i fetching inode %llu",
- ret, inode_nr);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(trans->c, "error fetching inode %llu: %s",
+ inode_nr, bch2_err_str(ret));
bch2_trans_iter_exit(trans, &iter);
return ret;
}
if (!ret)
*snapshot = iter.pos.snapshot;
err:
- if (ret && ret != -EINTR)
- bch_err(trans->c, "error %i fetching inode %llu:%u",
- ret, inode_nr, *snapshot);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(trans->c, "error fetching inode %llu:%u: %s",
+ inode_nr, *snapshot, bch2_err_str(ret));
bch2_trans_iter_exit(trans, &iter);
return ret;
}
BTREE_INSERT_LAZY_RW,
__write_inode(trans, inode, snapshot));
if (ret)
- bch_err(trans->c, "error in fsck: error %i updating inode", ret);
+ bch_err(trans->c, "error in fsck: error updating inode: %s",
+ bch2_err_str(ret));
return ret;
}
struct bkey_s_c k;
int ret;
- ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
- SPOS(inum, 0, snapshot),
- SPOS(inum, U64_MAX, snapshot),
- 0, NULL) ?:
- bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
- SPOS(inum, 0, snapshot),
- SPOS(inum, U64_MAX, snapshot),
- 0, NULL) ?:
- bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
- SPOS(inum, 0, snapshot),
- SPOS(inum, U64_MAX, snapshot),
- 0, NULL);
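+ /*
+ * Retry the range deletes until they complete without a nested
+ * transaction restart:
+ */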
+ do {
+ ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+ SPOS(inum, 0, snapshot),
+ SPOS(inum, U64_MAX, snapshot),
+ 0, NULL) ?:
+ bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
+ SPOS(inum, 0, snapshot),
+ SPOS(inum, U64_MAX, snapshot),
+ 0, NULL) ?:
+ bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
+ SPOS(inum, 0, snapshot),
+ SPOS(inum, U64_MAX, snapshot),
+ 0, NULL);
+ } while (ret == -BCH_ERR_transaction_restart_nested);
if (ret)
goto err;
retry:
BTREE_INSERT_NOFAIL);
err:
bch2_trans_iter_exit(trans, &iter);
- if (ret == -EINTR)
+ if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
- return ret;
+ return ret ?: -BCH_ERR_transaction_restart_nested;
}
static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
bch2_trans_iter_exit(trans, &iter);
err:
- if (ret && ret != -EINTR)
- bch_err(c, "error %i from __remove_dirent()", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
goto create_lostfound;
}
- if (ret && ret != -EINTR)
- bch_err(c, "error looking up lost+found: %i", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "error looking up lost+found: %s", bch2_err_str(ret));
if (ret)
return ret;
lostfound, &lostfound_str,
0, 0, S_IFDIR|0700, 0, NULL, NULL,
(subvol_inum) { }, 0);
- if (ret && ret != -EINTR)
- bch_err(c, "error creating lost+found: %i", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "error creating lost+found: %s", bch2_err_str(ret));
return ret;
}
BTREE_INSERT_NOFAIL,
__reattach_inode(trans, inode, inode_snapshot));
if (ret) {
- bch_err(trans->c, "error %i reattaching inode %llu",
- ret, inode->bi_inum);
+ bch_err(trans->c, "error reattaching inode %llu: %s",
+ inode->bi_inum, bch2_err_str(ret));
return ret;
}
break;
if (i->equiv == n.equiv) {
- bch_err(c, "adding duplicate snapshot in snapshots_seen_add()");
+ bch_err(c, "%s(): adding duplicate snapshot", __func__);
return -EINVAL;
}
}
.id = pos.snapshot,
.equiv = bch2_snapshot_equiv(c, pos.snapshot),
};
- int ret;
+ int ret = 0;
- if (bkey_cmp(s->pos, pos))
+ if (!bkey_eq(s->pos, pos))
s->ids.nr = 0;
pos.snapshot = n.equiv;
darray_for_each(s->ids, i)
if (i->equiv == n.equiv) {
- if (i->id != n.id) {
- bch_err(c, "snapshot deletion did not run correctly:\n"
+ if (fsck_err_on(i->id != n.id, c,
+ "snapshot deletion did not run correctly:\n"
" duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
bch2_btree_ids[btree_id],
pos.inode, pos.offset,
- i->id, n.id, n.equiv);
- return -NEED_SNAPSHOT_CLEANUP;
- }
+ i->id, n.id, n.equiv))
+ return -BCH_ERR_need_snapshot_cleanup;
return 0;
}
if (ret)
bch_err(c, "error reallocating snapshots_seen table (size %zu)",
s->ids.size);
+fsck_err:
return ret;
}
: bch2_snapshot_is_ancestor(c, src, dst);
}
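+/*
+ * Visibility check between two keys in different snapshots: resolve
+ * both IDs to their snapshot equivalence classes, then order the pair
+ * so that key_visible_in_snapshot() gets its arguments in the order it
+ * expects.
+ */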
+static int ref_visible2(struct bch_fs *c,
+ u32 src, struct snapshots_seen *src_seen,
+ u32 dst, struct snapshots_seen *dst_seen)
+{
+ src = bch2_snapshot_equiv(c, src);
+ dst = bch2_snapshot_equiv(c, dst);
+
+ if (dst > src) {
+ swap(dst, src);
+ swap(dst_seen, src_seen);
+ }
+ return key_visible_in_snapshot(c, src_seen, dst, src);
+}
+
#define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \
for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \
(_i)->snapshot <= (_snapshot); _i++) \
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
+ u32 restart_count = trans->restart_count;
unsigned i;
int ret;
w->cur_inum = pos.inode;
w->first_this_inode = true;
+
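+ /*
+ * If anything above restarted the transaction, report it to the
+ * caller as a nested restart so that stale iterators get
+ * re-traversed:
+ */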
+ if (trans_was_restarted(trans, restart_count))
+ return -BCH_ERR_transaction_restart_nested;
+
lookup_snapshot:
for (i = 0; i < w->inodes.nr; i++)
if (bch2_snapshot_is_ancestor(c, pos.snapshot, w->inodes.data[i].snapshot))
w->inodes.nr = 0;
- for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
+ for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
struct bch_hash_info *hash_info,
struct btree_iter *k_iter, struct bkey_s_c k)
{
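+ /*
+ * A hash table key (dirent or xattr) was found at the wrong slot:
+ * delete it at its current position and re-insert it at the offset it
+ * actually hashes to, all in one commit.
+ */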
- bch_err(trans->c, "hash_redo_key() not implemented yet");
- return -EINVAL;
-#if 0
struct bkey_i *delete;
struct bkey_i *tmp;
if (IS_ERR(delete))
return PTR_ERR(delete);
- tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ tmp = bch2_bkey_make_mut(trans, k);
if (IS_ERR(tmp))
return PTR_ERR(tmp);
- bkey_reassemble(tmp, k);
-
bkey_init(&delete->k);
delete->k.p = k_iter->pos;
return bch2_btree_iter_traverse(k_iter) ?:
bch2_trans_update(trans, k_iter, delete, 0) ?:
- bch2_hash_set(trans, desc, hash_info, k_iter->pos.inode, tmp, 0);
-#endif
+ bch2_hash_set_snapshot(trans, desc, hash_info,
+ (subvol_inum) { 0, k.k->p.inode },
+ k.k->p.snapshot, tmp,
+ BCH_HASH_SET_MUST_CREATE,
+ BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
}
static int hash_check_key(struct btree_trans *trans,
goto bad_hash;
for_each_btree_key_norestart(trans, iter, desc.btree_id,
- POS(hash_k.k->p.inode, hash),
+ SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
BTREE_ITER_SLOTS, k, ret) {
- if (!bkey_cmp(k.k->p, hash_k.k->p))
+ if (bkey_eq(k.k->p, hash_k.k->p))
break;
if (fsck_err_on(k.k->type == desc.key_type &&
printbuf_exit(&buf);
return ret;
bad_hash:
- if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, "
- "hashed to %llu\n%s",
+ if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
(printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf)) == FSCK_ERR_IGNORE)
- return 0;
-
- ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
- if (ret) {
- bch_err(c, "hash_redo_key err %i", ret);
- return ret;
+ bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
+ ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
+ if (ret) {
+ bch_err(c, "hash_redo_key err %s", bch2_err_str(ret));
+ return ret;
+ }
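+ /*
+ * hash_redo_key() committed and moved the key out from under us;
+ * return a nested restart so the caller restarts its iteration:
+ */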
+ ret = -BCH_ERR_transaction_restart_nested;
}
- ret = -EINTR;
fsck_err:
goto out;
}
bch2_fs_lazy_rw(c);
ret = fsck_inode_rm(trans, u.bi_inum, iter->pos.snapshot);
- if (ret)
- bch_err(c, "error in fsck: error %i while deleting inode", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "error in fsck: error while deleting inode: %s",
+ bch2_err_str(ret));
return ret;
}
POS(u.bi_inum, U64_MAX),
0, NULL);
if (ret) {
- bch_err(c, "error in fsck: error %i truncating inode", ret);
+ bch_err(c, "error in fsck: error truncating inode: %s",
+ bch2_err_str(ret));
return ret;
}
sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
if (sectors < 0) {
- bch_err(c, "error in fsck: error %i recounting inode sectors",
- (int) sectors);
+ bch_err(c, "error in fsck: error recounting inode sectors: %s",
+ bch2_err_str(sectors));
return sectors;
}
if (do_update) {
ret = __write_inode(trans, &u, iter->pos.snapshot);
if (ret)
- bch_err(c, "error in fsck: error %i "
- "updating inode", ret);
+ bch_err(c, "error in fsck: error updating inode: %s",
+ bch2_err_str(ret));
}
err:
fsck_err:
if (ret)
- bch_err(c, "error %i from check_inode()", ret);
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
POS_MIN,
- BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
- k,
- NULL, NULL,
- BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
check_inode(&trans, &iter, k, &prev, &s, full));
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
if (ret)
- bch_err(c, "error %i from check_inodes()", ret);
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
- int ret = 0, ret2 = 0;
+ u32 restart_count = trans->restart_count;
+ int ret = 0;
s64 count2;
darray_for_each(w->inodes, i) {
if (i->inode.bi_sectors == i->count)
continue;
- count2 = lockrestart_do(trans,
- bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot));
+ count2 = bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot);
if (i->count != count2) {
bch_err(c, "fsck counted i_sectors wrong: got %llu should be %llu",
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
"inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
w->cur_inum, i->snapshot,
- i->inode.bi_sectors, i->count) == FSCK_ERR_IGNORE)
+ i->inode.bi_sectors, i->count)) {
+ i->inode.bi_sectors = i->count;
+ ret = write_inode(trans, &i->inode, i->snapshot);
+ if (ret)
+ break;
+ }
+ }
+fsck_err:
+ if (ret)
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
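+ /*
+ * The inode updates above may have restarted the transaction; turn
+ * that into a nested restart so the caller knows its btree state is
+ * stale:
+ */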
+ if (!ret && trans_was_restarted(trans, restart_count))
+ ret = -BCH_ERR_transaction_restart_nested;
+ return ret;
+}
+
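+/*
+ * Per-snapshot record of where the previous extent for the current
+ * inode ended, plus a copy of the snapshots seen at that point;
+ * check_overlapping_extents() compares each new extent against these:
+ */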
+struct extent_end {
+ u32 snapshot;
+ u64 offset;
+ struct snapshots_seen seen;
+};
+
+typedef DARRAY(struct extent_end) extent_ends;
+
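+/*
+ * Compare the current extent against the recorded end of every other
+ * visible extent: if an earlier extent ends past the start of this
+ * one, they overlap, and we repair it by re-inserting this key as an
+ * extent update:
+ */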
+static int check_overlapping_extents(struct btree_trans *trans,
+ struct snapshots_seen *seen,
+ extent_ends *extent_ends,
+ struct bkey_s_c k,
+ struct btree_iter *iter)
+{
+ struct bch_fs *c = trans->c;
+ struct extent_end *i;
+ struct printbuf buf = PRINTBUF;
+ int ret = 0;
+
+ darray_for_each(*extent_ends, i) {
+ /* duplicate, due to transaction restart: */
+ if (i->offset == k.k->p.offset &&
+ i->snapshot == k.k->p.snapshot)
continue;
- i->inode.bi_sectors = i->count;
- ret = write_inode(trans, &i->inode, i->snapshot);
- if (ret)
- break;
- ret2 = -EINTR;
+ if (!ref_visible2(c,
+ k.k->p.snapshot, seen,
+ i->snapshot, &i->seen))
+ continue;
+
+ if (fsck_err_on(i->offset > bkey_start_offset(k.k), c,
+ "overlapping extents: extent in snapshot %u ends at %llu overlaps with\n%s",
+ i->snapshot,
+ i->offset,
+ (printbuf_reset(&buf),
+ bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
+ struct bkey_i *update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+ if ((ret = PTR_ERR_OR_ZERO(update)))
+ goto err;
+ bkey_reassemble(update, k);
+ ret = bch2_trans_update_extent(trans, iter, update, 0);
+ if (ret)
+ goto err;
+ }
}
+err:
fsck_err:
- if (ret)
- bch_err(c, "error %i from check_i_sectors()", ret);
- return ret ?: ret2;
+ printbuf_exit(&buf);
+ return ret;
+}
+
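+/*
+ * Record where the current extent ends, replacing any existing entry
+ * for this snapshot and otherwise keeping the array sorted by snapshot
+ * ID; the snapshots_seen list is deep copied, since the caller keeps
+ * mutating its own copy:
+ */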
+static int extent_ends_at(extent_ends *extent_ends,
+ struct snapshots_seen *seen,
+ struct bkey_s_c k)
+{
+ struct extent_end *i, n = (struct extent_end) {
+ .snapshot = k.k->p.snapshot,
+ .offset = k.k->p.offset,
+ .seen = *seen,
+ };
+
+ n.seen.ids.data = kmemdup(seen->ids.data,
+ sizeof(seen->ids.data[0]) * seen->ids.size,
+ GFP_KERNEL);
+ if (!n.seen.ids.data)
+ return -ENOMEM;
+
+ darray_for_each(*extent_ends, i) {
+ if (i->snapshot == k.k->p.snapshot) {
+ snapshots_seen_exit(&i->seen);
+ *i = n;
+ return 0;
+ }
+
+ if (i->snapshot >= k.k->p.snapshot)
+ break;
+ }
+
+ return darray_insert_item(extent_ends, i - extent_ends->data, n);
+}
+
+static void extent_ends_reset(extent_ends *extent_ends)
+{
+ struct extent_end *i;
+
+ darray_for_each(*extent_ends, i)
+ snapshots_seen_exit(&i->seen);
+
+ extent_ends->nr = 0;
}
static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k,
struct inode_walker *inode,
- struct snapshots_seen *s)
+ struct snapshots_seen *s,
+ extent_ends *extent_ends)
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
ret = check_i_sectors(trans, inode);
if (ret)
goto err;
- }
- if (!iter->path->should_be_locked) {
- /*
- * hack: check_i_sectors may have handled a transaction restart,
- * it shouldn't be but we need to fix the new i_sectors check
- * code and delete the old bch2_count_inode_sectors() first
- */
- return -EINTR;
+ extent_ends_reset(extent_ends);
}
-#if 0
- if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
- char buf1[200];
- char buf2[200];
- bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(prev.k));
- bch2_bkey_val_to_text(&PBUF(buf2), c, k);
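+ /*
+ * check_i_sectors() now returns -BCH_ERR_transaction_restart_nested
+ * instead of handling restarts itself, so our iterator must still be
+ * locked here:
+ */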
+ BUG_ON(!iter->path->should_be_locked);
+
+ ret = check_overlapping_extents(trans, s, extent_ends, k, iter);
+ if (ret)
+ goto err;
+
+ ret = extent_ends_at(extent_ends, s, k);
+ if (ret)
+ goto err;
- if (fsck_err(c, "overlapping extents:\n%s\n%s", buf1, buf2)) {
- ret = fix_overlapping_extent(trans, k, prev.k->k.p) ?: -EINTR;
- goto out;
- }
- }
-#endif
ret = __walk_inode(trans, inode, equiv);
if (ret < 0)
goto err;
continue;
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
- k.k->type != KEY_TYPE_reservation &&
- k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9, c,
+ k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
+ !bkey_extent_is_reservation(k), c,
"extent type past end of inode %llu:%u, i_size %llu\n %s",
i->inode.bi_inum, i->snapshot, i->inode.bi_size,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
fsck_err:
printbuf_exit(&buf);
- if (ret && ret != -EINTR)
- bch_err(c, "error %i from check_extent()", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
+ extent_ends extent_ends = { 0 };
int ret = 0;
-#if 0
- struct bkey_buf prev;
- bch2_bkey_buf_init(&prev);
- prev.k->k = KEY(0, 0, 0);
-#endif
snapshots_seen_init(&s);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
NULL, NULL,
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
- check_extent(&trans, &iter, k, &w, &s));
-#if 0
- bch2_bkey_buf_exit(&prev, c);
-#endif
+ check_extent(&trans, &iter, k, &w, &s, &extent_ends));
+
+ extent_ends_reset(&extent_ends);
+ darray_exit(&extent_ends);
inode_walker_exit(&w);
bch2_trans_exit(&trans);
snapshots_seen_exit(&s);
if (ret)
- bch_err(c, "error %i from check_extents()", ret);
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
- int ret = 0, ret2 = 0;
+ u32 restart_count = trans->restart_count;
+ int ret = 0;
s64 count2;
darray_for_each(w->inodes, i) {
ret = write_inode(trans, &i->inode, i->snapshot);
if (ret)
break;
- ret2 = -EINTR;
}
}
fsck_err:
if (ret)
- bch_err(c, "error %i from check_subdir_count()", ret);
- return ret ?: ret2;
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+ if (!ret && trans_was_restarted(trans, restart_count))
+ ret = -BCH_ERR_transaction_restart_nested;
+ return ret;
}
static int check_dirent_target(struct btree_trans *trans,
fsck_err:
printbuf_exit(&buf);
- if (ret && ret != -EINTR)
- bch_err(c, "error %i from check_target()", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
goto err;
}
- if (!iter->path->should_be_locked) {
- /* hack: see check_extent() */
- return -EINTR;
- }
+ BUG_ON(!iter->path->should_be_locked);
ret = __walk_inode(trans, dir, equiv);
if (ret < 0)
fsck_err:
printbuf_exit(&buf);
- if (ret && ret != -EINTR)
- bch_err(c, "error %i from check_dirent()", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
inode_walker_exit(&target);
if (ret)
- bch_err(c, "error %i from check_dirents()", ret);
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
- if (ret && ret != -EINTR)
- bch_err(c, "error %i from check_xattr()", ret);
+ if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
bch2_trans_exit(&trans);
if (ret)
- bch_err(c, "error %i from check_xattrs()", ret);
+ bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
return ret;
}
ret = commit_do(trans, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW,
- __bch2_btree_insert(trans, BTREE_ID_subvolumes, &root_subvol.k_i));
+ __bch2_btree_insert(trans, BTREE_ID_subvolumes,
+ &root_subvol.k_i, 0));
if (ret) {
- bch_err(c, "error writing root subvol: %i", ret);
+ bch_err(c, "error writing root subvol: %s", bch2_err_str(ret));
goto err;
}
ret = __write_inode(trans, &root_inode, snapshot);
if (ret)
- bch_err(c, "error writing root inode: %i", ret);
+ bch_err(c, "error writing root inode: %s", bch2_err_str(ret));
}
err:
fsck_err:
}
fsck_err:
if (ret)
- bch_err(c, "%s: err %i", __func__, ret);
+ bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
return ret;
}
}
bch2_trans_iter_exit(&trans, &iter);
- BUG_ON(ret == -EINTR);
-
darray_exit(&path);
bch2_trans_exit(&trans);
{
if (t->nr == t->size) {
size_t new_size = max_t(size_t, 128UL, t->size * 2);
- void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
+ void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
+
if (!d) {
bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
new_size);
return ret;
}
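+/*
+ * Check a single non-directory inode against the link counts we
+ * tallied: advance the cursor into the nlink table to this inode and
+ * fix i_nlink if it disagrees; returns >0 to stop the iteration once
+ * past range_end.
+ */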
+static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k,
+ struct nlink_table *links,
+ size_t *idx, u64 range_end)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked u;
+ struct nlink *link = &links->d[*idx];
+ int ret = 0;
+
+ if (k.k->p.offset >= range_end)
+ return 1;
+
+ if (!bkey_is_inode(k.k))
+ return 0;
+
+ BUG_ON(bch2_inode_unpack(k, &u));
+
+ if (S_ISDIR(le16_to_cpu(u.bi_mode)))
+ return 0;
+
+ if (!u.bi_nlink)
+ return 0;
+
+ while ((cmp_int(link->inum, k.k->p.offset) ?:
+ cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
+ BUG_ON(*idx == links->nr);
+ link = &links->d[++*idx];
+ }
+
+ if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
+ "inode %llu type %s has wrong i_nlink (%u, should be %u)",
+ u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
+ bch2_inode_nlink_get(&u), link->count)) {
+ bch2_inode_nlink_set(&u, link->count);
+ ret = __write_inode(trans, &u, k.k->p.snapshot);
+ }
+fsck_err:
+ return ret;
+}
+
noinline_for_stack
static int check_nlinks_update_hardlinks(struct bch_fs *c,
struct nlink_table *links,
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
- struct bch_inode_unpacked u;
- struct nlink *link = links->d;
+ size_t idx = 0;
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_inodes,
- POS(0, range_start),
- BTREE_ITER_INTENT|
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
- if (k.k->p.offset >= range_end)
- break;
-
- if (!bkey_is_inode(k.k))
- continue;
-
- BUG_ON(bch2_inode_unpack(k, &u));
-
- if (S_ISDIR(le16_to_cpu(u.bi_mode)))
- continue;
-
- if (!u.bi_nlink)
- continue;
-
- while ((cmp_int(link->inum, k.k->p.offset) ?:
- cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
- link++;
- BUG_ON(link >= links->d + links->nr);
- }
-
- if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
- "inode %llu type %s has wrong i_nlink (%u, should be %u)",
- u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
- bch2_inode_nlink_get(&u), link->count)) {
- bch2_inode_nlink_set(&u, link->count);
+ ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_inodes,
+ POS(0, range_start),
+ BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
+ check_nlinks_update_inode(&trans, &iter, k, links, &idx, range_end));
- ret = write_inode(&trans, &u, k.k->p.snapshot);
- if (ret)
- bch_err(c, "error in fsck: error %i updating inode", ret);
- }
- }
-fsck_err:
- bch2_trans_iter_exit(&trans, &iter);
bch2_trans_exit(&trans);
- if (ret)
+ if (ret < 0) {
bch_err(c, "error in fsck: btree error %i while walking inodes", ret);
+ return ret;
+ }
- return ret;
+ return 0;
}
noinline_for_stack
return ret;
}
-static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter)
+static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k)
{
- struct bkey_s_c k;
struct bkey_s_c_reflink_p p;
struct bkey_i_reflink_p *u;
int ret;
- k = bch2_btree_iter_peek(iter);
- if (!k.k)
- return 0;
-
- ret = bkey_err(k);
- if (ret)
- return ret;
-
if (k.k->type != KEY_TYPE_reflink_p)
return 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
- BTREE_ITER_INTENT|
- BTREE_ITER_PREFETCH|
- BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
- if (k.k->type == KEY_TYPE_reflink_p) {
- ret = commit_do(&trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW,
- fix_reflink_p_key(&trans, &iter));
- if (ret)
- break;
- }
- }
- bch2_trans_iter_exit(&trans, &iter);
+ ret = for_each_btree_key_commit(&trans, iter,
+ BTREE_ID_extents, POS_MIN,
+ BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
+ NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+ fix_reflink_p_key(&trans, &iter, k));
bch2_trans_exit(&trans);
return ret;
check_nlinks(c) ?:
fix_reflink_p(c);
- if (ret == -NEED_SNAPSHOT_CLEANUP) {
+ if (bch2_err_matches(ret, BCH_ERR_need_snapshot_cleanup)) {
set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
goto again;
}