bch2_inode_pack(&packed, lostfound_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
- NULL, NULL, NULL,
- BTREE_INSERT_NOFAIL);
+ NULL, NULL, BTREE_INSERT_NOFAIL);
if (ret) {
bch_err(c, "error %i reattaching inode %llu while updating lost+found",
ret, inum);
struct hash_check {
struct bch_hash_info info;
- struct btree_iter chain;
- struct btree_iter iter;
+ struct btree_trans *trans;
+
+ /* start of current chain of hash collisions: */
+ struct btree_iter *chain;
+
+ /* next offset in current chain of hash collisions: */
u64 next;
};
static void hash_check_init(const struct bch_hash_desc desc,
- struct hash_check *h, struct bch_fs *c)
+ struct btree_trans *trans,
+ struct hash_check *h)
{
- bch2_btree_iter_init(&h->chain, c, desc.btree_id, POS_MIN, 0);
- bch2_btree_iter_init(&h->iter, c, desc.btree_id, POS_MIN, 0);
+ h->trans = trans;
+ h->chain = bch2_trans_get_iter(trans, desc.btree_id, POS_MIN, 0);
+ h->next = -1;
}
static void hash_check_set_inode(struct hash_check *h, struct bch_fs *c,
}
static int hash_redo_key(const struct bch_hash_desc desc,
- struct hash_check *h, struct bch_fs *c,
+ struct btree_trans *trans, struct hash_check *h,
struct btree_iter *k_iter, struct bkey_s_c k,
u64 hashed)
{
bkey_reassemble(tmp, k);
- ret = bch2_btree_delete_at(k_iter, 0);
+ ret = bch2_btree_delete_at(trans, k_iter, 0);
if (ret)
goto err;
bch2_btree_iter_unlock(k_iter);
- bch2_hash_set(desc, &h->info, c, k_iter->pos.inode, NULL, tmp,
- BTREE_INSERT_NOFAIL|
- BCH_HASH_SET_MUST_CREATE);
+ bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
+ tmp, BCH_HASH_SET_MUST_CREATE);
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
err:
kfree(tmp);
return ret;
}
+/* fsck hasn't been converted to new transactions yet: */
+static int fsck_hash_delete_at(const struct bch_hash_desc desc,
+ struct bch_hash_info *info,
+ struct btree_iter *orig_iter)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ int ret;
+
+ bch2_btree_iter_unlock(orig_iter);
+
+ bch2_trans_init(&trans, orig_iter->c);
+retry:
+ bch2_trans_begin(&trans);
+
+ iter = bch2_trans_copy_iter(&trans, orig_iter);
+ if (IS_ERR(iter)) {
+ ret = PTR_ERR(iter);
+ goto err;
+ }
+
+ ret = bch2_hash_delete_at(&trans, desc, info, iter) ?:
+ bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_ATOMIC|
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
+err:
+ if (ret == -EINTR)
+ goto retry;
+
+ bch2_trans_exit(&trans);
+ return ret;
+}
+
+static int hash_check_duplicates(const struct bch_hash_desc desc,
+ struct hash_check *h, struct bch_fs *c,
+ struct btree_iter *k_iter, struct bkey_s_c k)
+{
+ struct btree_iter *iter;
+ struct bkey_s_c k2;
+ char buf[200];
+ int ret = 0;
+
+ if (!bkey_cmp(h->chain->pos, k_iter->pos))
+ return 0;
+
+ iter = bch2_trans_copy_iter(h->trans, h->chain);
+ BUG_ON(IS_ERR(iter));
+
+ for_each_btree_key_continue(iter, 0, k2) {
+ if (bkey_cmp(k2.k->p, k.k->p) >= 0)
+ break;
+
+ if (fsck_err_on(k2.k->type == desc.key_type &&
+ !desc.cmp_bkey(k, k2), c,
+ "duplicate hash table keys:\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
+ ret = fsck_hash_delete_at(desc, &h->info, k_iter);
+ if (ret)
+ return ret;
+ ret = 1;
+ break;
+ }
+ }
+fsck_err:
+ bch2_trans_iter_free(h->trans, iter);
+ return ret;
+}
+
+static bool key_has_correct_hash(const struct bch_hash_desc desc,
+ struct hash_check *h, struct bch_fs *c,
+ struct btree_iter *k_iter, struct bkey_s_c k)
+{
+ u64 hash;
+
+ if (k.k->type != KEY_TYPE_whiteout &&
+ k.k->type != desc.key_type)
+ return true;
+
+ if (k.k->p.offset != h->next)
+ bch2_btree_iter_copy(h->chain, k_iter);
+ h->next = k.k->p.offset + 1;
+
+ if (k.k->type != desc.key_type)
+ return true;
+
+ hash = desc.hash_bkey(&h->info, k);
+
+ return hash >= h->chain->pos.offset &&
+ hash <= k.k->p.offset;
+}
+
static int hash_check_key(const struct bch_hash_desc desc,
- struct hash_check *h, struct bch_fs *c,
+ struct btree_trans *trans, struct hash_check *h,
struct btree_iter *k_iter, struct bkey_s_c k)
{
+ struct bch_fs *c = trans->c;
char buf[200];
u64 hashed;
int ret = 0;
- if (k.k->type != desc.whiteout_type &&
+ if (k.k->type != KEY_TYPE_whiteout &&
k.k->type != desc.key_type)
return 0;
- if (k.k->p.offset != h->next) {
- if (!btree_iter_linked(&h->chain)) {
- bch2_btree_iter_link(k_iter, &h->chain);
- bch2_btree_iter_link(k_iter, &h->iter);
- }
- bch2_btree_iter_copy(&h->chain, k_iter);
- }
+ if (k.k->p.offset != h->next)
+ bch2_btree_iter_copy(h->chain, k_iter);
h->next = k.k->p.offset + 1;
if (k.k->type != desc.key_type)
hashed = desc.hash_bkey(&h->info, k);
- if (fsck_err_on(hashed < h->chain.pos.offset ||
+ if (fsck_err_on(hashed < h->chain->pos.offset ||
hashed > k.k->p.offset, c,
- "hash table key at wrong offset: %llu, "
+ "hash table key at wrong offset: btree %u, %llu, "
"hashed to %llu chain starts at %llu\n%s",
- k.k->p.offset, hashed, h->chain.pos.offset,
- bch2_bkey_val_to_text(c, bkey_type(0, desc.btree_id),
- buf, sizeof(buf), k))) {
- ret = hash_redo_key(desc, h, c, k_iter, k, hashed);
+ desc.btree_id, k.k->p.offset,
+ hashed, h->chain->pos.offset,
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
+ ret = hash_redo_key(desc, trans, h, k_iter, k, hashed);
if (ret) {
bch_err(c, "hash_redo_key err %i", ret);
return ret;
return 1;
}
- if (!bkey_cmp(h->chain.pos, k_iter->pos))
+ ret = hash_check_duplicates(desc, h, c, k_iter, k);
+fsck_err:
+ return ret;
+}
+
+static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h,
+ struct btree_iter *iter, struct bkey_s_c *k)
+{
+ struct bch_fs *c = trans->c;
+ struct bkey_i_dirent *d = NULL;
+ int ret = -EINVAL;
+ char buf[200];
+ unsigned len;
+ u64 hash;
+
+ if (key_has_correct_hash(bch2_dirent_hash_desc, h, c, iter, *k))
return 0;
- bch2_btree_iter_copy(&h->iter, &h->chain);
- while (bkey_cmp(h->iter.pos, k_iter->pos) < 0) {
- struct bkey_s_c k2 = bch2_btree_iter_peek(&h->iter);
+ len = bch2_dirent_name_bytes(bkey_s_c_to_dirent(*k));
+ BUG_ON(!len);
- if (fsck_err_on(k2.k->type == desc.key_type &&
- !desc.cmp_bkey(k, k2), c,
- "duplicate hash table keys:\n%s",
- bch2_bkey_val_to_text(c, bkey_type(0, desc.btree_id),
- buf, sizeof(buf), k))) {
- ret = bch2_hash_delete_at(desc, &h->info, &h->iter, NULL);
- if (ret)
- return ret;
- return 1;
- }
- bch2_btree_iter_advance_pos(&h->iter);
+ memcpy(buf, bkey_s_c_to_dirent(*k).v->d_name, len);
+ buf[len] = '\0';
+
+ d = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
+ if (!d) {
+ bch_err(c, "memory allocation failure");
+ return -ENOMEM;
+ }
+
+ bkey_reassemble(&d->k_i, *k);
+
+ do {
+ --len;
+ if (!len)
+ goto err_redo;
+
+ d->k.u64s = BKEY_U64s + dirent_val_u64s(len);
+
+ BUG_ON(bkey_val_bytes(&d->k) <
+ offsetof(struct bch_dirent, d_name) + len);
+
+ memset(d->v.d_name + len, 0,
+ bkey_val_bytes(&d->k) -
+ offsetof(struct bch_dirent, d_name) - len);
+
+ hash = bch2_dirent_hash_desc.hash_bkey(&h->info,
+ bkey_i_to_s_c(&d->k_i));
+ } while (hash < h->chain->pos.offset ||
+ hash > k->k->p.offset);
+
+ if (fsck_err(c, "dirent with junk at end, was %s (%zu) now %s (%u)",
+ buf, strlen(buf), d->v.d_name, len)) {
+ bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &d->k_i));
+
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
+ if (ret)
+ goto err;
+
+ *k = bch2_btree_iter_peek(iter);
+
+ BUG_ON(k->k->type != KEY_TYPE_dirent);
}
+err:
fsck_err:
+ kfree(d);
return ret;
+err_redo:
+ hash = bch2_dirent_hash_desc.hash_bkey(&h->info, *k);
+
+ if (fsck_err(c, "cannot fix dirent by removing trailing garbage %s (%zu)\n"
+ "hash table key at wrong offset: btree %u, offset %llu, "
+ "hashed to %llu chain starts at %llu\n%s",
+ buf, strlen(buf), BTREE_ID_DIRENTS,
+ k->k->p.offset, hash, h->chain->pos.offset,
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ *k), buf))) {
+ ret = hash_redo_key(bch2_dirent_hash_desc, trans,
+ h, iter, *k, hash);
+ if (ret)
+ bch_err(c, "hash_redo_key err %i", ret);
+ else
+ ret = 1;
+ }
+
+ goto err;
+}
+
+static int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size)
+{
+ return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
+ POS(inode_nr, round_up(new_size, block_bytes(c)) >> 9),
+ POS(inode_nr + 1, 0), NULL);
}
/*
u64 i_sectors;
int ret = 0;
+ bch_verbose(c, "checking extents");
+
for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
POS(BCACHEFS_ROOT_INO, 0), 0, k) {
- if (k.k->type == KEY_TYPE_DISCARD)
- continue;
-
ret = walk_inode(c, &w, k.k->p.inode);
if (ret)
break;
!S_ISREG(w.inode.bi_mode) && !S_ISLNK(w.inode.bi_mode), c,
"extent type %u for non regular file, inode %llu mode %o",
k.k->type, k.k->p.inode, w.inode.bi_mode)) {
- ret = bch2_btree_delete_at(&iter, 0);
+ bch2_btree_iter_unlock(&iter);
+
+ ret = bch2_inode_truncate(c, k.k->p.inode, 0);
if (ret)
goto err;
continue;
}
- unfixable_fsck_err_on(w.first_this_inode &&
+ if (fsck_err_on(w.first_this_inode &&
w.have_inode &&
!(w.inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY) &&
w.inode.bi_sectors !=
(i_sectors = bch2_count_inode_sectors(c, w.cur_inum)),
c, "i_sectors wrong: got %llu, should be %llu",
- w.inode.bi_sectors, i_sectors);
+ w.inode.bi_sectors, i_sectors)) {
+ struct bkey_inode_buf p;
+
+ w.inode.bi_sectors = i_sectors;
+
+ bch2_btree_iter_unlock(&iter);
- unfixable_fsck_err_on(w.have_inode &&
+ bch2_inode_pack(&p, &w.inode);
+
+ ret = bch2_btree_insert(c, BTREE_ID_INODES,
+ &p.inode.k_i, NULL, NULL,
+ BTREE_INSERT_NOFAIL);
+ if (ret) {
+ bch_err(c, "error in fs gc: error %i "
+ "updating inode", ret);
+ goto err;
+ }
+
+ /* revalidate iterator: */
+ k = bch2_btree_iter_peek(&iter);
+ }
+
+ if (fsck_err_on(w.have_inode &&
!(w.inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
- k.k->type != BCH_RESERVATION &&
+ k.k->type != KEY_TYPE_reservation &&
k.k->p.offset > round_up(w.inode.bi_size, PAGE_SIZE) >> 9, c,
"extent type %u offset %llu past end of inode %llu, i_size %llu",
- k.k->type, k.k->p.offset, k.k->p.inode, w.inode.bi_size);
+ k.k->type, k.k->p.offset, k.k->p.inode, w.inode.bi_size)) {
+ bch2_btree_iter_unlock(&iter);
+
+ ret = bch2_inode_truncate(c, k.k->p.inode,
+ w.inode.bi_size);
+ if (ret)
+ goto err;
+ continue;
+ }
}
err:
fsck_err:
{
struct inode_walker w = inode_walker_init();
struct hash_check h;
- struct btree_iter iter;
+ struct btree_trans trans;
+ struct btree_iter *iter;
struct bkey_s_c k;
unsigned name_len;
char buf[200];
int ret = 0;
- hash_check_init(bch2_dirent_hash_desc, &h, c);
+ bch_verbose(c, "checking dirents");
- for_each_btree_key(&iter, c, BTREE_ID_DIRENTS,
- POS(BCACHEFS_ROOT_INO, 0), 0, k) {
+ bch2_trans_init(&trans, c);
+
+ bch2_trans_preload_iters(&trans);
+
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
+ POS(BCACHEFS_ROOT_INO, 0), 0);
+
+ hash_check_init(bch2_dirent_hash_desc, &trans, &h);
+
+ for_each_btree_key_continue(iter, 0, k) {
struct bkey_s_c_dirent d;
struct bch_inode_unpacked target;
bool have_target;
if (fsck_err_on(!w.have_inode, c,
"dirent in nonexisting directory:\n%s",
- bch2_bkey_val_to_text(c, BTREE_ID_DIRENTS,
- buf, sizeof(buf), k)) ||
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf)) ||
fsck_err_on(!S_ISDIR(w.inode.bi_mode), c,
"dirent in non directory inode type %u:\n%s",
mode_to_type(w.inode.bi_mode),
- bch2_bkey_val_to_text(c, BTREE_ID_DIRENTS,
- buf, sizeof(buf), k))) {
- ret = bch2_btree_delete_at(&iter, 0);
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
+ ret = bch2_btree_delete_at(&trans, iter, 0);
if (ret)
goto err;
continue;
if (w.first_this_inode && w.have_inode)
hash_check_set_inode(&h, c, &w.inode);
- ret = hash_check_key(bch2_dirent_hash_desc, &h, c, &iter, k);
+ ret = check_dirent_hash(&trans, &h, iter, &k);
if (ret > 0) {
ret = 0;
continue;
}
if (ret)
goto fsck_err;
- if (k.k->type != BCH_DIRENT)
+ if (k.k->type != KEY_TYPE_dirent)
continue;
d = bkey_s_c_to_dirent(k);
". dirent") ||
fsck_err_on(name_len == 2 &&
!memcmp(d.v->d_name, "..", 2), c,
- ".. dirent")) {
- ret = remove_dirent(c, &iter, d);
+ ".. dirent") ||
+ fsck_err_on(memchr(d.v->d_name, '/', name_len), c,
+ "dirent name has invalid chars")) {
+ ret = remove_dirent(c, iter, d);
if (ret)
goto err;
continue;
if (fsck_err_on(d_inum == d.k->p.inode, c,
"dirent points to own directory:\n%s",
- bch2_bkey_val_to_text(c, BTREE_ID_DIRENTS,
- buf, sizeof(buf), k))) {
- ret = remove_dirent(c, &iter, d);
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
+ ret = remove_dirent(c, iter, d);
if (ret)
goto err;
continue;
if (fsck_err_on(!have_target, c,
"dirent points to missing inode:\n%s",
- bch2_bkey_val_to_text(c, BTREE_ID_DIRENTS,
- buf, sizeof(buf), k))) {
- ret = remove_dirent(c, &iter, d);
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
+ ret = remove_dirent(c, iter, d);
if (ret)
goto err;
continue;
mode_to_type(target.bi_mode), c,
"incorrect d_type: should be %u:\n%s",
mode_to_type(target.bi_mode),
- bch2_bkey_val_to_text(c, BTREE_ID_DIRENTS,
- buf, sizeof(buf), k))) {
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
struct bkey_i_dirent *n;
n = kmalloc(bkey_bytes(d.k), GFP_KERNEL);
bkey_reassemble(&n->k_i, d.s_c);
n->v.d_type = mode_to_type(target.bi_mode);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
- BTREE_INSERT_NOFAIL,
- BTREE_INSERT_ENTRY(&iter, &n->k_i));
+ bch2_trans_update(&trans,
+ BTREE_INSERT_ENTRY(iter, &n->k_i));
+
+ ret = bch2_trans_commit(&trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
kfree(n);
if (ret)
goto err;
}
err:
fsck_err:
- bch2_btree_iter_unlock(&h.chain);
- bch2_btree_iter_unlock(&h.iter);
- return bch2_btree_iter_unlock(&iter) ?: ret;
+ return bch2_trans_exit(&trans) ?: ret;
}
/*
{
struct inode_walker w = inode_walker_init();
struct hash_check h;
- struct btree_iter iter;
+ struct btree_trans trans;
+ struct btree_iter *iter;
struct bkey_s_c k;
int ret = 0;
- hash_check_init(bch2_xattr_hash_desc, &h, c);
+ bch_verbose(c, "checking xattrs");
- for_each_btree_key(&iter, c, BTREE_ID_XATTRS,
- POS(BCACHEFS_ROOT_INO, 0), 0, k) {
+ bch2_trans_init(&trans, c);
+
+ bch2_trans_preload_iters(&trans);
+
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
+ POS(BCACHEFS_ROOT_INO, 0), 0);
+
+ hash_check_init(bch2_xattr_hash_desc, &trans, &h);
+
+ for_each_btree_key_continue(iter, 0, k) {
ret = walk_inode(c, &w, k.k->p.inode);
if (ret)
break;
if (fsck_err_on(!w.have_inode, c,
"xattr for missing inode %llu",
k.k->p.inode)) {
- ret = bch2_btree_delete_at(&iter, 0);
+ ret = bch2_btree_delete_at(&trans, iter, 0);
if (ret)
goto err;
continue;
if (w.first_this_inode && w.have_inode)
hash_check_set_inode(&h, c, &w.inode);
- ret = hash_check_key(bch2_xattr_hash_desc, &h, c, &iter, k);
+ ret = hash_check_key(bch2_xattr_hash_desc, &trans, &h, iter, k);
if (ret)
goto fsck_err;
}
err:
fsck_err:
- bch2_btree_iter_unlock(&h.chain);
- bch2_btree_iter_unlock(&h.iter);
- return bch2_btree_iter_unlock(&iter) ?: ret;
+ return bch2_trans_exit(&trans) ?: ret;
}
/* Get root directory, create if it doesn't exist: */
struct bkey_inode_buf packed;
int ret;
+ bch_verbose(c, "checking root directory");
+
ret = bch2_inode_find_by_inum(c, BCACHEFS_ROOT_INO, root_inode);
if (ret && ret != -ENOENT)
return ret;
bch2_inode_pack(&packed, root_inode);
return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
- NULL, NULL, NULL, BTREE_INSERT_NOFAIL);
+ NULL, NULL, BTREE_INSERT_NOFAIL);
}
/* Get lost+found, create if it doesn't exist: */
u64 inum;
int ret;
+ bch_verbose(c, "checking lost+found");
+
inum = bch2_dirent_lookup(c, BCACHEFS_ROOT_INO, &root_hash_info,
&lostfound);
if (!inum) {
bch2_inode_pack(&packed, root_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
- NULL, NULL, NULL, BTREE_INSERT_NOFAIL);
+ NULL, NULL, BTREE_INSERT_NOFAIL);
if (ret)
return ret;
static inline int inode_bitmap_set(struct inode_bitmap *b, size_t nr)
{
if (nr >= b->size) {
- size_t new_size = max(max(PAGE_SIZE * 8,
- b->size * 2),
- nr + 1);
+ size_t new_size = max_t(size_t, max_t(size_t,
+ PAGE_SIZE * 8,
+ b->size * 2),
+ nr + 1);
void *n;
new_size = roundup_pow_of_two(new_size);
static int path_down(struct pathbuf *p, u64 inum)
{
if (p->nr == p->size) {
- size_t new_size = max(256UL, p->size * 2);
+ size_t new_size = max_t(size_t, 256UL, p->size * 2);
void *n = krealloc(p->entries,
new_size * sizeof(p->entries[0]),
GFP_KERNEL);
u64 d_inum;
int ret = 0;
+ bch_verbose(c, "checking directory structure");
+
/* DFS: */
restart_dfs:
had_unreachable = false;
e->offset = k.k->p.offset;
- if (k.k->type != BCH_DIRENT)
+ if (k.k->type != KEY_TYPE_dirent)
continue;
dirent = bkey_s_c_to_dirent(k);
}
for_each_btree_key(&iter, c, BTREE_ID_INODES, POS_MIN, 0, k) {
- if (k.k->type != BCH_INODE_FS ||
- !S_ISDIR(le16_to_cpu(bkey_s_c_to_inode(k).v->bi_mode)))
+ if (k.k->type != KEY_TYPE_inode)
+ continue;
+
+ if (!S_ISDIR(le16_to_cpu(bkey_s_c_to_inode(k).v->bi_mode)))
+ continue;
+
+ if (!bch2_empty_dir(c, k.k->p.inode))
continue;
if (fsck_err_on(!inode_bitmap_test(&dirs_done, k.k->p.inode), c,
for_each_btree_key(&iter, c, BTREE_ID_DIRENTS, POS_MIN, 0, k) {
switch (k.k->type) {
- case BCH_DIRENT:
+ case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
d_inum = le64_to_cpu(d.v->d_inum);
return bch2_btree_iter_unlock(&iter) ?: sectors;
}
-static int bch2_gc_do_inode(struct bch_fs *c,
- struct bch_inode_unpacked *lostfound_inode,
- struct btree_iter *iter,
- struct bkey_s_c_inode inode, struct nlink link)
+static int check_inode_nlink(struct bch_fs *c,
+ struct bch_inode_unpacked *lostfound_inode,
+ struct bch_inode_unpacked *u,
+ struct nlink *link,
+ bool *do_update)
{
- struct bch_inode_unpacked u;
+ u32 i_nlink = u->bi_flags & BCH_INODE_UNLINKED
+ ? 0
+ : u->bi_nlink + nlink_bias(u->bi_mode);
+ u32 real_i_nlink =
+ link->count * nlink_bias(u->bi_mode) +
+ link->dir_count;
int ret = 0;
- u32 i_nlink, real_i_nlink;
+
+ /*
+ * These should have been caught/fixed by earlier passes, we don't
+ * repair them here:
+ */
+ if (S_ISDIR(u->bi_mode) && link->count > 1) {
+ need_fsck_err(c, "directory %llu with multiple hardlinks: %u",
+ u->bi_inum, link->count);
+ return 0;
+ }
+
+ if (S_ISDIR(u->bi_mode) && !link->count) {
+ need_fsck_err(c, "unreachable directory found (inum %llu)",
+ u->bi_inum);
+ return 0;
+ }
+
+ if (!S_ISDIR(u->bi_mode) && link->dir_count) {
+ need_fsck_err(c, "non directory with subdirectories (inum %llu)",
+ u->bi_inum);
+ return 0;
+ }
+
+ if (!link->count &&
+ !(u->bi_flags & BCH_INODE_UNLINKED) &&
+ (c->sb.features & (1 << BCH_FEATURE_ATOMIC_NLINK))) {
+ if (fsck_err(c, "unreachable inode %llu not marked as unlinked (type %u)",
+ u->bi_inum, mode_to_type(u->bi_mode)) ==
+ FSCK_ERR_IGNORE)
+ return 0;
+
+ ret = reattach_inode(c, lostfound_inode, u->bi_inum);
+ if (ret)
+ return ret;
+
+ link->count = 1;
+ real_i_nlink = nlink_bias(u->bi_mode) + link->dir_count;
+ goto set_i_nlink;
+ }
+
+ if (i_nlink < link->count) {
+ if (fsck_err(c, "inode %llu i_link too small (%u < %u, type %i)",
+ u->bi_inum, i_nlink, link->count,
+ mode_to_type(u->bi_mode)) == FSCK_ERR_IGNORE)
+ return 0;
+ goto set_i_nlink;
+ }
+
+ if (i_nlink != real_i_nlink &&
+ c->sb.clean) {
+ if (fsck_err(c, "filesystem marked clean, "
+ "but inode %llu has wrong i_nlink "
+ "(type %u i_nlink %u, should be %u)",
+ u->bi_inum, mode_to_type(u->bi_mode),
+ i_nlink, real_i_nlink) == FSCK_ERR_IGNORE)
+ return 0;
+ goto set_i_nlink;
+ }
+
+ if (i_nlink != real_i_nlink &&
+ (c->sb.features & (1 << BCH_FEATURE_ATOMIC_NLINK))) {
+ if (fsck_err(c, "inode %llu has wrong i_nlink "
+ "(type %u i_nlink %u, should be %u)",
+ u->bi_inum, mode_to_type(u->bi_mode),
+ i_nlink, real_i_nlink) == FSCK_ERR_IGNORE)
+ return 0;
+ goto set_i_nlink;
+ }
+
+ if (real_i_nlink && i_nlink != real_i_nlink)
+ bch_verbose(c, "setting inode %llu nlink from %u to %u",
+ u->bi_inum, i_nlink, real_i_nlink);
+set_i_nlink:
+ if (i_nlink != real_i_nlink) {
+ if (real_i_nlink) {
+ u->bi_nlink = real_i_nlink - nlink_bias(u->bi_mode);
+ u->bi_flags &= ~BCH_INODE_UNLINKED;
+ } else {
+ u->bi_nlink = 0;
+ u->bi_flags |= BCH_INODE_UNLINKED;
+ }
+
+ *do_update = true;
+ }
+fsck_err:
+ return ret;
+}
+
+static int check_inode(struct btree_trans *trans,
+ struct bch_inode_unpacked *lostfound_inode,
+ struct btree_iter *iter,
+ struct bkey_s_c_inode inode,
+ struct nlink *link)
+{
+ struct bch_fs *c = trans->c;
+ struct bch_inode_unpacked u;
bool do_update = false;
+ int ret = 0;
ret = bch2_inode_unpack(inode, &u);
if (bch2_fs_inconsistent_on(ret, c,
inode.k->p.inode))
return ret;
- i_nlink = u.bi_nlink + nlink_bias(u.bi_mode);
-
- fsck_err_on(i_nlink < link.count, c,
- "inode %llu i_link too small (%u < %u, type %i)",
- inode.k->p.inode, i_nlink,
- link.count, mode_to_type(u.bi_mode));
-
- /* These should have been caught/fixed by earlier passes: */
- if (S_ISDIR(u.bi_mode)) {
- need_fsck_err_on(link.count > 1, c,
- "directory %llu with multiple hardlinks: %u",
- inode.k->p.inode, link.count);
-
- real_i_nlink = link.count * 2 + link.dir_count;
- } else {
- need_fsck_err_on(link.dir_count, c,
- "found dirents for non directory %llu",
- inode.k->p.inode);
-
- real_i_nlink = link.count + link.dir_count;
+ if (link) {
+ ret = check_inode_nlink(c, lostfound_inode, &u, link,
+ &do_update);
+ if (ret)
+ return ret;
}
- if (!link.count) {
+ if (u.bi_flags & BCH_INODE_UNLINKED) {
fsck_err_on(c->sb.clean, c,
"filesystem marked clean, "
- "but found orphaned inode %llu",
- inode.k->p.inode);
-
- if (fsck_err_on(S_ISDIR(u.bi_mode) &&
- bch2_empty_dir(c, inode.k->p.inode), c,
- "non empty directory with link count 0, "
- "inode nlink %u, dir links found %u",
- i_nlink, link.dir_count)) {
- ret = reattach_inode(c, lostfound_inode,
- inode.k->p.inode);
- if (ret)
- return ret;
- }
+ "but inode %llu unlinked",
+ u.bi_inum);
- bch_verbose(c, "deleting inode %llu", inode.k->p.inode);
+ bch_verbose(c, "deleting inode %llu", u.bi_inum);
- ret = bch2_inode_rm(c, inode.k->p.inode);
+ ret = bch2_inode_rm(c, u.bi_inum);
if (ret)
bch_err(c, "error in fs gc: error %i "
"while deleting inode", ret);
fsck_err_on(c->sb.clean, c,
"filesystem marked clean, "
"but inode %llu has i_size dirty",
- inode.k->p.inode);
+ u.bi_inum);
- bch_verbose(c, "truncating inode %llu", inode.k->p.inode);
+ bch_verbose(c, "truncating inode %llu", u.bi_inum);
/*
* XXX: need to truncate partial blocks too here - or ideally
* just switch units to bytes and that issue goes away
*/
- ret = bch2_inode_truncate(c, inode.k->p.inode,
- round_up(u.bi_size, PAGE_SIZE) >> 9,
- NULL, NULL);
+ ret = bch2_inode_truncate(c, u.bi_inum, u.bi_size);
if (ret) {
bch_err(c, "error in fs gc: error %i "
"truncating inode", ret);
fsck_err_on(c->sb.clean, c,
"filesystem marked clean, "
"but inode %llu has i_sectors dirty",
- inode.k->p.inode);
+ u.bi_inum);
bch_verbose(c, "recounting sectors for inode %llu",
- inode.k->p.inode);
+ u.bi_inum);
- sectors = bch2_count_inode_sectors(c, inode.k->p.inode);
+ sectors = bch2_count_inode_sectors(c, u.bi_inum);
if (sectors < 0) {
bch_err(c, "error in fs gc: error %i "
"recounting inode sectors",
do_update = true;
}
- if (i_nlink != real_i_nlink) {
- fsck_err_on(c->sb.clean, c,
- "filesystem marked clean, "
- "but inode %llu has wrong i_nlink "
- "(type %u i_nlink %u, should be %u)",
- inode.k->p.inode, mode_to_type(u.bi_mode),
- i_nlink, real_i_nlink);
-
- bch_verbose(c, "setting inode %llu nlinks from %u to %u",
- inode.k->p.inode, i_nlink, real_i_nlink);
- u.bi_nlink = real_i_nlink - nlink_bias(u.bi_mode);
- do_update = true;
- }
-
if (do_update) {
struct bkey_inode_buf p;
bch2_inode_pack(&p, &u);
+ bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
- BTREE_INSERT_NOFAIL,
- BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
+ ret = bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW);
if (ret && ret != -EINTR)
bch_err(c, "error in fs gc: error %i "
"updating inode", ret);
noinline_for_stack
static int bch2_gc_walk_inodes(struct bch_fs *c,
- struct bch_inode_unpacked *lostfound_inode,
- nlink_table *links,
- u64 range_start, u64 range_end)
+ struct bch_inode_unpacked *lostfound_inode,
+ nlink_table *links,
+ u64 range_start, u64 range_end)
{
- struct btree_iter iter;
+ struct btree_trans trans;
+ struct btree_iter *iter;
struct bkey_s_c k;
struct nlink *link, zero_links = { 0, 0 };
struct genradix_iter nlinks_iter;
int ret = 0, ret2 = 0;
u64 nlinks_pos;
- bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(range_start, 0), 0);
- genradix_iter_init(&nlinks_iter);
+ bch2_trans_init(&trans, c);
+
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+ POS(range_start, 0), 0);
+ nlinks_iter = genradix_iter_init(links, 0);
- while ((k = bch2_btree_iter_peek(&iter)).k &&
- !btree_iter_err(k)) {
+ while ((k = bch2_btree_iter_peek(iter)).k &&
+ !(ret2 = btree_iter_err(k))) {
peek_nlinks: link = genradix_iter_peek(&nlinks_iter, links);
- if (!link && (!k.k || iter.pos.inode >= range_end))
+ if (!link && (!k.k || iter->pos.inode >= range_end))
break;
nlinks_pos = range_start + nlinks_iter.pos;
- if (iter.pos.inode > nlinks_pos) {
+ if (iter->pos.inode > nlinks_pos) {
/* Should have been caught by dirents pass: */
need_fsck_err_on(link && link->count, c,
"missing inode %llu (nlink %u)",
goto peek_nlinks;
}
- if (iter.pos.inode < nlinks_pos || !link)
+ if (iter->pos.inode < nlinks_pos || !link)
link = &zero_links;
- if (k.k && k.k->type == BCH_INODE_FS) {
+ if (k.k && k.k->type == KEY_TYPE_inode) {
/*
* Avoid potential deadlocks with iter for
* truncate/rm/etc.:
*/
- bch2_btree_iter_unlock(&iter);
+ bch2_btree_iter_unlock(iter);
- ret = bch2_gc_do_inode(c, lostfound_inode, &iter,
- bkey_s_c_to_inode(k), *link);
- if (ret == -EINTR)
- continue;
+ ret = check_inode(&trans, lostfound_inode, iter,
+ bkey_s_c_to_inode(k), link);
+ BUG_ON(ret == -EINTR);
if (ret)
break;
-
- if (link->count)
- atomic_long_inc(&c->nr_inodes);
} else {
/* Should have been caught by dirents pass: */
need_fsck_err_on(link->count, c,
nlinks_pos, link->count);
}
- if (nlinks_pos == iter.pos.inode)
+ if (nlinks_pos == iter->pos.inode)
genradix_iter_advance(&nlinks_iter, links);
- bch2_btree_iter_advance_pos(&iter);
- bch2_btree_iter_cond_resched(&iter);
+ bch2_btree_iter_next(iter);
+ bch2_btree_iter_cond_resched(iter);
}
fsck_err:
- ret2 = bch2_btree_iter_unlock(&iter);
+ bch2_trans_exit(&trans);
+
if (ret2)
bch_err(c, "error in fs gc: btree error %i while walking inodes", ret2);
u64 this_iter_range_start, next_iter_range_start = 0;
int ret = 0;
+ bch_verbose(c, "checking inode nlinks");
+
genradix_init(&links);
do {
return ret;
}
+noinline_for_stack
+static int check_inodes_fast(struct bch_fs *c)
+{
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ struct bkey_s_c k;
+ struct bkey_s_c_inode inode;
+ int ret = 0;
+
+ bch2_trans_init(&trans, c);
+
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
+ POS_MIN, 0);
+
+ for_each_btree_key_continue(iter, 0, k) {
+ if (k.k->type != KEY_TYPE_inode)
+ continue;
+
+ inode = bkey_s_c_to_inode(k);
+
+ if (inode.v->bi_flags &
+ (BCH_INODE_I_SIZE_DIRTY|
+ BCH_INODE_I_SECTORS_DIRTY|
+ BCH_INODE_UNLINKED)) {
+ ret = check_inode(&trans, NULL, iter, inode, NULL);
+ BUG_ON(ret == -EINTR);
+ if (ret)
+ break;
+ }
+ }
+
+ if (!ret)
+ ret = bch2_btree_iter_unlock(iter);
+
+ bch2_trans_exit(&trans);
+
+ return ret;
+}
+
/*
* Checks for inconsistencies that shouldn't happen, unless we have a bug.
* Doesn't fix them yet, mainly because they haven't yet been observed:
*/
-int bch2_fsck(struct bch_fs *c, bool full_fsck)
+static int bch2_fsck_full(struct bch_fs *c)
{
struct bch_inode_unpacked root_inode, lostfound_inode;
int ret;
- if (full_fsck) {
- bch_verbose(c, "checking extents");
- ret = check_extents(c);
- if (ret)
- return ret;
+ bch_verbose(c, "starting fsck:");
+ ret = check_extents(c) ?:
+ check_dirents(c) ?:
+ check_xattrs(c) ?:
+ check_root(c, &root_inode) ?:
+ check_lostfound(c, &root_inode, &lostfound_inode) ?:
+ check_directory_structure(c, &lostfound_inode) ?:
+ check_inode_nlinks(c, &lostfound_inode);
- bch_verbose(c, "checking dirents");
- ret = check_dirents(c);
- if (ret)
- return ret;
+ bch2_flush_fsck_errs(c);
+ bch_verbose(c, "fsck done");
- bch_verbose(c, "checking xattrs");
- ret = check_xattrs(c);
- if (ret)
- return ret;
+ return ret;
+}
- bch_verbose(c, "checking root directory");
- ret = check_root(c, &root_inode);
- if (ret)
- return ret;
+static int bch2_fsck_inode_nlink(struct bch_fs *c)
+{
+ struct bch_inode_unpacked root_inode, lostfound_inode;
+ int ret;
- bch_verbose(c, "checking lost+found");
- ret = check_lostfound(c, &root_inode, &lostfound_inode);
- if (ret)
- return ret;
+ bch_verbose(c, "checking inode link counts:");
+ ret = check_root(c, &root_inode) ?:
+ check_lostfound(c, &root_inode, &lostfound_inode) ?:
+ check_inode_nlinks(c, &lostfound_inode);
- bch_verbose(c, "checking directory structure");
- ret = check_directory_structure(c, &lostfound_inode);
- if (ret)
- return ret;
+ bch2_flush_fsck_errs(c);
+ bch_verbose(c, "done");
- bch_verbose(c, "checking inode nlinks");
- ret = check_inode_nlinks(c, &lostfound_inode);
- if (ret)
- return ret;
- } else {
- bch_verbose(c, "checking root directory");
- ret = check_root(c, &root_inode);
- if (ret)
- return ret;
+ return ret;
+}
- bch_verbose(c, "checking lost+found");
- ret = check_lostfound(c, &root_inode, &lostfound_inode);
- if (ret)
- return ret;
+static int bch2_fsck_walk_inodes_only(struct bch_fs *c)
+{
+ int ret;
- bch_verbose(c, "checking inode nlinks");
- ret = check_inode_nlinks(c, &lostfound_inode);
- if (ret)
- return ret;
- }
+ bch_verbose(c, "walking inodes:");
+ ret = check_inodes_fast(c);
bch2_flush_fsck_errs(c);
+ bch_verbose(c, "done");
- return 0;
+ return ret;
+}
+
+int bch2_fsck(struct bch_fs *c)
+{
+ if (c->opts.fsck)
+ return bch2_fsck_full(c);
+
+ if (c->sb.clean)
+ return 0;
+
+ return c->sb.features & (1 << BCH_FEATURE_ATOMIC_NLINK)
+ ? bch2_fsck_walk_inodes_only(c)
+ : bch2_fsck_inode_nlink(c);
}