-400c2f8d960ac55105bd22905a6ea1a40daa7f4f
+787de128a5caf209845e5a8d0f14f24e1a42492c
#define DT_LNK 10
#define DT_SOCK 12
#define DT_WHT 14
+#define DT_MAX 16
#endif
/*
#define x(_name, _bits) \
if (fieldnr < a.v->nr_fields) { \
- ret = bch2_varint_decode(in, end, &v); \
+ ret = bch2_varint_decode_fast(in, end, &v); \
if (ret < 0) \
return ret; \
in += ret; \
nr_fields++; \
\
if (src._name) { \
- out += bch2_varint_encode(out, src._name); \
+ out += bch2_varint_encode_fast(out, src._name); \
\
last_nonzero_field = out; \
last_nonzero_fieldnr = nr_fields; \
{
spin_lock_init(&c->freelist_lock);
}
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct open_bucket *ob;
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ spin_lock(&ob->lock);
+ if (ob->valid && !ob->on_partial_list) {
+ pr_buf(out, "%zu ref %u type %s\n",
+ ob - c->open_buckets,
+ atomic_read(&ob->pin),
+ bch2_data_types[ob->type]);
+ }
+ spin_unlock(&ob->lock);
+ }
+}
int bch2_alloc_write(struct bch_fs *, unsigned);
void bch2_fs_allocator_background_init(struct bch_fs *);
+void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *);
+
#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
+LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
/*
* Features:
if (bch2_verify_btree_ondisk)
bch2_btree_node_write(c, b, SIX_LOCK_intent);
else
- __bch2_btree_node_write(c, b);
+ __bch2_btree_node_write(c, b, false);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
* currently fails for iterators that aren't pointed at a valid btree
* node
*/
- if (iter && !bch2_trans_relock(iter->trans))
+ if (iter &&
+ (!bch2_trans_relock(iter->trans) ||
+ !bch2_btree_iter_relock(iter, _THIS_IP_)))
return ERR_PTR(-EINTR);
if (!six_relock_type(&b->c.lock, lock_type, seq))
* currently fails for iterators that aren't pointed at a valid
* btree node
*/
- if (iter && !bch2_trans_relock(iter->trans))
+ if (iter &&
+ (!bch2_trans_relock(iter->trans) ||
+ !bch2_btree_iter_relock(iter, _THIS_IP_)))
return ERR_PTR(-EINTR);
if (!six_relock_type(&b->c.lock, lock_type, seq))
six_lock_write(&b->c.lock, NULL, NULL);
if (btree_node_dirty(b)) {
- __bch2_btree_node_write(c, b);
+ __bch2_btree_node_write(c, b, false);
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
goto wait_on_io;
static void btree_node_write_done(struct bch_fs *c, struct btree *b)
{
struct btree_write *w = btree_prev_write(b);
+ unsigned long old, new, v;
bch2_btree_complete_write(c, b, w);
- bch2_btree_node_io_unlock(b);
+
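+	/*
+	 * If the node was marked need_write while this write was in flight,
+	 * kick off another write from here; otherwise clear write_in_flight
+	 * and wake up anyone waiting on it:
+	 */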
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
+
+ if (old & (1U << BTREE_NODE_need_write))
+ goto do_write;
+
+ new &= ~(1U << BTREE_NODE_write_in_flight);
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+ wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+ return;
+
+do_write:
+ six_lock_read(&b->c.lock, NULL, NULL);
+ v = READ_ONCE(b->flags);
+ do {
+ old = new = v;
+
+ if ((old & (1U << BTREE_NODE_dirty)) &&
+ (old & (1U << BTREE_NODE_need_write)) &&
+ !(old & (1U << BTREE_NODE_never_write)) &&
+ btree_node_may_write(b)) {
+ new &= ~(1U << BTREE_NODE_dirty);
+ new &= ~(1U << BTREE_NODE_need_write);
+ new |= (1U << BTREE_NODE_write_in_flight);
+ new |= (1U << BTREE_NODE_just_written);
+ new ^= (1U << BTREE_NODE_write_idx);
+ } else {
+ new &= ~(1U << BTREE_NODE_write_in_flight);
+ }
+ } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+ if (new & (1U << BTREE_NODE_write_in_flight))
+ __bch2_btree_node_write(c, b, true);
+
+ six_unlock_read(&b->c.lock);
}
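Both cmpxchg() loops above use the standard lock-free read-modify-write idiom:
snapshot the flags word, compute the desired new value, and retry until no
other CPU raced with the update. A standalone sketch of the pattern (my
illustration, not part of the patch):

	/* Atomically clear one flag bit, tolerating concurrent flag updates. */
	static void clear_flag(unsigned long *flags, unsigned bit)
	{
		unsigned long old, new, v = READ_ONCE(*flags);

		do {
			old = new = v;
			new &= ~(1UL << bit);
		} while ((v = cmpxchg(flags, old, new)) != old);
	}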
static void bch2_btree_node_write_error(struct bch_fs *c,
bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
}
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
{
struct btree_write_bio *wbio;
struct bset_tree *t;
bool validate_before_checksum = false;
void *data;
- BUG_ON(btree_node_write_in_flight(b));
+ if (already_started)
+ goto do_write;
if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
return;
if (old & (1 << BTREE_NODE_never_write))
return;
- if (old & (1 << BTREE_NODE_write_in_flight)) {
- /*
- * XXX waiting on btree writes with btree locks held -
- * this can deadlock, and we hit the write error path
- */
- bch2_btree_node_wait_on_write(b);
- continue;
- }
+ BUG_ON(old & (1 << BTREE_NODE_write_in_flight));
new &= ~(1 << BTREE_NODE_dirty);
new &= ~(1 << BTREE_NODE_need_write);
new ^= (1 << BTREE_NODE_write_idx);
} while (cmpxchg_acquire(&b->flags, old, new) != old);
+ if (new & (1U << BTREE_NODE_need_write))
+ return;
+do_write:
atomic_dec(&c->btree_cache.dirty);
BUG_ON(btree_node_fake(b));
if (lock_type_held == SIX_LOCK_intent ||
(lock_type_held == SIX_LOCK_read &&
six_lock_tryupgrade(&b->c.lock))) {
- __bch2_btree_node_write(c, b);
+ __bch2_btree_node_write(c, b, false);
/* don't cycle lock unnecessarily: */
if (btree_node_just_written(b) &&
if (lock_type_held == SIX_LOCK_read)
six_lock_downgrade(&b->c.lock);
} else {
- __bch2_btree_node_write(c, b);
+ __bch2_btree_node_write(c, b, false);
if (lock_type_held == SIX_LOCK_write &&
btree_node_just_written(b))
bch2_btree_post_write_cleanup(c, b);
struct btree_write *);
void bch2_btree_write_error_work(struct work_struct *);
-void __bch2_btree_node_write(struct bch_fs *, struct btree *);
+void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool);
bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
void bch2_btree_node_write(struct bch_fs *, struct btree *,
static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
enum six_lock_type lock_held)
{
- while (b->written &&
- btree_node_need_write(b) &&
- btree_node_may_write(b)) {
- if (!btree_node_write_in_flight(b)) {
- bch2_btree_node_write(c, b, lock_held);
- break;
- }
-
- six_unlock_type(&b->c.lock, lock_held);
- bch2_btree_node_wait_on_write(b);
- btree_node_lock_type(c, b, lock_held);
- }
+ if (b->written &&
+ btree_node_need_write(b) &&
+ btree_node_may_write(b) &&
+ !btree_node_write_in_flight(b))
+ bch2_btree_node_write(c, b, lock_held);
}
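The open-coded wait loop is no longer needed: btree_node_write_done() now
kicks off the next write itself when need_write was set during an in-flight
write, so callers never have to drop their locks and wait - the deadlock the
deleted "XXX waiting on btree writes with btree locks held" comment in
__bch2_btree_node_write() warned about.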
#define bch2_btree_node_write_cond(_c, _b, cond) \
#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
+ struct bch_fs *c = iter->trans->c;
unsigned l;
if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
return;
}
- for (l = 0; is_btree_node(iter, l); l++) {
+ for (l = 0; btree_iter_node(iter, l); l++) {
if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
!btree_node_locked(iter, l))
continue;
#endif
__flatten
-static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
+bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
return btree_iter_get_locks(iter, false, trace_ip);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
+ struct btree_trans *trans = iter->trans;
+ struct bch_fs *c = trans->c;
enum btree_iter_type type = btree_iter_type(iter);
unsigned i;
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
!btree_type_has_snapshots(iter->btree_id));
- bch2_btree_iter_verify_locks(iter);
+ for (i = 0; i < BTREE_MAX_DEPTH; i++) {
+ if (!iter->l[i].b) {
+ BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
+ break;
+ }
- for (i = 0; i < BTREE_MAX_DEPTH; i++)
bch2_btree_iter_verify_level(iter, i);
+ }
+
+ bch2_btree_iter_verify_locks(iter);
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
static int btree_iter_traverse_one(struct btree_iter *iter,
unsigned long trace_ip)
{
- unsigned depth_want = iter->level;
+ unsigned l, depth_want = iter->level;
int ret = 0;
- /*
- * if we need interior nodes locked, call btree_iter_relock() to make
- * sure we walk back up enough that we lock them:
- */
- if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
- iter->locks_want > 1)
- bch2_btree_iter_relock(iter, _THIS_IP_);
-
if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
ret = bch2_btree_iter_traverse_cached(iter);
goto out;
}
- if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
- goto out;
-
if (unlikely(iter->level >= BTREE_MAX_DEPTH))
goto out;
iter->level = btree_iter_up_until_good_node(iter, 0);
+ /* If we need intent locks, take them too: */
+ for (l = iter->level + 1;
+ l < iter->locks_want && btree_iter_node(iter, l);
+ l++)
+ if (!bch2_btree_node_relock(iter, l))
+ while (iter->level <= l) {
+ btree_node_unlock(iter, iter->level);
+ iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
+ iter->level++;
+ }
+
/*
* Note: iter->nodes[iter->level] may be temporarily NULL here - that
* would indicate to other code that we got to the end of the btree,
goto out;
}
+ __bch2_btree_iter_unlock(iter);
iter->level = depth_want;
if (ret == -EIO) {
struct btree_node_iter *, struct bkey_packed *,
unsigned, unsigned);
+bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
+
bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
BUG_ON(iter->level);
- if (btree_node_locked(iter, 0)) {
+ iter->l[1].b = NULL;
+
+ if (bch2_btree_node_relock(iter, 0)) {
ck = (void *) iter->l[0].b;
goto fill;
}
closure_init_stack(&cl);
retry:
- /*
- * This check isn't necessary for correctness - it's just to potentially
- * prevent us from doing a lot of work that'll end up being wasted:
- */
- ret = bch2_journal_error(&c->journal);
- if (ret)
- return ERR_PTR(ret);
/*
* XXX: figure out how far we might need to split,
bch2_keylist_init(&as->new_keys, as->_new_keys);
bch2_keylist_init(&as->parent_keys, as->inline_keys);
+ mutex_lock(&c->btree_interior_update_lock);
+ list_add_tail(&as->list, &c->btree_interior_update_list);
+ mutex_unlock(&c->btree_interior_update_lock);
+
+ /*
+ * We don't want to allocate if we're in an error state, that can cause
+ * deadlock on emergency shutdown due to open buckets getting stuck in
+ * the btree_reserve_cache after allocator shutdown has cleared it out.
+ * This check needs to come after adding us to the btree_interior_update
+ * list but before calling bch2_btree_reserve_get, to synchronize with
+ * __bch2_fs_read_only().
+ */
+ ret = bch2_journal_error(&c->journal);
+ if (ret)
+ goto err;
+
ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
BTREE_UPDATE_JOURNAL_RES,
journal_flags|JOURNAL_RES_GET_NONBLOCK);
atomic64_read(&c->journal.seq),
&as->journal, NULL);
- mutex_lock(&c->btree_interior_update_lock);
- list_add_tail(&as->list, &c->btree_interior_update_list);
- mutex_unlock(&c->btree_interior_update_lock);
-
return as;
err:
bch2_btree_update_free(as);
bch_scnmemcpy(out, d.v->d_name,
bch2_dirent_name_bytes(d));
- pr_buf(out, " -> %llu type %s", d.v->d_inum, bch2_d_types[d.v->d_type]);
+ pr_buf(out, " -> %llu type %s", d.v->d_inum,
+ d.v->d_type < DT_MAX
+ ? bch2_d_types[d.v->d_type]
+ : "(bad d_type)");
}
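This bound check matters because bch2_d_types[] is declared with DT_MAX
entries further down in this patch, so a corrupt dirent carrying an
out-of-range d_type would otherwise index past the end of the table.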
static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
* bio_iov_iter_get_pages was only able to get <
* blocksize worth of pages:
*/
- bio_for_each_segment_all(bv, bio, iter)
- put_page(bv->bv_page);
ret = -EFAULT;
goto err;
}
if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
bio_for_each_segment_all(bv, bio, iter)
put_page(bv->bv_page);
+ bio->bi_vcnt = 0;
if (dio->op.error) {
set_bit(EI_INODE_ERROR, &inode->ei_flags);
if (dio->free_iov)
kfree(dio->iter.iov);
+ if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+ bio_for_each_segment_all(bv, bio, iter)
+ put_page(bv->bv_page);
bio_put(bio);
/* inode->i_dio_count is our ref on inode and thus bch_fs */
};
}
-static int walk_inode(struct btree_trans *trans,
- struct inode_walker *w, u64 inum)
+static int __walk_inode(struct btree_trans *trans,
+ struct inode_walker *w, u64 inum)
{
if (inum != w->cur_inum) {
- int ret = lookup_inode(trans, inum, &w->inode, &w->snapshot);
+ int ret = __lookup_inode(trans, inum, &w->inode, &w->snapshot);
if (ret && ret != -ENOENT)
return ret;
return 0;
}
+static int walk_inode(struct btree_trans *trans,
+ struct inode_walker *w, u64 inum)
+{
+ return lockrestart_do(trans, __walk_inode(trans, w, inum));
+}
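This split mirrors the lookup_inode()/__lookup_inode() and
write_inode()/__write_inode() pairs used elsewhere in this file: the
double-underscore version assumes the caller already handles transaction
restarts, while the plain wrapper provides its own lockrestart_do() retry
loop.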
+
static int hash_redo_key(struct btree_trans *trans,
const struct bch_hash_desc desc,
struct bch_hash_info *hash_info,
return bch2_trans_exit(&trans) ?: ret;
}
-/*
- * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
- * validate d_type
- */
-noinline_for_stack
-static int check_dirents(struct bch_fs *c)
+static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
+ struct bch_hash_info *hash_info,
+ struct inode_walker *w, unsigned *nr_subdirs)
{
- struct inode_walker w = inode_walker_init();
- struct bch_hash_info hash_info;
- struct btree_trans trans;
- struct btree_iter *iter;
+ struct bch_fs *c = trans->c;
struct bkey_s_c k;
+ struct bkey_s_c_dirent d;
+ struct bch_inode_unpacked target;
+ u32 target_snapshot;
+ bool have_target;
+ bool backpointer_exists = true;
+ u64 d_inum;
char buf[200];
- unsigned nr_subdirs = 0;
- int ret = 0;
+ int ret;
- bch_verbose(c, "checking dirents");
+ k = bch2_btree_iter_peek(iter);
+ if (!k.k)
+ return 1;
- bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+ ret = bkey_err(k);
+ if (ret)
+ return ret;
- iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents,
- POS(BCACHEFS_ROOT_INO, 0),
- BTREE_ITER_INTENT|
- BTREE_ITER_PREFETCH);
-retry:
- while ((k = bch2_btree_iter_peek(iter)).k &&
- !(ret = bkey_err(k))) {
- struct bkey_s_c_dirent d;
- struct bch_inode_unpacked target;
- u32 target_snapshot;
- bool have_target;
- bool backpointer_exists = true;
- u64 d_inum;
+ if (w->have_inode &&
+ w->cur_inum != k.k->p.inode &&
+ fsck_err_on(w->inode.bi_nlink != *nr_subdirs, c,
+ "directory %llu with wrong i_nlink: got %u, should be %u",
+ w->inode.bi_inum, w->inode.bi_nlink, *nr_subdirs)) {
+ w->inode.bi_nlink = *nr_subdirs;
+ ret = write_inode(trans, &w->inode, w->snapshot);
+ return ret ?: -EINTR;
+ }
- if (w.have_inode &&
- w.cur_inum != k.k->p.inode &&
- fsck_err_on(w.inode.bi_nlink != nr_subdirs, c,
- "directory %llu with wrong i_nlink: got %u, should be %u",
- w.inode.bi_inum, w.inode.bi_nlink, nr_subdirs)) {
- w.inode.bi_nlink = nr_subdirs;
- ret = write_inode(&trans, &w.inode, w.snapshot);
- if (ret)
- break;
- }
+ ret = __walk_inode(trans, w, k.k->p.inode);
+ if (ret)
+ return ret;
- ret = walk_inode(&trans, &w, k.k->p.inode);
- if (ret)
- break;
+ if (w->first_this_inode)
+ *nr_subdirs = 0;
- if (w.first_this_inode)
- nr_subdirs = 0;
+ if (fsck_err_on(!w->have_inode, c,
+ "dirent in nonexisting directory:\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)) ||
+ fsck_err_on(!S_ISDIR(w->inode.bi_mode), c,
+ "dirent in non directory inode type %u:\n%s",
+ mode_to_type(w->inode.bi_mode),
+ (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
+ return __bch2_trans_do(trans, NULL, NULL, 0,
+ bch2_btree_delete_at(trans, iter, 0));
- if (fsck_err_on(!w.have_inode, c,
- "dirent in nonexisting directory:\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c,
- k), buf)) ||
- fsck_err_on(!S_ISDIR(w.inode.bi_mode), c,
- "dirent in non directory inode type %u:\n%s",
- mode_to_type(w.inode.bi_mode),
- (bch2_bkey_val_to_text(&PBUF(buf), c,
- k), buf))) {
- ret = __bch2_trans_do(&trans, NULL, NULL, 0,
- bch2_btree_delete_at(&trans, iter, 0));
- if (ret)
- goto err;
- goto next;
- }
+ if (!w->have_inode)
+ return 0;
- if (!w.have_inode)
- goto next;
+ if (w->first_this_inode)
+ *hash_info = bch2_hash_info_init(c, &w->inode);
- if (w.first_this_inode)
- hash_info = bch2_hash_info_init(c, &w.inode);
+ ret = hash_check_key(trans, bch2_dirent_hash_desc,
+ hash_info, iter, k);
+ if (ret < 0)
+ return ret;
+ if (ret) /* dirent has been deleted */
+ return 0;
- ret = hash_check_key(&trans, bch2_dirent_hash_desc,
- &hash_info, iter, k);
- if (ret > 0) {
- ret = 0;
- goto next;
- }
- if (ret)
- goto fsck_err;
+ if (k.k->type != KEY_TYPE_dirent)
+ return 0;
+
+ d = bkey_s_c_to_dirent(k);
+ d_inum = le64_to_cpu(d.v->d_inum);
- if (k.k->type != KEY_TYPE_dirent)
- goto next;
+ ret = __lookup_inode(trans, d_inum, &target, &target_snapshot);
+ if (ret && ret != -ENOENT)
+ return ret;
- d = bkey_s_c_to_dirent(k);
- d_inum = le64_to_cpu(d.v->d_inum);
+ have_target = !ret;
+ ret = 0;
- ret = lookup_inode(&trans, d_inum, &target, &target_snapshot);
- if (ret && ret != -ENOENT)
- break;
+ if (fsck_err_on(!have_target, c,
+ "dirent points to missing inode:\n%s",
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf)))
+ return remove_dirent(trans, d.k->p);
- have_target = !ret;
+ if (!have_target)
+ return 0;
+
+ if (!target.bi_dir &&
+ !target.bi_dir_offset) {
+ target.bi_dir = k.k->p.inode;
+ target.bi_dir_offset = k.k->p.offset;
+
+ ret = __write_inode(trans, &target, target_snapshot) ?:
+ bch2_trans_commit(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_NOUNLOCK);
+ if (ret)
+ return ret;
+ return -EINTR;
+ }
+
+ if (!inode_backpointer_matches(d, &target)) {
+ ret = inode_backpointer_exists(trans, &target);
+ if (ret < 0)
+ return ret;
+
+ backpointer_exists = ret;
ret = 0;
- if (fsck_err_on(!have_target, c,
- "dirent points to missing inode:\n%s",
- (bch2_bkey_val_to_text(&PBUF(buf), c,
- k), buf))) {
- ret = remove_dirent(&trans, d.k->p);
- if (ret)
- goto err;
- goto next;
+ if (fsck_err_on(S_ISDIR(target.bi_mode) &&
+ backpointer_exists, c,
+ "directory %llu with multiple links",
+ target.bi_inum))
+ return remove_dirent(trans, d.k->p);
+
+ if (fsck_err_on(backpointer_exists &&
+ !target.bi_nlink, c,
+ "inode %llu has multiple links but i_nlink 0",
+ d_inum)) {
+ target.bi_nlink++;
+ target.bi_flags &= ~BCH_INODE_UNLINKED;
+
+ ret = write_inode(trans, &target, target_snapshot);
+ return ret ?: -EINTR;
}
- if (!have_target)
- goto next;
-
- if (!target.bi_dir &&
- !target.bi_dir_offset) {
+ if (fsck_err_on(!backpointer_exists, c,
+ "inode %llu has wrong backpointer:\n"
+ "got %llu:%llu\n"
+ "should be %llu:%llu",
+ d_inum,
+ target.bi_dir,
+ target.bi_dir_offset,
+ k.k->p.inode,
+ k.k->p.offset)) {
target.bi_dir = k.k->p.inode;
target.bi_dir_offset = k.k->p.offset;
- ret = write_inode(&trans, &target, target_snapshot);
- if (ret)
- goto err;
+ ret = write_inode(trans, &target, target_snapshot);
+ return ret ?: -EINTR;
}
+ }
- if (!inode_backpointer_matches(d, &target)) {
- ret = inode_backpointer_exists(&trans, &target);
- if (ret < 0)
- goto err;
-
- backpointer_exists = ret;
- ret = 0;
+ if (fsck_err_on(d.v->d_type != mode_to_type(target.bi_mode), c,
+ "incorrect d_type: should be %u:\n%s",
+ mode_to_type(target.bi_mode),
+ (bch2_bkey_val_to_text(&PBUF(buf), c,
+ k), buf))) {
+ struct bkey_i_dirent *n;
- if (fsck_err_on(S_ISDIR(target.bi_mode) &&
- backpointer_exists, c,
- "directory %llu with multiple links",
- target.bi_inum)) {
- ret = remove_dirent(&trans, d.k->p);
- if (ret)
- goto err;
- continue;
- }
+ n = kmalloc(bkey_bytes(d.k), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
- if (fsck_err_on(backpointer_exists &&
- !target.bi_nlink, c,
- "inode %llu has multiple links but i_nlink 0",
- d_inum)) {
- target.bi_nlink++;
- target.bi_flags &= ~BCH_INODE_UNLINKED;
+ bkey_reassemble(&n->k_i, d.s_c);
+ n->v.d_type = mode_to_type(target.bi_mode);
- ret = write_inode(&trans, &target, target_snapshot);
- if (ret)
- goto err;
- }
+ ret = __bch2_trans_do(trans, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW,
+ bch2_btree_iter_traverse(iter) ?:
+ bch2_trans_update(trans, iter, &n->k_i, 0));
+ kfree(n);
+ return ret ?: -EINTR;
+ }
- if (fsck_err_on(!backpointer_exists, c,
- "inode %llu has wrong backpointer:\n"
- "got %llu:%llu\n"
- "should be %llu:%llu",
- d_inum,
- target.bi_dir,
- target.bi_dir_offset,
- k.k->p.inode,
- k.k->p.offset)) {
- target.bi_dir = k.k->p.inode;
- target.bi_dir_offset = k.k->p.offset;
-
- ret = write_inode(&trans, &target, target_snapshot);
- if (ret)
- goto err;
- }
- }
+ *nr_subdirs += d.v->d_type == DT_DIR;
+ return 0;
+fsck_err:
+ return ret;
+}
- if (fsck_err_on(d.v->d_type != mode_to_type(target.bi_mode), c,
- "incorrect d_type: should be %u:\n%s",
- mode_to_type(target.bi_mode),
- (bch2_bkey_val_to_text(&PBUF(buf), c,
- k), buf))) {
- struct bkey_i_dirent *n;
+/*
+ * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
+ * validate d_type
+ */
+noinline_for_stack
+static int check_dirents(struct bch_fs *c)
+{
+ struct inode_walker w = inode_walker_init();
+ struct bch_hash_info hash_info;
+ struct btree_trans trans;
+ struct btree_iter *iter;
+ unsigned nr_subdirs = 0;
+ int ret = 0;
- n = kmalloc(bkey_bytes(d.k), GFP_KERNEL);
- if (!n) {
- ret = -ENOMEM;
- goto err;
- }
+ bch_verbose(c, "checking dirents");
- bkey_reassemble(&n->k_i, d.s_c);
- n->v.d_type = mode_to_type(target.bi_mode);
+ bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
- ret = __bch2_trans_do(&trans, NULL, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_LAZY_RW,
- bch2_btree_iter_traverse(iter) ?:
- bch2_trans_update(&trans, iter, &n->k_i, 0));
- kfree(n);
- if (ret)
- goto err;
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents,
+ POS(BCACHEFS_ROOT_INO, 0),
+ BTREE_ITER_INTENT|
+ BTREE_ITER_PREFETCH);
+ while (1) {
+ ret = lockrestart_do(&trans,
+ check_dirent(&trans, iter, &hash_info, &w, &nr_subdirs));
+ if (ret == 1) {
+ /* at end */
+ ret = 0;
+ break;
}
+ if (ret)
+ break;
- nr_subdirs += d.v->d_type == DT_DIR;
-next:
bch2_btree_iter_advance(iter);
}
-err:
-fsck_err:
- if (ret == -EINTR)
- goto retry;
-
bch2_trans_iter_put(&trans, iter);
+
return bch2_trans_exit(&trans) ?: ret;
}
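check_dirents() now drives the loop through lockrestart_do(), so
check_dirent() can signal a btree transaction restart simply by returning
-EINTR (as its write_inode() paths do). For reference, a sketch of the retry
wrapper this relies on, assuming the usual definition in this tree:

	#define lockrestart_do(trans, _do)				\
	({								\
		int _ret;						\
									\
		do {							\
			bch2_trans_begin(trans);			\
			_ret = (_do);					\
		} while (_ret == -EINTR);				\
									\
		_ret;							\
	})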
nr_fields++; \
\
if (inode->_name) { \
- ret = bch2_varint_encode(out, inode->_name); \
+ ret = bch2_varint_encode_fast(out, inode->_name); \
out += ret; \
\
if (_bits > 64) \
#define x(_name, _bits) \
if (fieldnr < INODE_NR_FIELDS(inode.v)) { \
- ret = bch2_varint_decode(in, end, &v[0]); \
+ ret = bch2_varint_decode_fast(in, end, &v[0]); \
if (ret < 0) \
return ret; \
in += ret; \
\
if (_bits > 64) { \
- ret = bch2_varint_decode(in, end, &v[1]); \
+ ret = bch2_varint_decode_fast(in, end, &v[1]); \
if (ret < 0) \
return ret; \
in += ret; \
struct bkey_s_c k;
int ret;
- iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum),
- BTREE_ITER_CACHED|flags);
+ if (trans->c->opts.inodes_use_key_cache)
+ flags |= BTREE_ITER_CACHED;
+
+ iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum), flags);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
struct bpos end = POS(inode_nr + 1, 0);
struct bch_inode_unpacked inode_u;
struct bkey_s_c k;
+ unsigned iter_flags = BTREE_ITER_INTENT;
int ret;
+ if (cached && c->opts.inodes_use_key_cache)
+ iter_flags |= BTREE_ITER_CACHED;
+
bch2_trans_init(&trans, c, 0, 1024);
/*
retry:
bch2_trans_begin(&trans);
- iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
- (cached
- ? BTREE_ITER_CACHED
- : BTREE_ITER_SLOTS)|
- BTREE_ITER_INTENT);
+ iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes,
+ POS(0, inode_nr), iter_flags);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
#undef x
-const char * const bch2_d_types[] = {
+const char * const bch2_d_types[DT_MAX] = {
[DT_UNKNOWN] = "unknown",
[DT_FIFO] = "fifo",
[DT_CHR] = "chr",
OPT_BOOL(), \
BCH_SB_SHARD_INUMS, false, \
NULL, "Shard new inode numbers by CPU id") \
+ x(inodes_use_key_cache, u8, \
+ OPT_FORMAT|OPT_MOUNT, \
+ OPT_BOOL(), \
+ BCH_SB_INODES_USE_KEY_CACHE, true, \
+ NULL, "Use the btree key cache for the inodes btree") \
x(gc_reserve_percent, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_UINT(5, 21), \
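Note the new option's flags: OPT_FORMAT|OPT_MOUNT but not OPT_RUNTIME, so
inodes_use_key_cache can be set at format time (persisted via
BCH_SB_INODES_USE_KEY_CACHE, added above) or overridden at mount, but is not
exposed for toggling on a live filesystem.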
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
read_attribute(stripes_heap);
+read_attribute(open_buckets);
read_attribute(internal_uuid);
return out.pos - buf;
}
+ if (attr == &sysfs_open_buckets) {
+ bch2_open_buckets_to_text(&out, c);
+ return out.pos - buf;
+ }
+
if (attr == &sysfs_compression_stats) {
bch2_compression_stats_to_text(&out, c);
return out.pos - buf;
&sysfs_btree_key_cache,
&sysfs_btree_transactions,
&sysfs_stripes_heap,
+ &sysfs_open_buckets,
&sysfs_read_realloc_races,
&sysfs_extent_migrate_done,
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/string.h>
#include <asm/unaligned.h>
#include "varint.h"
+/**
+ * bch2_varint_encode - encode a variable length integer
+ * @out - destination to encode to
+ * @v - unsigned integer to encode
+ *
+ * Returns the size in bytes of the encoded integer - at most 9 bytes
+ */
int bch2_varint_encode(u8 *out, u64 v)
{
unsigned bits = fls64(v|1);
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
+ v = cpu_to_le64(v);
+ memcpy(out, &v, bytes);
} else {
*out++ = 255;
bytes = 9;
+ put_unaligned_le64(v, out);
}
- put_unaligned_le64(v, out);
return bytes;
}
+/**
+ * bch2_varint_decode - decode a variable length integer
+ * @in - varint to decode
+ * @end - end of buffer to decode from
+ * @out - on success, decoded integer
+ *
+ * Returns the size in bytes of the decoded integer - or -1 on failure (would
+ * have read past the end of the buffer)
+ */
int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
+{
+ unsigned bytes = likely(in < end)
+ ? ffz(*in & 255) + 1
+ : 1;
+ u64 v;
+
+ if (unlikely(in + bytes > end))
+ return -1;
+
+ if (likely(bytes < 9)) {
+ v = 0;
+ memcpy(&v, in, bytes);
+ v = le64_to_cpu(v);
+ v >>= bytes;
+ } else {
+ v = get_unaligned_le64(++in);
+ }
+
+ *out = v;
+ return bytes;
+}
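To make the format concrete - the number of trailing one bits in the first
byte, plus one, gives the encoded length - here is a worked round-trip (my
annotation, not part of the patch):

	/* 1000 = 0x3e8 needs 10 bits, so DIV_ROUND_UP(10, 7) = 2 bytes.
	 * Encode: (0x3e8 << 2) | 1 = 0xfa1, stored little-endian as a1 0f.
	 * Decode: ffz(0xa1) + 1 = 2 bytes; 0x0fa1 >> 2 = 0x3e8 = 1000.
	 */
	u8 buf[9];	/* worst-case encoded size */
	u64 v;
	int len = bch2_varint_encode(buf, 1000);	/* returns 2 */

	BUG_ON(bch2_varint_decode(buf, buf + len, &v) != len || v != 1000);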
+
+/**
+ * bch2_varint_encode_fast - fast version of bch2_varint_encode
+ *
+ * This version assumes it's always safe to write 8 bytes to @out, even if the
+ * encoded integer would be smaller.
+ */
+int bch2_varint_encode_fast(u8 *out, u64 v)
+{
+ unsigned bits = fls64(v|1);
+ unsigned bytes = DIV_ROUND_UP(bits, 7);
+
+ if (likely(bytes < 9)) {
+ v <<= bytes;
+ v |= ~(~0 << (bytes - 1));
+ } else {
+ *out++ = 255;
+ bytes = 9;
+ }
+
+ put_unaligned_le64(v, out);
+ return bytes;
+}
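The _fast variants trade bounds discipline for unconditional 8-byte accesses:
bch2_varint_encode_fast() stores a full 8 bytes even for a short varint, and
bch2_varint_decode_fast() below loads 8 bytes from @in up front. That is why
the bounded versions above use a byte-exact memcpy(), and why only callers
with guaranteed slack space in their buffers - such as the inode and alloc
pack/unpack loops switched over earlier in this patch - use the fast ones.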
+
+/**
+ * bch2_varint_decode_fast - fast version of bch2_varint_decode
+ *
+ * This version assumes that it is safe to read up to 8 bytes past @end (we
+ * still return an error if the varint itself extends past @end).
+ */
+int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
{
u64 v = get_unaligned_le64(in);
unsigned bytes = ffz(v & 255) + 1;
int bch2_varint_encode(u8 *, u64);
int bch2_varint_decode(const u8 *, const u8 *, u64 *);
+int bch2_varint_encode_fast(u8 *, u64);
+int bch2_varint_decode_fast(const u8 *, const u8 *, u64 *);
+
#endif /* _BCACHEFS_VARINT_H */