/* iterate over keys read from the journal: */
-struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
+static struct journal_key *journal_key_search(struct journal_keys *journal_keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
{
- while (iter->k) {
- if (iter->k->btree_id == iter->btree_id)
- return bkey_i_to_s_c(iter->k->k);
+ size_t l = 0, r = journal_keys->nr, m;
- iter->k++;
- if (iter->k == iter->keys->d + iter->keys->nr)
- iter->k = NULL;
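+ /*
+ * Binary search for the first key >= (id, level, pos), in the same
+ * sort order as journal_sort_key_cmp():
+ */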
+ while (l < r) {
+ m = l + ((r - l) >> 1);
+ if ((cmp_int(id, journal_keys->d[m].btree_id) ?:
+ cmp_int(level, journal_keys->d[m].level) ?:
+ bkey_cmp(pos, journal_keys->d[m].k->k.p)) > 0)
+ l = m + 1;
+ else
+ r = m;
}
- return bkey_s_c_null;
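+ /* assert that l is the first position >= the search key: */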
+ BUG_ON(l < journal_keys->nr &&
+ (cmp_int(id, journal_keys->d[l].btree_id) ?:
+ cmp_int(level, journal_keys->d[l].level) ?:
+ bkey_cmp(pos, journal_keys->d[l].k->k.p)) > 0);
+
+ BUG_ON(l &&
+ (cmp_int(id, journal_keys->d[l - 1].btree_id) ?:
+ cmp_int(level, journal_keys->d[l - 1].level) ?:
+ bkey_cmp(pos, journal_keys->d[l - 1].k->k.p)) <= 0);
+
+ return l < journal_keys->nr ? journal_keys->d + l : NULL;
}
-struct bkey_s_c bch2_journal_iter_next(struct journal_iter *iter)
+static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
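+ /*
+ * The iterator is exhausted once it's past the keys for the btree and
+ * level it was initialized with:
+ */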
- if (!iter->k)
- return bkey_s_c_null;
+ if (iter->k &&
+ iter->k < iter->keys->d + iter->keys->nr &&
+ iter->k->btree_id == iter->btree_id &&
+ iter->k->level == iter->level)
+ return iter->k->k;
- iter->k++;
- if (iter->k == iter->keys->d + iter->keys->nr)
- iter->k = NULL;
+ iter->k = NULL;
+ return NULL;
+}
- return bch2_journal_iter_peek(iter);
+static void bch2_journal_iter_advance(struct journal_iter *iter)
+{
+ if (iter->k)
+ iter->k++;
+}
+
+static void bch2_journal_iter_init(struct journal_iter *iter,
+ struct journal_keys *journal_keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ iter->btree_id = id;
+ iter->level = level;
+ iter->keys = journal_keys;
+ iter->k = journal_key_search(journal_keys, id, level, pos);
+}
+
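+/*
+ * The btree half of a btree_and_journal_iter is either a real btree iterator
+ * or a node iterator over a single in-memory btree node:
+ */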
+static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
+{
+ return iter->btree
+ ? bch2_btree_iter_peek(iter->btree)
+ : bch2_btree_node_iter_peek_unpack(&iter->node_iter,
+ iter->b, &iter->unpacked);
+}
+
+static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
+{
+ if (iter->btree)
+ bch2_btree_iter_next(iter->btree);
+ else
+ bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
case none:
break;
case btree:
- bch2_btree_iter_next(iter->btree);
+ bch2_journal_iter_advance_btree(iter);
break;
case journal:
- bch2_journal_iter_next(&iter->journal);
+ bch2_journal_iter_advance(&iter->journal);
break;
}
struct bkey_s_c ret;
while (1) {
- struct bkey_s_c btree_k = bch2_btree_iter_peek(iter->btree);
- struct bkey_s_c journal_k = bch2_journal_iter_peek(&iter->journal);
+ struct bkey_s_c btree_k =
+ bch2_journal_iter_peek_btree(iter);
+ struct bkey_s_c journal_k =
+ bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));
if (btree_k.k && journal_k.k) {
int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);
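+ /* if positions are equal, the journal key overrides the btree key: */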
if (!cmp)
- bch2_btree_iter_next(iter->btree);
+ bch2_journal_iter_advance_btree(iter);
iter->last = cmp < 0 ? btree : journal;
} else if (btree_k.k) {
}
ret = iter->last == journal ? journal_k : btree_k;
+
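+ /*
+ * When iterating over a single btree node, don't return journal keys
+ * past the end of that node:
+ */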
+ if (iter->b &&
+ bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
+ iter->journal.k = NULL;
+ iter->last = none;
+ return bkey_s_c_null;
+ }
+
if (!bkey_deleted(ret.k))
break;
return bch2_btree_and_journal_iter_peek(iter);
}
-struct journal_key *journal_key_search(struct journal_keys *journal_keys,
- enum btree_id id, struct bpos pos)
+void bch2_btree_and_journal_iter_init(struct btree_and_journal_iter *iter,
+ struct btree_trans *trans,
+ struct journal_keys *journal_keys,
+ enum btree_id id, struct bpos pos)
{
- size_t l = 0, r = journal_keys->nr, m;
+ memset(iter, 0, sizeof(*iter));
- while (l < r) {
- m = l + ((r - l) >> 1);
- if ((cmp_int(id, journal_keys->d[m].btree_id) ?:
- bkey_cmp(pos, journal_keys->d[m].k->k.p)) > 0)
- l = m + 1;
- else
- r = m;
- }
+ iter->btree = bch2_trans_get_iter(trans, id, pos, 0);
+ bch2_journal_iter_init(&iter->journal, journal_keys, id, 0, pos);
+}
- BUG_ON(l < journal_keys->nr &&
- (cmp_int(id, journal_keys->d[l].btree_id) ?:
- bkey_cmp(pos, journal_keys->d[l].k->k.p)) > 0);
+void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
+ struct journal_keys *journal_keys,
+ struct btree *b)
+{
+ memset(iter, 0, sizeof(*iter));
- BUG_ON(l &&
- (cmp_int(id, journal_keys->d[l - 1].btree_id) ?:
- bkey_cmp(pos, journal_keys->d[l - 1].k->k.p)) <= 0);
+ iter->b = b;
+ bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
+ bch2_journal_iter_init(&iter->journal, journal_keys,
+ b->c.btree_id, b->c.level, b->data->min_key);
+}
- return l < journal_keys->nr ? journal_keys->d + l : NULL;
+/* Walk btree, overlaying keys from the journal: */
+
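+/*
+ * Walk the keys in one node (with journal keys overlaid), recursing into
+ * child nodes depth first:
+ */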
+static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
+ struct journal_keys *journal_keys,
+ enum btree_id btree_id,
+ btree_walk_node_fn node_fn,
+ btree_walk_key_fn key_fn)
+{
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ int ret = 0;
+
+ bch2_btree_and_journal_iter_init_node_iter(&iter, journal_keys, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ ret = key_fn(c, btree_id, b->c.level, k);
+ if (ret)
+ break;
+
+ if (b->c.level) {
+ struct btree *child;
+ BKEY_PADDED(k) tmp;
+
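+ /*
+ * Copy the key out: advancing the iterator may invalidate the
+ * bkey it returned:
+ */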
+ bkey_reassemble(&tmp.k, k);
+ k = bkey_i_to_s_c(&tmp.k);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ if (b->c.level > 0) {
+ child = bch2_btree_node_get_noiter(c, &tmp.k,
+ b->c.btree_id, b->c.level - 1);
+ ret = PTR_ERR_OR_ZERO(child);
+ if (ret)
+ break;
+
+ ret = (node_fn ? node_fn(c, child) : 0) ?:
+ bch2_btree_and_journal_walk_recurse(c, child,
+ journal_keys, btree_id, node_fn, key_fn);
+ six_unlock_read(&child->c.lock);
+
+ if (ret)
+ break;
+ }
+ } else {
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+ }
+
+ return ret;
}
-void bch2_btree_and_journal_iter_init(struct btree_and_journal_iter *iter,
- struct btree_trans *trans,
- struct journal_keys *journal_keys,
- enum btree_id id, struct bpos pos)
+int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
+ enum btree_id btree_id,
+ btree_walk_node_fn node_fn,
+ btree_walk_key_fn key_fn)
{
- iter->journal.keys = journal_keys;
- iter->journal.k = journal_key_search(journal_keys, id, pos);
- iter->journal.btree_id = id;
+ struct btree *b = c->btree_roots[btree_id].b;
+ int ret = 0;
- iter->btree = bch2_trans_get_iter(trans, id, pos, 0);
+ if (btree_node_fake(b))
+ return 0;
+
+ six_lock_read(&b->c.lock, NULL, NULL);
+ ret = (node_fn ? node_fn(c, b) : 0) ?:
+ bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
+ node_fn, key_fn) ?:
+ key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
+ six_unlock_read(&b->c.lock);
+
+ return ret;
}
/* sort and dedup all keys in the journal: */
-static void journal_entries_free(struct list_head *list)
+void bch2_journal_entries_free(struct list_head *list)
{
while (!list_empty(list)) {
const struct journal_key *l = _l;
const struct journal_key *r = _r;
- return cmp_int(l->btree_id, r->btree_id) ?:
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->level, r->level) ?:
bkey_cmp(l->k->k.p, r->k->k.p) ?:
cmp_int(l->journal_seq, r->journal_seq) ?:
cmp_int(l->journal_offset, r->journal_offset);
}
-static int journal_sort_seq_cmp(const void *_l, const void *_r)
-{
- const struct journal_key *l = _l;
- const struct journal_key *r = _r;
-
- return cmp_int(l->journal_seq, r->journal_seq) ?:
- cmp_int(l->btree_id, r->btree_id) ?:
- bkey_cmp(l->k->k.p, r->k->k.p);
-}
-
-static void journal_keys_free(struct journal_keys *keys)
+void bch2_journal_keys_free(struct journal_keys *keys)
{
kvfree(keys->d);
keys->d = NULL;
struct journal_key *src, *dst;
size_t nr_keys = 0;
- list_for_each_entry(p, journal_entries, list)
+ if (list_empty(journal_entries))
+ return keys;
+
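+ /*
+ * The newest entry's last_seq is the oldest sequence still needed for
+ * replay; keys in entries older than that were already flushed:
+ */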
+ keys.journal_seq_base =
+ le64_to_cpu(list_last_entry(journal_entries,
+ struct journal_replay, list)->j.last_seq);
+
+ list_for_each_entry(p, journal_entries, list) {
+ if (le64_to_cpu(p->j.seq) < keys.journal_seq_base)
+ continue;
+
for_each_jset_key(k, _n, entry, &p->j)
nr_keys++;
+ }
- keys.journal_seq_base =
- le64_to_cpu(list_first_entry(journal_entries,
- struct journal_replay,
- list)->j.seq);
keys.d = kvmalloc(sizeof(keys.d[0]) * nr_keys, GFP_KERNEL);
if (!keys.d)
goto err;
- list_for_each_entry(p, journal_entries, list)
+ list_for_each_entry(p, journal_entries, list) {
+ if (le64_to_cpu(p->j.seq) < keys.journal_seq_base)
+ continue;
+
for_each_jset_key(k, _n, entry, &p->j)
keys.d[keys.nr++] = (struct journal_key) {
.btree_id = entry->btree_id,
+ .level = entry->level,
.k = k,
.journal_seq = le64_to_cpu(p->j.seq) -
keys.journal_seq_base,
.journal_offset = k->_data - p->j._data,
};
+ }
sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);
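+ /* dedup, keeping only the newest version of each key: */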
src = dst = keys.d;
while (src < keys.d + keys.nr) {
while (src + 1 < keys.d + keys.nr &&
- src[0].btree_id == src[1].btree_id &&
+ src[0].btree_id == src[1].btree_id &&
+ src[0].level == src[1].level &&
!bkey_cmp(src[0].k->k.p, src[1].k->k.p))
src++;
}
static int __bch2_journal_replay_key(struct btree_trans *trans,
- enum btree_id id, struct bkey_i *k)
+ enum btree_id id, unsigned level,
+ struct bkey_i *k)
{
struct btree_iter *iter;
int ret;
- iter = bch2_trans_get_iter(trans, id, k->k.p, BTREE_ITER_INTENT);
+ iter = bch2_trans_get_node_iter(trans, id, k->k.p,
+ BTREE_MAX_DEPTH, level,
+ BTREE_ITER_INTENT);
if (IS_ERR(iter))
return PTR_ERR(iter);
}
static int bch2_journal_replay_key(struct bch_fs *c, enum btree_id id,
- struct bkey_i *k)
+ unsigned level, struct bkey_i *k)
{
return bch2_trans_do(c, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW|
BTREE_INSERT_JOURNAL_REPLAY,
- __bch2_journal_replay_key(&trans, id, k));
+ __bch2_journal_replay_key(&trans, id, level, k));
+}
+
+static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
+{
+ struct btree_iter *iter;
+ int ret;
+
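+ /*
+ * Alloc updates are replayed through the btree key cache, without
+ * reading the old key from the alloc btree (BTREE_ITER_CACHED_NOFILL):
+ */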
+ iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, k->k.p,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
+ ret = PTR_ERR_OR_ZERO(iter) ?:
+ bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+}
+
+static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
+{
+ return bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_JOURNAL_REPLAY,
+ __bch2_alloc_replay_key(&trans, k));
+}
+
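+/*
+ * Keys are sorted by level, descending, so that interior node updates are
+ * replayed first, then by journal sequence:
+ */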
+static int journal_sort_seq_cmp(const void *_l, const void *_r)
+{
+ const struct journal_key *l = _l;
+ const struct journal_key *r = _r;
+
+ return cmp_int(r->level, l->level) ?:
+ cmp_int(l->journal_seq, r->journal_seq) ?:
+ cmp_int(l->btree_id, r->btree_id) ?:
+ bkey_cmp(l->k->k.p, r->k->k.p);
}
static int bch2_journal_replay(struct bch_fs *c,
{
struct journal *j = &c->journal;
struct journal_key *i;
+ u64 seq;
int ret;
sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
+ if (keys.nr)
+ replay_now_at(j, keys.journal_seq_base);
+
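+ /* save the replay position; the leaf node pass restarts from here: */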
+ seq = j->replay_journal_seq;
+
+ /*
+ * First, replay updates to the alloc btree - these only update the
+ * btree key cache:
+ */
for_each_journal_key(keys, i) {
- replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+ cond_resched();
- if (i->btree_id == BTREE_ID_ALLOC)
+ if (!i->level && i->btree_id == BTREE_ID_ALLOC) {
+ j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
ret = bch2_alloc_replay_key(c, i->k);
- else if (i->k->k.size)
- ret = bch2_extent_replay_key(c, i->btree_id, i->k);
- else
- ret = bch2_journal_replay_key(c, i->btree_id, i->k);
+ if (ret)
+ goto err;
+ }
+ }
- if (ret) {
- bch_err(c, "journal replay: error %d while replaying key",
- ret);
- return ret;
+ /*
+ * Next, replay updates to interior btree nodes:
+ */
+ for_each_journal_key(keys, i) {
+ cond_resched();
+
+ if (i->level) {
+ j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
+ ret = bch2_journal_replay_key(c, i->btree_id, i->level, i->k);
+ if (ret)
+ goto err;
}
+ }
+
+ /*
+ * Now that the btree is in a consistent state, we can start journal
+ * reclaim (which will flush entries from the btree key cache back to
+ * the btree):
+ */
+ set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
+ set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
+
+ j->replay_journal_seq = seq;
+ /*
+ * Now replay leaf node updates:
+ */
+ for_each_journal_key(keys, i) {
cond_resched();
+
+ if (i->level || i->btree_id == BTREE_ID_ALLOC)
+ continue;
+
+ replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+
+ ret = i->k->k.size
+ ? bch2_extent_replay_key(c, i->btree_id, i->k)
+ : bch2_journal_replay_key(c, i->btree_id, i->level, i->k);
+ if (ret)
+ goto err;
}
replay_now_at(j, j->replay_journal_seq_end);
bch2_journal_set_replay_done(j);
bch2_journal_flush_all_pins(j);
return bch2_journal_error(j);
+err:
+ bch_err(c, "journal replay: error %d while replaying key", ret);
+ return ret;
}
static bool journal_empty(struct list_head *journal)
int ret = 0;
list_for_each_entry(i, journal, list) {
+ if (le64_to_cpu(i->j.seq) < start_seq)
+ continue;
+
fsck_err_on(seq != le64_to_cpu(i->j.seq), c,
"journal entries %llu-%llu missing! (replaying %llu-%llu)",
seq, le64_to_cpu(i->j.seq) - 1,
"superblock read clock doesn't match journal after clean shutdown");
for (i = 0; i < BTREE_ID_NR; i++) {
+ char buf1[200], buf2[200];
struct bkey_i *k1, *k2;
unsigned l1 = 0, l2 = 0;
k1->k.u64s != k2->k.u64s ||
memcmp(k1, k2, bkey_bytes(k1)) ||
l1 != l2, c,
- "superblock btree root doesn't match journal after clean shutdown");
+ "superblock btree root %u doesn't match journal after clean shutdown\n"
+ "sb: l=%u %s\n"
+ "journal: l=%u %s\n", i,
+ l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
+ l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
}
fsck_err:
return ret;
const char *err = "cannot allocate memory";
struct bch_sb_field_clean *clean = NULL;
u64 journal_seq;
- LIST_HEAD(journal_entries);
- struct journal_keys journal_keys = { NULL };
bool wrote = false, write_sb = false;
int ret;
set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
}
- if (!c->sb.clean || c->opts.fsck) {
+ if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
struct jset *j;
- ret = bch2_journal_read(c, &journal_entries);
+ ret = bch2_journal_read(c, &c->journal_entries);
if (ret)
goto err;
- if (mustfix_fsck_err_on(c->sb.clean && !journal_empty(&journal_entries), c,
+ if (mustfix_fsck_err_on(c->sb.clean && !journal_empty(&c->journal_entries), c,
"filesystem marked clean but journal not empty")) {
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
c->sb.clean = false;
}
- if (!c->sb.clean && list_empty(&journal_entries)) {
+ if (!c->sb.clean && list_empty(&c->journal_entries)) {
bch_err(c, "no journal entries found");
ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
goto err;
}
- journal_keys = journal_keys_sort(&journal_entries);
- if (!journal_keys.d) {
+ c->journal_keys = journal_keys_sort(&c->journal_entries);
+ if (!c->journal_keys.d) {
ret = -ENOMEM;
goto err;
}
- j = &list_last_entry(&journal_entries,
+ j = &list_last_entry(&c->journal_entries,
struct journal_replay, list)->j;
ret = verify_superblock_clean(c, &clean, j);
goto err;
}
- c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_extents_above_btree_updates;
-
- ret = journal_replay_early(c, clean, &journal_entries);
+ ret = journal_replay_early(c, clean, &c->journal_entries);
if (ret)
goto err;
ret = bch2_blacklist_table_initialize(c);
- if (!list_empty(&journal_entries)) {
+ if (!list_empty(&c->journal_entries)) {
ret = verify_journal_entries_not_blacklisted_or_missing(c,
- &journal_entries);
+ &c->journal_entries);
if (ret)
goto err;
}
ret = bch2_fs_journal_start(&c->journal, journal_seq,
- &journal_entries);
+ &c->journal_entries);
if (ret)
goto err;
bch_verbose(c, "starting alloc read");
err = "error reading allocation information";
- ret = bch2_alloc_read(c, &journal_keys);
+ ret = bch2_alloc_read(c, &c->journal_keys);
if (ret)
goto err;
bch_verbose(c, "alloc read done");
bch_verbose(c, "starting stripes_read");
err = "error reading stripes";
- ret = bch2_stripes_read(c, &journal_keys);
+ ret = bch2_stripes_read(c, &c->journal_keys);
if (ret)
goto err;
bch_verbose(c, "stripes_read done");
*/
bch_info(c, "starting metadata mark and sweep");
err = "error in mark and sweep";
- ret = bch2_gc(c, NULL, true, true);
+ ret = bch2_gc(c, &c->journal_keys, true, true);
if (ret)
goto err;
bch_verbose(c, "mark and sweep done");
test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
bch_info(c, "starting mark and sweep");
err = "error in mark and sweep";
- ret = bch2_gc(c, &journal_keys, true, false);
+ ret = bch2_gc(c, &c->journal_keys, true, false);
if (ret)
goto err;
bch_verbose(c, "mark and sweep done");
bch_verbose(c, "starting journal replay");
err = "journal replay failed";
- ret = bch2_journal_replay(c, journal_keys);
+ ret = bch2_journal_replay(c, c->journal_keys);
if (ret)
goto err;
bch_verbose(c, "journal replay done");
set_bit(BCH_FS_FSCK_DONE, &c->flags);
bch2_flush_fsck_errs(c);
- journal_keys_free(&journal_keys);
- journal_entries_free(&journal_entries);
+ if (!c->opts.keep_journal) {
+ bch2_journal_keys_free(&c->journal_keys);
+ bch2_journal_entries_free(&c->journal_entries);
+ }
kfree(clean);
if (ret)
bch_err(c, "Error in recovery: %s (%i)", err, ret);
bch2_mark_dev_superblock(c, ca, 0);
mutex_unlock(&c->sb_lock);
+ mutex_lock(&c->sb_lock);
+ c->disk_sb.sb->version = c->disk_sb.sb->version_min =
+ le16_to_cpu(bcachefs_metadata_version_current);
+ c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
+ c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+
+ bch2_write_super(c);
+ mutex_unlock(&c->sb_lock);
+
set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
for (i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i);
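+ /* no journal to replay, so journal reclaim can start right away: */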
+ set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
+ set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);
+
err = "unable to allocate journal buckets";
for_each_online_member(ca, c, i) {
ret = bch2_dev_journal_alloc(ca);
goto err;
mutex_lock(&c->sb_lock);
- c->disk_sb.sb->version = c->disk_sb.sb->version_min =
- le16_to_cpu(bcachefs_metadata_version_current);
- c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
- c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
-
SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);