// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "alloc_background.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "fs-common.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "logged_ops.h"
#include "subvolume.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

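/*
 * QSTR() builds an on-stack struct qstr (name + length, hash left zero) from a
 * C string; bch2_fs_initialize() uses it below for the "lost+found" dirent.
 */
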
/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

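/*
 * As replay advances past a journal sequence number, its journal pin can be
 * dropped so journal reclaim can make forward progress during replay:
 */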
static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

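/*
 * Replay a single journalled key into its btree: the existing node is looked
 * up and the update applied without running triggers (BTREE_TRIGGER_NORUN).
 */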
static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	unsigned update_flags = BTREE_TRIGGER_NORUN;
	int ret;

	/*
	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;
	else
		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

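/*
 * Replay keys from the journal into the btrees, in journal sequence number
 * order, so that journal pins can be dropped as replay progresses:
 */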
static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	u64 start_seq = c->journal_replay_seq_start;
	u64 end_seq = c->journal_replay_seq_end;
	size_t i;
	int ret = 0;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
	if (!keys_sorted)
		return -BCH_ERR_ENOMEM_journal_replay;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		replay_now_at(j, k->journal_seq);

		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|
				    BTREE_INSERT_NOFAIL|
				    (!k->allocated
				     ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim
				     : 0),
				    bch2_journal_replay_key(trans, k));
		if (ret) {
			bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
				bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret));
			goto err;
		}
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);

	if (keys->nr && !ret)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	kvfree(keys_sorted);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

/* journal replay early: */

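/*
 * Non-key journal entries (btree roots, fs/device usage, blacklists, IO
 * clocks) are applied to in-memory state before btree key replay runs:
 */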
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}
		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
		}
		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

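/*
 * Walk every jset entry, either from the superblock clean section (clean
 * shutdown) or from the journal entries we read (unclean shutdown), and feed
 * each one to journal_replay_entry_early():
 */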
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;
			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);
	return 0;
}

/* sb clean section: */

static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
		return true;
	default:
		return false;
	}
}

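/*
 * Read the btree roots collected by journal_replay_entry_early(). Errors on
 * alloc btree roots only clear BCH_COMPAT_alloc_info (so the alloc info gets
 * rebuilt) rather than failing recovery:
 */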
static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			fsck_err(c,
				 "error reading btree root %s",
				 bch2_btree_ids[i]);
			if (btree_id_is_alloc(i))
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			ret = 0;
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc(c, i);
		}
	}
fsck_err:
	return ret;
}

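/*
 * Create the root snapshot tree, root snapshot and root subvolume keys that
 * every filesystem is expected to have (used both when initializing a new
 * filesystem and when upgrading one from before subvolumes existed):
 */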
static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree root_tree;
	struct bkey_i_snapshot root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset = 1;
	root_tree.v.master_subvol = cpu_to_le32(1);
	root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags = 0;
	root_snapshot.v.parent = 0;
	root_snapshot.v.subvol = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree = cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags = 0;
	root_volume.v.snapshot = cpu_to_le32(U32_MAX);
	root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
	      bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
	      bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* set bi_subvol on root inode */
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				__bch2_fs_upgrade_for_subvolumes(trans));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, _when)	#_fn,
	BCH_RECOVERY_PASSES()
#undef x
	NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	return 0;
}

struct recovery_pass_fn {
	int (*fn)(struct bch_fs *);
	unsigned when;
};

static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _when) { .fn = bch2_##_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};

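/*
 * Decide whether (and how far) to upgrade the on-disk format version, and
 * which extra recovery passes the upgrade will need:
 */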
static void check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
	unsigned latest_version = bcachefs_metadata_version_current;
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;
	u64 recovery_passes;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = old_version;
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
			   ? "incompatible" : "compatible");
		bch2_version_to_text(&buf, old_version);
		prt_str(&buf, " to ");
		bch2_version_to_text(&buf, new_version);
		prt_newline(&buf);

		recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version);
		if (recovery_passes) {
			if ((recovery_passes & RECOVERY_PASS_ALL_FSCK) == RECOVERY_PASS_ALL_FSCK)
				prt_str(&buf, "fsck required");
			else {
				prt_str(&buf, "running recovery passes: ");
				prt_bitflags(&buf, bch2_recovery_passes, recovery_passes);
			}

			c->recovery_passes_explicit |= recovery_passes;
			c->opts.fix_errors = FSCK_FIX_yes;
		}

		bch_info(c, "%s", buf.buf);

		mutex_lock(&c->sb_lock);
		bch2_sb_upgrade(c, new_version);
		mutex_unlock(&c->sb_lock);

		printbuf_exit(&buf);
	}
}

u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
		if (recovery_pass_fns[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}

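/*
 * A pass runs if it was explicitly requested, if it's marked PASS_ALWAYS, or
 * if its PASS_FSCK/PASS_UNCLEAN conditions apply to this mount:
 */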
static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_pass_fns + c->curr_recovery_pass;

	if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
		return false;
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}

static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	int ret;

	c->curr_recovery_pass = pass;

	if (should_run_recovery_pass(c, pass)) {
		struct recovery_pass_fn *p = recovery_pass_fns + pass;

		if (!(p->when & PASS_SILENT))
			printk(KERN_INFO bch2_log_msg(c, "%s..."),
			       bch2_recovery_passes[pass]);
		ret = p->fn(c);
		if (ret)
			return ret;
		if (!(p->when & PASS_SILENT))
			printk(KERN_CONT " done\n");

		c->recovery_passes_complete |= BIT_ULL(pass);
	}

	return 0;
}

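/*
 * Run recovery passes in order, starting from c->curr_recovery_pass; a pass
 * returning -BCH_ERR_restart_recovery makes the loop continue from whatever
 * c->curr_recovery_pass now points at instead of advancing:
 */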
static int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
		ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery))
			continue;
		if (ret)
			break;
		c->curr_recovery_pass++;
	}

	return ret;
}

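/*
 * Filesystem recovery on mount: read the clean section and/or the journal,
 * apply early (non-key) entries, read btree roots, run the recovery passes
 * (including journal replay and, optionally, fsck), then finish the version
 * upgrade and update the superblock.
 */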
int bch2_fs_recovery(struct bch_fs *c)
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq = 0, blacklist_seq, journal_seq;
	bool write_sb = false;

	clean = bch2_read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);

	bch_info(c, "recovering from clean shutdown, journal seq %llu",
		 le64_to_cpu(clean->journal_seq));

	bch_info(c, "recovering from unclean shutdown");

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");

	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");

	if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery))
		check_version_upgrade(c);

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");

	ret = bch2_blacklist_table_initialize(c);
		bch_err(c, "error initializing blacklist table");

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				last_journal_entry = &(*i)->j;
				(*i)->ignore = false;

		ret = bch2_journal_keys_sort(c);

	if (c->sb.clean && last_journal_entry) {
		ret = bch2_verify_superblock_clean(c, &clean,

		bch_err(c, "no superblock clean section found");
		ret = -BCH_ERR_fsck_repair_impossible;

	blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;

	c->journal_replay_seq_start = last_seq;
	c->journal_replay_seq_end = blacklist_seq - 1;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					   blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
						       blacklist_seq, journal_seq);
			bch_err(c, "error creating new journal seq blacklist entry");

	ret = bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				   journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);

	if (c->opts.reconstruct_alloc)
		bch2_journal_log_msg(c, "dropping alloc info");

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);

	    (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
	     BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

	ret = bch2_run_recovery_passes(c);

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_ERRORS_FIXED, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_ERRORS_FIXED, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);

		if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) ||
		    test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);

		set_bit(BCH_FS_ERRORS_FIXED, &c->flags);

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		bch_verbose(c, "quotas done");

	mutex_lock(&c->sb_lock);
	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version);

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);

	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);

	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c) ?:
			bch2_scan_old_btree_nodes(c, &stats);

		bch_info(c, "scanning for old btree nodes done");

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(c);

	if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);

	bch2_fs_emergency_read_only(c);

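/*
 * Initialization of a brand new filesystem: allocate btree roots and
 * per-device journals, create the root subvolume/snapshot, the root directory
 * and lost+found, then mark the superblock initialized and write it out.
 */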
int bch2_fs_initialize(struct bch_fs *c)
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_sb_maybe_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);

	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	set_bit(BCH_FS_FSCK_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_online_member(ca, c, i)
		bch2_dev_usage_init(ca);

	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
			percpu_ref_put(&ca->io_ref);

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
			percpu_ref_put(&ca->ref);

		ca->new_fs_bucket_idx = 0;

	ret = bch2_fs_freespace_init(c);

	ret = bch2_initialize_subvolumes(c);

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
		bch_err_msg(c, ret, "creating root directory");

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
		bch_err_msg(c, ret, "creating lost+found");

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);

	ret = bch2_journal_flush(&c->journal);
		bch_err_msg(c, ret, "writing first journal entry");

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}