// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

/* a qstr with only the length filled in, no precomputed hash: */
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
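/*
 * The first phase of recovery reads every journal entry still on disk and
 * flattens them into a single sorted, deduplicated array of keys to replay.
 * Each struct journal_key records the key itself, the btree it belongs to,
 * its position, and where in the journal it came from (sequence number and
 * offset within the entry); keys synthesized during deduplication are
 * flagged "allocated" so journal_keys_free() knows to kfree() them.
 */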
/* sort and dedup all keys in the journal: */

static void journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}
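/*
 * Two sort orders: key order (btree id, then position) is used while
 * deduplicating; sequence order is used at replay time, so journal pins can
 * be dropped as replay progresses.
 */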
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return cmp_int(l->btree_id, r->btree_id) ?:
		bkey_cmp(l->pos, r->pos) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->btree_id, r->btree_id) ?:
		bkey_cmp(l->pos, r->pos);
}
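/*
 * After a key's position has been modified (e.g. by bch2_cut_front()) it may
 * no longer be in sort order: sift it forward until the array is ordered by
 * journal_sort_key_cmp() again.
 */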
static void journal_keys_sift(struct journal_keys *keys, struct journal_key *i)
{
	while (i + 1 < keys->d + keys->nr &&
	       journal_sort_key_cmp(i, i + 1) > 0) {
		swap(i[0], i[1]);
		i++;
	}
}
static void journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for_each_journal_key(*keys, i)
		if (i->allocated)
			kfree(i->k);
	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}
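/*
 * Produce a deduplicated array of journal keys: when two keys overlap, the
 * newer key (higher journal seq, then offset) wins and the older key is
 * trimmed around it. When an older key completely spans a newer one, its
 * front portion has to be emitted as a separately allocated key, which is
 * why keys_deduped is sized at twice the number of input keys.
 */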
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *p;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL }, keys_deduped = { NULL };
	struct journal_key *i;
	size_t nr_keys = 0;

	list_for_each_entry(p, journal_entries, list)
		for_each_jset_key(k, _n, entry, &p->j)
			nr_keys++;

	keys.journal_seq_base = keys_deduped.journal_seq_base =
		le64_to_cpu(list_first_entry(journal_entries,
					     struct journal_replay,
					     list)->j.seq);

	keys.d = kvmalloc(sizeof(keys.d[0]) * nr_keys, GFP_KERNEL);
	if (!keys.d)
		goto err;

	keys_deduped.d = kvmalloc(sizeof(keys.d[0]) * nr_keys * 2, GFP_KERNEL);
	if (!keys_deduped.d)
		goto err;

	list_for_each_entry(p, journal_entries, list)
		for_each_jset_key(k, _n, entry, &p->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.pos		= bkey_start_pos(&k->k),
				.k		= k,
				.journal_seq	= le64_to_cpu(p->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - p->j._data,
			};

	sort(keys.d, nr_keys, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

	i = keys.d;
	while (i < keys.d + keys.nr) {
		/* two keys with the same start position: newer key wins: */
		if (i + 1 < keys.d + keys.nr &&
		    i[0].btree_id == i[1].btree_id &&
		    !bkey_cmp(i[0].pos, i[1].pos)) {
			if (bkey_cmp(i[0].k->k.p, i[1].k->k.p) <= 0) {
				i++;
			} else {
				bch2_cut_front(i[1].k->k.p, i[0].k);
				i[0].pos = i[1].k->k.p;
				journal_keys_sift(&keys, i);
			}
			continue;
		}

		/* i[0] overlaps with the start of i[1]: */
		if (i + 1 < keys.d + keys.nr &&
		    i[0].btree_id == i[1].btree_id &&
		    bkey_cmp(i[0].k->k.p, bkey_start_pos(&i[1].k->k)) > 0) {
			if ((cmp_int(i[0].journal_seq, i[1].journal_seq) ?:
			     cmp_int(i[0].journal_offset, i[1].journal_offset)) < 0) {
				/* i[0] is older: trim it around i[1]: */
				if (bkey_cmp(i[0].k->k.p, i[1].k->k.p) <= 0) {
					bch2_cut_back(bkey_start_pos(&i[1].k->k), &i[0].k->k);
				} else {
					struct bkey_i *split =
						kmalloc(bkey_bytes(i[0].k), GFP_KERNEL);

					if (!split)
						goto err;

					bkey_copy(split, i[0].k);
					bch2_cut_back(bkey_start_pos(&i[1].k->k), &split->k);
					keys_deduped.d[keys_deduped.nr++] = (struct journal_key) {
						.btree_id	= i[0].btree_id,
						.allocated	= true,
						.pos		= bkey_start_pos(&split->k),
						.k		= split,
						.journal_seq	= i[0].journal_seq,
						.journal_offset	= i[0].journal_offset,
					};

					bch2_cut_front(i[1].k->k.p, i[0].k);
					i[0].pos = i[1].k->k.p;
					journal_keys_sift(&keys, i);
					continue;
				}
			} else {
				/* i[0] is newer: trim or drop i[1]: */
				if (bkey_cmp(i[0].k->k.p, i[1].k->k.p) >= 0) {
					i[1] = i[0];
					i++;
					continue;
				} else {
					bch2_cut_front(i[0].k->k.p, i[1].k);
					i[1].pos = i[0].k->k.p;
					journal_keys_sift(&keys, i + 1);
					continue;
				}
			}
		}

		keys_deduped.d[keys_deduped.nr++] = *i++;
	}

	kvfree(keys.d);
	return keys_deduped;
err:
	journal_keys_free(&keys_deduped);
	kvfree(keys.d);
	return (struct journal_keys) { NULL };
}
/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}
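/*
 * Replay one extent key: an extent from the journal may need splitting into
 * portions that can be inserted atomically (e.g. at btree leaf node
 * boundaries), so insert it one atomic portion at a time, retrying from the
 * top on -EINTR. Splitting a compressed extent can increase the disk space
 * it accounts for, hence the disk reservation.
 */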
static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	struct btree_trans trans;
	struct btree_iter *iter, *split_iter;
	/*
	 * We might cause compressed extents to be split, so we need to pass in
	 * a disk_reservation:
	 */
	struct disk_reservation disk_res =
		bch2_disk_reservation_init(c, 0);
	struct bkey_i *split;
	bool split_compressed = false;
	int ret;

	bch2_trans_init(&trans, c);
	bch2_trans_preload_iters(&trans);
retry:
	bch2_trans_begin(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(&k->k),
				   BTREE_ITER_INTENT);

	do {
		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			goto err;

		split_iter = bch2_trans_copy_iter(&trans, iter);
		ret = PTR_ERR_OR_ZERO(split_iter);
		if (ret)
			goto err;

		split = bch2_trans_kmalloc(&trans, bkey_bytes(&k->k));
		ret = PTR_ERR_OR_ZERO(split);
		if (ret)
			goto err;

		if (!split_compressed &&
		    bch2_extent_is_compressed(bkey_i_to_s_c(k)) &&
		    !bch2_extent_is_atomic(k, split_iter)) {
			ret = bch2_disk_reservation_add(c, &disk_res,
					k->k.size *
					bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(k)),
					BCH_DISK_RESERVATION_NOFAIL);
			BUG_ON(ret);

			split_compressed = true;
		}

		bkey_copy(split, k);
		bch2_cut_front(split_iter->pos, split);
		bch2_extent_trim_atomic(split, split_iter);

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(split_iter, split));
		bch2_btree_iter_set_pos(iter, split->k.p);
	} while (bkey_cmp(iter->pos, k->k.p) < 0);

	if (split_compressed) {
		memset(&trans.fs_usage_deltas.fs_usage, 0,
		       sizeof(trans.fs_usage_deltas.fs_usage));
		trans.fs_usage_deltas.top = trans.fs_usage_deltas.d;

		ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k), false,
					  -((s64) k->k.size),
					  &trans.fs_usage_deltas) ?:
		      bch2_trans_commit(&trans, &disk_res, NULL,
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW|
					BTREE_INSERT_NOMARK_OVERWRITES|
					BTREE_INSERT_NO_CLEAR_REPLICAS);
	} else {
		ret = bch2_trans_commit(&trans, &disk_res, NULL,
					BTREE_INSERT_ATOMIC|
					BTREE_INSERT_NOFAIL|
					BTREE_INSERT_LAZY_RW|
					BTREE_INSERT_JOURNAL_REPLAY|
					BTREE_INSERT_NOMARK);
	}
err:
	if (ret == -EINTR)
		goto retry;

	bch2_disk_reservation_put(c, &disk_res);

	return bch2_trans_exit(&trans) ?: ret;
}
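/*
 * Replay all deduplicated keys, in journal sequence order: replaying in
 * order lets replay_now_at() release journal pins as replay progresses.
 * Alloc and extent keys need their own replay paths; everything else is a
 * plain btree insert.
 */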
static int bch2_journal_replay(struct bch_fs *c,
			       struct journal_keys keys)
{
	struct journal *j = &c->journal;
	struct journal_key *i;
	int ret;

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

	for_each_journal_key(keys, i) {
		replay_now_at(j, keys.journal_seq_base + i->journal_seq);

		switch (i->btree_id) {
		case BTREE_ID_ALLOC:
			ret = bch2_alloc_replay_key(c, i->k);
			break;
		case BTREE_ID_EXTENTS:
			ret = bch2_extent_replay_key(c, i->k);
			break;
		default:
			ret = bch2_btree_insert(c, i->btree_id, i->k,
						NULL, NULL,
						BTREE_INSERT_NOFAIL|
						BTREE_INSERT_LAZY_RW|
						BTREE_INSERT_JOURNAL_REPLAY|
						BTREE_INSERT_NOMARK);
			break;
		}
		if (ret) {
			bch_err(c, "journal replay: error %d while replaying key",
				ret);
			return ret;
		}
		cond_resched();
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	return bch2_journal_error(j);
}
static bool journal_empty(struct list_head *journal)
{
	return list_empty(journal) ||
		journal_entry_empty(&list_last_entry(journal,
					struct journal_replay, list)->j);
}
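/*
 * Walk the journal entries we read, checking that their sequence numbers
 * are contiguous and that none of them fall in a blacklisted range.
 */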
static int
verify_journal_entries_not_blacklisted_or_missing(struct bch_fs *c,
						  struct list_head *journal)
{
	struct journal_replay *i =
		list_last_entry(journal, struct journal_replay, list);
	u64 start_seq	= le64_to_cpu(i->j.last_seq);
	u64 end_seq	= le64_to_cpu(i->j.seq);
	u64 seq		= start_seq;
	int ret = 0;

	list_for_each_entry(i, journal, list) {
		fsck_err_on(seq != le64_to_cpu(i->j.seq), c,
			"journal entries %llu-%llu missing! (replaying %llu-%llu)",
			seq, le64_to_cpu(i->j.seq) - 1,
			start_seq, end_seq);

		seq = le64_to_cpu(i->j.seq);

		fsck_err_on(bch2_journal_seq_is_blacklisted(c, seq, false), c,
			    "found blacklisted journal entry %llu", seq);
		do {
			seq++;
		} while (bch2_journal_seq_is_blacklisted(c, seq, false));
	}
fsck_err:
	return ret;
}
/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case FS_USAGE_RESERVED:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case FS_USAGE_INODES:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case FS_USAGE_KEY_VERSION:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}
		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);
		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	}

	return ret;
}
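/*
 * Apply the entries that must be processed before we can replay keys or
 * even read the btree: btree roots, usage counters, and journal seq
 * blacklists. These come from the superblock's clean section after a clean
 * shutdown, and from the journal entries themselves otherwise.
 */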
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct jset_entry *entry;
	int ret;

	if (clean) {
		c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
		c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);

		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct journal_replay *i =
			list_last_entry(journal, struct journal_replay, list);

		c->bucket_clock[READ].hand = le16_to_cpu(i->j.read_clock);
		c->bucket_clock[WRITE].hand = le16_to_cpu(i->j.write_clock);

		list_for_each_entry(i, journal, list)
			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}
/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;
	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);
	k = entry->start;
	*level = entry->level;
	return k;
}
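/*
 * After a clean shutdown the superblock's clean section and the last
 * journal entry should agree exactly: on a journal sequence number mismatch
 * the clean section is discarded; clock and btree root mismatches are
 * reported as fsck errors.
 */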
static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	int ret = 0;

	if (!c->sb.clean || !j)
		return 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	mustfix_fsck_err_on(j->read_clock != clean->read_clock, c,
			"superblock read clock doesn't match journal after clean shutdown");
	mustfix_fsck_err_on(j->write_clock != clean->write_clock, c,
			"superblock write clock doesn't match journal after clean shutdown");

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root doesn't match journal after clean shutdown");
	}
fsck_err:
	return ret;
}
static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	if (le16_to_cpu(c->disk_sb.sb->version) <
	    bcachefs_metadata_version_bkey_renumber)
		bch2_sb_clean_renumber(clean, READ);

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}
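/*
 * Read the btree roots found by journal_replay_entry_early(). A bad alloc
 * btree root is not fatal, since allocation info can be rebuilt by mark and
 * sweep: those errors are only FSCK_CAN_IGNORE, and the ALLOC_INFO compat
 * bit is cleared to force that rebuild.
 */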
static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_ALLOC &&
		    test_reconstruct_alloc(c)) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_ALLOC
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_ALLOC)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_ALLOC
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_ALLOC)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}
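/*
 * Recovery, in order:
 *  1. Read the clean section from the superblock (if clean) and/or the
 *     journal, and sort/dedup the journal keys.
 *  2. Apply early entries (btree roots, usage, blacklists), then start the
 *     journal and read the btree roots.
 *  3. Read alloc info and stripes, running mark and sweep GC if needed.
 *  4. Replay the journal, write out alloc info, then run the remaining
 *     fsck passes and update the superblock.
 */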
int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	u64 journal_seq;
	LIST_HEAD(journal_entries);
	struct journal_keys journal_keys = { NULL };
	bool wrote = false, write_sb = false;
	int ret;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));

	if (!c->replicas.entries) {
		bch_info(c, "building replicas info");
		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	}

	if (!c->sb.clean || c->opts.fsck) {
		struct jset *j;

		ret = bch2_journal_read(c, &journal_entries);
		if (ret)
			goto err;

		if (mustfix_fsck_err_on(c->sb.clean && !journal_empty(&journal_entries), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!c->sb.clean && list_empty(&journal_entries)) {
			bch_err(c, "no journal entries found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;
		}

		journal_keys = journal_keys_sort(&journal_entries);
		if (!journal_keys.d) {
			ret = -ENOMEM;
			goto err;
		}

		j = &list_last_entry(&journal_entries,
				     struct journal_replay, list)->j;

		ret = verify_superblock_clean(c, &clean, j);
		if (ret)
			goto err;

		journal_seq = le64_to_cpu(j->seq) + 1;
	} else {
		journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	ret = journal_replay_early(c, clean, &journal_entries);
	if (ret)
		goto err;

	if (!c->sb.clean) {
		ret = bch2_journal_seq_blacklist_add(c,
						     journal_seq,
						     journal_seq + 4);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}

		journal_seq += 4;
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret)
		goto err;

	if (!list_empty(&journal_entries)) {
		ret = verify_journal_entries_not_blacklisted_or_missing(c,
							&journal_entries);
		if (ret)
			goto err;
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq,
				    &journal_entries);
	if (ret)
		goto err;

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";
	ret = bch2_alloc_read(c, &journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c, &journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

	if ((c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) &&
	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA))) {
		/*
		 * interior btree node updates aren't consistent with the
		 * journal; after an unclean shutdown we have to walk all
		 * pointers to metadata:
		 */
		bch_info(c, "starting metadata mark and sweep");
		err = "error in mark and sweep";
		ret = bch2_gc(c, NULL, true, true);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");
	}

	if (c->opts.fsck ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
		bch_info(c, "starting mark and sweep");
		err = "error in mark and sweep";
		ret = bch2_gc(c, &journal_keys, true, false);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");
	}

	clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	if (c->opts.norecovery)
		goto out;

	bch_verbose(c, "starting journal replay");
	err = "journal replay failed";
	ret = bch2_journal_replay(c, journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "journal replay done");

	if (!c->opts.nochanges) {
		/*
		 * note that even when filesystem was clean there might be work
		 * to do here, if we ran gc (because of fsck) which recalculated
		 * oldest gens:
		 */
		bch_verbose(c, "writing allocation info");
		err = "error writing out alloc info";
		ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW, &wrote) ?:
			bch2_alloc_write(c, BTREE_INSERT_LAZY_RW, &wrote);
		if (ret) {
			bch_err(c, "error writing alloc info");
			goto err;
		}
		bch_verbose(c, "alloc write done");
	}

	if (!(c->sb.features & (1 << BCH_FEATURE_ATOMIC_NLINK))) {
		bch_info(c, "checking inode link counts");
		err = "error in recovery";
		ret = bch2_fsck_inode_nlink(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		if (c->sb.version < bcachefs_metadata_version_new_versioning)
			c->disk_sb.sb->version_min =
				cpu_to_le16(bcachefs_metadata_version_min);
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
out:
	ret = 0;
err:
fsck_err:
	bch2_flush_fsck_errs(c);
	journal_keys_free(&journal_keys);
	journal_entries_free(&journal_entries);
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
}
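/*
 * Format-time initialization of a new filesystem: allocate btree roots and
 * journal buckets, create the root and lost+found directories, and write
 * out an initial journal entry and superblock.
 */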
int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct bch_hash_info root_hash_info;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	LIST_HEAD(journal);
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	for_each_online_member(ca, c, i)
		bch2_mark_dev_superblock(c, ca, 0);
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1, &journal);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read write";
	ret = __bch2_fs_read_write(c, true);
	if (ret)
		goto err;

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	root_inode.bi_nlink++; /* lost+found */
	bch2_inode_pack(&packed_inode, &root_inode);

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_INODES,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init(c, &lostfound_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0,
			&root_inode);
	lostfound_inode.bi_inum = BCACHEFS_ROOT_INO + 1;
	bch2_inode_pack(&packed_inode, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_btree_insert(c, BTREE_ID_INODES,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	root_hash_info = bch2_hash_info_init(c, &root_inode);

	ret = bch2_dirent_create(c, BCACHEFS_ROOT_INO, &root_hash_info, DT_DIR,
				 &lostfound, lostfound_inode.bi_inum, NULL,
				 BTREE_INSERT_NOFAIL);
	if (ret)
		goto err;

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->version = c->disk_sb.sb->version_min =
		cpu_to_le16(bcachefs_metadata_version_current);
	c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;

	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}