// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
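
/*
 * Minimal usage sketch: QSTR() only fills in .len and .name, leaving the hash
 * zero, which is sufficient for the lookups done during recovery - e.g. as
 * used below in bch2_fs_initialize():
 *
 *	struct qstr lostfound = QSTR("lost+found");
 */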

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}
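
/*
 * Note: this is a standard in-place filter - alloc keys are dropped and the
 * rest of the array compacted, so that with -o reconstruct_alloc the
 * allocation info is rebuilt by GC/fsck rather than replayed from the
 * journal (see the reconstruct_alloc handling in bch2_fs_recovery() below).
 */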

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id l_btree_id,
			     unsigned l_level,
			     struct bpos l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id, r->btree_id) ?:
		cmp_int(l_level, r->level) ?:
		bpos_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}

size_t bch2_journal_key_search(struct journal_keys *journal_keys,
			       enum btree_id id, unsigned level,
			       struct bpos pos)
{
	size_t l = 0, r = journal_keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < journal_keys->nr &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

	return l;
}
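
/*
 * Illustrative note: bch2_journal_key_search() is a lower-bound binary
 * search - it returns the index of the first element >= (id, level, pos),
 * or keys->nr if there is none. Callers must therefore re-check
 * btree_id/level/pos on the returned entry before using it, e.g.:
 *
 *	size_t idx = bch2_journal_key_search(keys, BTREE_ID_extents, 0, pos);
 *	if (idx < keys->nr && keys->d[idx].btree_id == BTREE_ID_extents)
 *		...
 */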

struct bkey_i *bch2_journal_keys_peek(struct bch_fs *c, enum btree_id btree_id,
				      unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key *end = keys->d + keys->nr;
	struct journal_key *k = keys->d +
		bch2_journal_key_search(keys, btree_id, level, pos);

	while (k < end && k->overwritten)
		k++;

	if (k < end &&
	    k->btree_id == btree_id &&
	    k->level == level &&
	    !bpos_cmp(k->k->k.p, pos))
		return k->k;

	return NULL;
}

static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
	struct bkey_i *n = iter->keys->d[idx].k;
	struct btree_and_journal_iter *biter =
		container_of(iter, struct btree_and_journal_iter, journal);

	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
	     bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
		iter->idx++;
}

int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_RW, &c->flags));

	if (idx < keys->nr &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= keys->size * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;
	}

	array_insert_item(keys->d, keys->nr, idx, n);

	list_for_each_entry(iter, &c->journal_iters, list)
		journal_iter_fix(c, iter, idx);

	return 0;
}
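
/*
 * Why the journal_iter_fix() pass above: array_insert_item() shifts every
 * element at index >= idx up by one, so a live btree_and_journal_iter
 * positioned at or past the insertion point would otherwise silently skip
 * or repeat a key.
 */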

/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've got RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}
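
/*
 * Deletion sketch: there's no separate delete path for journal keys - a
 * delete is just an insert of an empty key (a whiteout) at the given
 * position, which journal replay then applies to the btree as a deletion.
 */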

void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->nr &&
	    keys->d[idx].btree_id == btree &&
	    keys->d[idx].level == level &&
	    !bpos_cmp(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->keys->d + iter->idx;

	while (k < iter->keys->d + iter->keys->nr &&
	       k->btree_id == iter->btree_id &&
	       k->level == iter->level) {
		if (!k->overwritten)
			return k->k;

		iter->idx++;
		k = iter->keys->d + iter->idx;
	}

	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->nr)
		iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}

struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k =
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k =
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}
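
/*
 * Merge semantics of the iterator above: keys from the journal and from the
 * btree node are walked in position order, and when both sides have a key at
 * the same position the journal version wins - the btree key is skipped via
 * bch2_journal_iter_advance_btree(), since journal keys are newer by
 * definition.
 */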

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						  struct bch_fs *c,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	iter->node_iter = node_iter;
	bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
	INIT_LIST_HEAD(&iter->journal.list);
}

/*
 * This version is used by btree_gc before the filesystem has gone RW and
 * multithreaded, so it uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
	list_add(&iter->journal.list, &c->journal_iters);
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}

static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL };
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	if (list_empty(journal_entries))
		return keys;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		if (!keys.journal_seq_base)
			keys.journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	keys.size = roundup_pow_of_two(nr_keys);

	keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
	if (!keys.d)
		goto err;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys.d;
	while (src < keys.d + keys.nr) {
		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id == src[1].btree_id &&
		       src[0].level == src[1].level &&
		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys.nr = dst - keys.d;
err:
	return keys;
}
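
/*
 * Dedup note: journal_sort_key_cmp() orders duplicates oldest first, and the
 * inner loop above skips to the last key in each run of equal positions, so
 * only the newest version of each key survives - which is exactly what
 * replay wants.
 */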

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}
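
/*
 * Putting the pin for each sequence number we've replayed past allows
 * journal reclaim to make forward progress and reuse that journal space
 * while replay is still running.
 */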

static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	int ret;

	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, BTREE_TRIGGER_NORUN);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	size_t i;
	int ret;

	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
	if (!keys_sorted)
		return -ENOMEM;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	if (keys->nr)
		replay_now_at(j, keys->journal_seq_base);

	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		if (!k->allocated)
			replay_now_at(j, keys->journal_seq_base + k->journal_seq);

		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|
				    BTREE_INSERT_NOFAIL|
				    BTREE_INSERT_JOURNAL_RESERVED|
				    (!k->allocated ? BTREE_INSERT_JOURNAL_REPLAY : 0),
			     bch2_journal_replay_key(&trans, k));
		if (ret) {
			bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
				ret, bch2_btree_ids[k->btree_id], k->level);
			goto err;
		}
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);
err:
	kvfree(keys_sorted);
	return ret;
}
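
/*
 * Ordering recap, a sketch of why the sort above is by journal_seq: keys
 * must be replayed in the order they were journalled, so replay_now_at()
 * can release journal pins monotonically; keys inserted by recovery itself
 * (k->allocated, journal_seq == U32_MAX) sort last and are replayed after
 * everything read from the on-disk journal.
 */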

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);
		ca->usage_base->buckets_unavailable	= le64_to_cpu(u->buckets_unavailable);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry(i, journal, list) {
			if (i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		printbuf_reset(&buf1);
		printbuf_reset(&buf2);

		if (k1)
			bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
		else
			pr_buf(&buf1, "(none)");

		if (k2)
			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
		else
			pr_buf(&buf2, "(none)");

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, buf1.buf,
			l2, buf2.buf);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	ret = bch2_sb_clean_validate_late(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_alloc &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot	root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags	= 0;
	root_snapshot.v.parent	= 0;
	root_snapshot.v.subvol	= BCACHEFS_ROOT_SUBVOL;
	root_snapshot.v.pad	= 0;
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	ret = bch2_btree_insert(c, BTREE_ID_snapshots,
				&root_snapshot.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags	= 0;
	root_volume.v.snapshot	= cpu_to_le32(U32_MAX);
	root_volume.v.inode	= cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
				&root_volume.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	return 0;
}
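
/*
 * Bootstrap layout created above: a single snapshot node at offset U32_MAX
 * (the highest snapshot ID), and one subvolume, BCACHEFS_ROOT_SUBVOL,
 * pointing at that snapshot and at BCACHEFS_ROOT_INO as its root inode.
 */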

static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -ENOENT;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
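
/*
 * SPOS(0, BCACHEFS_ROOT_INO, U32_MAX) looks the root inode up in the
 * U32_MAX snapshot - the root snapshot created by
 * bch2_fs_initialize_subvolumes() above.
 */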

int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret = 0;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	else
		bch_info(c, "recovering from unclean shutdown");

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->replicas.entries ||
	    c->opts.rebuild_replicas) {
		bch_info(c, "building replicas info");
		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	}

	if (!c->opts.nochanges) {
		if (c->sb.version < bcachefs_metadata_version_inode_backpointers) {
			bch_info(c, "version prior to inode backpointers, upgrade and fsck required");
			c->opts.version_upgrade	= true;
			c->opts.fsck		= true;
			c->opts.fix_errors	= FSCK_OPT_YES;
		} else if (c->sb.version < bcachefs_metadata_version_subvol_dirent) {
			bch_info(c, "filesystem version is prior to subvol_dirent - upgrading");
			c->opts.version_upgrade	= true;
			c->opts.fsck		= true;
		} else if (c->sb.version < bcachefs_metadata_version_inode_v2) {
			bch_info(c, "filesystem version is prior to inode_v2 - upgrading");
			c->opts.version_upgrade	= true;
		}
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct journal_replay *i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &c->journal_entries,
					&blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		list_for_each_entry_reverse(i, &c->journal_entries, list)
			if (!i->ignore) {
				last_journal_entry = &i->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		c->journal_keys = journal_keys_sort(&c->journal_entries);
		if (!c->journal_keys.d) {
			ret = -ENOMEM;
			goto err;
		}

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (c->opts.read_journal_only)
		goto out;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean, &c->journal_entries);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;
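
	/*
	 * Rough example, assuming the skip window above: if the newest entry
	 * read from the journal had seq 100, blacklist_seq and journal_seq
	 * would both start at 101; after the bump journal_seq is 109, so
	 * seqs 101..108 get blacklisted and the new journal starts at 109.
	 */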

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq,
				    &c->journal_entries);
	if (ret)
		goto err;

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";

	down_read(&c->gc_lock);
	ret = bch2_alloc_read(c, false, false);
	up_read(&c->gc_lock);

	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	/*
	 * If we're not running fsck, this ensures bch2_fsck_err() calls are
	 * instead interpreted as bch2_inconsistent_err() calls:
	 */
	if (!c->opts.fsck)
		set_bit(BCH_FS_FSCK_DONE, &c->flags);

	if (c->opts.fsck ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "checking allocations");
		err = "error in mark and sweep";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
		bch_verbose(c, "done checking allocations");
	}

	bch2_stripes_heap_start(c);

	clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	if (c->opts.norecovery)
		goto out;

	bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
	err = "journal replay failed";
	ret = bch2_journal_replay(c);
	if (ret)
		goto err;
	if (c->opts.verbose || !c->sb.clean)
		bch_info(c, "journal replay done");

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		bch2_fs_lazy_rw(c);

		err = "error creating root snapshot node";
		ret = bch2_fs_initialize_subvolumes(c);
		if (ret)
			goto err;
	}

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		/* set bi_subvol on root inode */
		err = "error upgrading root inode for subvolumes";
		ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				    bch2_fs_upgrade_for_subvolumes(&trans));
		if (ret)
			goto err;
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
	    le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c);
		if (ret)
			goto err;

		ret = bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(&c->journal_entries);
	}
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	LIST_HEAD(journal);
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	set_bit(BCH_FS_FSCK_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1, &journal);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	err = "error marking superblock and journal";
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			goto err;
		}

		ca->new_fs_bucket_idx = 0;
	}

	err = "error creating root snapshot node";
	ret = bch2_fs_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(c, &packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err(c, "error creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}