// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

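/*
 * QSTR() builds a struct qstr from a string at the call site; see the
 * lost+found lookup in bch2_fs_initialize() below for its one use here.
 */
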
/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stack a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id l_btree_id,
			     unsigned l_level,
			     struct bpos l_pos,
			     struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos,		r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
	return (cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p));
}

static size_t journal_key_search(struct journal_keys *journal_keys,
				 enum btree_id id, unsigned level,
				 struct bpos pos)
{
	size_t l = 0, r = journal_keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < journal_keys->nr &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

	return l;
}

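/*
 * journal_key_search() is a lower bound binary search: it returns the index
 * of the first key >= the search key, or keys->nr if there is none. A sketch
 * (not from this file) of checking for an exact match at the result:
 *
 *	size_t idx = journal_key_search(keys, id, level, pos);
 *
 *	if (idx < keys->nr &&
 *	    !__journal_key_cmp(id, level, pos, &keys->d[idx]))
 *		... exact match at keys->d[idx] ...
 */
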
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
	struct bkey_i *n = iter->keys->d[idx].k;
	struct btree_and_journal_iter *biter =
		container_of(iter, struct btree_and_journal_iter, journal);

	/* If a key was inserted at or before our position, adjust our index: */
	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
	     bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
		iter->idx++;
}

int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true
	};
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	unsigned idx = journal_key_search(keys, id, level, k->k.p);

	if (idx < keys->nr &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= keys->size * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;
	}

	array_insert_item(keys->d, keys->nr, idx, n);

	list_for_each_entry(iter, &c->journal_iters, list)
		journal_iter_fix(c, iter, idx);

	return 0;
}

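/*
 * Note the "_take" in the name: on success this takes ownership of @k - @k
 * must stay live until the journal keys are freed, and will be kfree()d if
 * it is later replaced. Callers that want copy semantics should use
 * bch2_journal_key_insert() below.
 */
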
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}

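/*
 * Deletion is done by shadowing the key with a whiteout - an empty, deleted
 * key at the same position, as above. A sketch (not from this file):
 *
 *	int ret = bch2_journal_key_delete(c, BTREE_ID_extents, 0,
 *					  POS(inum, offset));
 *	if (ret)
 *		return ret;
 */
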
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = journal_key_search(keys, btree, level, pos);

	if (idx < keys->nr &&
	    keys->d[idx].btree_id == btree &&
	    keys->d[idx].level == level &&
	    !bpos_cmp(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->idx - iter->keys->nr
		? iter->keys->d + iter->idx : NULL;

	if (k &&
	    k->btree_id == iter->btree_id &&
	    k->level == iter->level)
		return k->k;

	iter->idx = iter->keys->nr;
	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->nr)
		iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= journal_key_search(&c->journal_keys, id, level, pos);
	list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}

struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k =
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k =
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}

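/*
 * The peek above merges two sorted streams: keys already in the btree node
 * and keys for this node still sitting in the journal. At equal positions
 * the journal key wins and the btree iterator is advanced past the stale
 * key - e.g. btree keys at 1 and 3 plus a journal key at 3 yield 1 (btree),
 * then 3 (the journal version).
 */
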
struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
	bch2_journal_iter_init(c, &iter->journal,
			       b->c.btree_id, b->c.level, b->data->min_key);
}

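/*
 * Typical usage, a sketch assuming the caller already has @b locked:
 *
 *	struct btree_and_journal_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 *	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 *		... process k ...
 *		bch2_btree_and_journal_iter_advance(&iter);
 *	}
 *	bch2_btree_and_journal_iter_exit(&iter);
 */
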
/* Walk btree, overlaying keys from the journal: */

static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
					    struct btree_and_journal_iter iter)
{
	unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
	struct bkey_s_c k;
	struct bkey_buf tmp;

	BUG_ON(!b->c.level);

	bch2_bkey_buf_init(&tmp);

	/* @iter is passed by value, so the caller's position is unchanged: */
	while (i < nr &&
	       (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&tmp, c, k);

		bch2_btree_node_prefetch(c, NULL, NULL, tmp.k,
					 b->c.btree_id, b->c.level - 1);

		bch2_btree_and_journal_iter_advance(&iter);
		i++;
	}

	bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct btree_trans *trans, struct btree *b,
					       enum btree_id btree_id,
					       btree_walk_key_fn key_fn)
{
	struct bch_fs *c = trans->c;
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	struct btree *child;
	int ret = 0;

	bch2_bkey_buf_init(&tmp);
	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		if (b->c.level) {
			bch2_bkey_buf_reassemble(&tmp, c, k);

			child = bch2_btree_node_get_noiter(c, tmp.k,
						b->c.btree_id, b->c.level - 1,
						false);

			ret = PTR_ERR_OR_ZERO(child);
			if (ret)
				break;

			btree_and_journal_iter_prefetch(c, b, iter);

			ret = bch2_btree_and_journal_walk_recurse(trans, child,
								  btree_id, key_fn);
			six_unlock_read(&child->c.lock);
		} else {
			ret = key_fn(trans, k);
		}

		if (ret)
			break;

		bch2_btree_and_journal_iter_advance(&iter);
	}

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

int bch2_btree_and_journal_walk(struct btree_trans *trans, enum btree_id btree_id,
				btree_walk_key_fn key_fn)
{
	struct bch_fs *c = trans->c;
	struct btree *b = c->btree_roots[btree_id].b;
	int ret = 0;

	if (btree_node_fake(b))
		return 0;

	six_lock_read(&b->c.lock, NULL, NULL);
	ret = bch2_btree_and_journal_walk_recurse(trans, b, btree_id, key_fn);
	six_unlock_read(&b->c.lock);

	return ret;
}

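/*
 * Example caller, a sketch with a hypothetical key_fn - walk every leaf key
 * in the extents btree, journal keys overlaid, before replay has run:
 *
 *	static int walk_one_key(struct btree_trans *trans, struct bkey_s_c k)
 *	{
 *		... called once per merged leaf key ...
 *		return 0;
 *	}
 *
 *	ret = bch2_btree_and_journal_walk(trans, BTREE_ID_extents, walk_one_key);
 */
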
/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p) ?:
		cmp_int(l->journal_seq,	r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}

static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL };
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	if (list_empty(journal_entries))
		return keys;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		if (!keys.journal_seq_base)
			keys.journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	keys.size = roundup_pow_of_two(nr_keys);

	keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
	if (!keys.d)
		goto err;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys.d;
	while (src < keys.d + keys.nr) {
		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id == src[1].btree_id &&
		       src[0].level == src[1].level &&
		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys.nr = dst - keys.d;
err:
	return keys;
}

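/*
 * Since journal_sort_key_cmp() sorts oldest first, the dedup loop above skips
 * to the last key in each run of equal (btree_id, level, pos) keys - i.e. for
 * each position we keep only the newest version found in the journal.
 */
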
/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

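/*
 * Advancing replay_journal_seq drops our pin on each journal entry once it
 * has been fully replayed, letting journal reclaim free the space it used.
 */
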
static int __bch2_journal_replay_key(struct btree_trans *trans,
				     struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	int ret;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		return 0;

	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED|BTREE_ITER_CACHED_NOFILL;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret   = bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(trans, &iter, k->k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
	unsigned commit_flags =
		BTREE_INSERT_LAZY_RW|
		BTREE_INSERT_NOFAIL|
		BTREE_INSERT_JOURNAL_RESERVED;

	/* Keys read from the journal are already journalled: */
	if (!k->allocated)
		commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

	return bch2_trans_do(c, NULL, NULL, commit_flags,
			     __bch2_journal_replay_key(&trans, k));
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return  cmp_int(r->level,	l->level) ?:
		cmp_int(l->journal_seq,	r->journal_seq) ?:
		cmp_int(l->btree_id,	r->btree_id) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p);
}

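/*
 * Note the reversed arguments in cmp_int(r->level, l->level): this sorts
 * descending by level, so interior node updates sort before leaf updates;
 * within a level, keys replay in journal sequence order. bch2_journal_replay()
 * below additionally replays alloc keys before everything else.
 */
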
static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	struct bch_dev *ca;
	unsigned idx;
	size_t i;
	u64 seq;
	int ret;

	keys_sorted = kmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
	if (!keys_sorted)
		return -ENOMEM;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	if (keys->nr)
		replay_now_at(j, keys->journal_seq_base);

	seq = j->replay_journal_seq;

	/*
	 * First replay updates to the alloc btree - these will only update the
	 * btree key cache:
	 */
	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		if (!k->level && k->btree_id == BTREE_ID_alloc) {
			j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
			ret = bch2_journal_replay_key(c, k);
			if (ret)
				goto err;
		}
	}

	/* Now we can start the allocator threads: */
	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
	for_each_member_device(ca, c, idx)
		bch2_wake_allocator(ca);

	/*
	 * Next replay updates to interior btree nodes:
	 */
	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		if (k->level) {
			j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
			ret = bch2_journal_replay_key(c, k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now that the btree is in a consistent state, we can start journal
	 * reclaim (which will be flushing entries from the btree key cache back
	 * to the btree:
	 */
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
	journal_reclaim_kick(j);

	j->replay_journal_seq = seq;

	/*
	 * Now replay leaf node updates:
	 */
	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		if (k->level || k->btree_id == BTREE_ID_alloc)
			continue;

		replay_now_at(j, keys->journal_seq_base + k->journal_seq);

		ret = bch2_journal_replay_key(c, k);
		if (ret)
			goto err;
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	kfree(keys_sorted);

	return bch2_journal_error(j);
err:
	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
		ret, bch2_btree_ids[k->btree_id], k->level);
	kfree(keys_sorted);

	return ret;
}

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);
		ca->usage_base->buckets_unavailable	= le64_to_cpu(u->buckets_unavailable);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry(i, journal, list) {
			if (i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		char buf1[200], buf2[200];
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
			l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
	}
fsck_err:
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	ret = bch2_sb_clean_validate(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_alloc &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot	root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags	= 0;
	root_snapshot.v.parent	= 0;
	root_snapshot.v.subvol	= cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.pad	= 0;
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	ret = bch2_btree_insert(c, BTREE_ID_snapshots,
				&root_snapshot.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags	= 0;
	root_volume.v.snapshot	= cpu_to_le32(U32_MAX);
	root_volume.v.inode	= cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
				&root_volume.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	return 0;
}

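/*
 * The two inserts above create the initial snapshot/subvolume hierarchy: a
 * root snapshot node at (0, U32_MAX) owned by the root subvolume, and the
 * root subvolume pointing back at that snapshot and at BCACHEFS_ROOT_INO.
 * Snapshot IDs of all later keys descend from this root node.
 */
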
static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bkey_is_inode(k.k)) {
		bch_err(c, "root inode not found");
		ret = -ENOENT;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret = 0;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	else
		bch_info(c, "recovering from unclean shutdown");

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->replicas.entries ||
	    c->opts.rebuild_replicas) {
		bch_info(c, "building replicas info");
		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	}

	if (!c->opts.nochanges) {
		if (c->sb.version < bcachefs_metadata_version_inode_backpointers) {
			bch_info(c, "version prior to inode backpointers, upgrade and fsck required");
			c->opts.version_upgrade	= true;
			c->opts.fsck		= true;
			c->opts.fix_errors	= FSCK_OPT_YES;
		} else if (c->sb.version < bcachefs_metadata_version_subvol_dirent) {
			bch_info(c, "filesystem version is prior to subvol_dirent - upgrading");
			c->opts.version_upgrade	= true;
			c->opts.fsck		= true;
		} else if (c->sb.version < bcachefs_metadata_version_inode_v2) {
			bch_info(c, "filesystem version is prior to inode_v2 - upgrading");
			c->opts.version_upgrade	= true;
		}
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct journal_replay *i;

		ret = bch2_journal_read(c, &c->journal_entries,
					&blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		list_for_each_entry_reverse(i, &c->journal_entries, list)
			if (!i->ignore) {
				last_journal_entry = &i->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		c->journal_keys = journal_keys_sort(&c->journal_entries);
		if (!c->journal_keys.d) {
			ret = -ENOMEM;
			goto err;
		}

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean, &c->journal_entries);
	if (ret)
		goto err;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq,
				    &c->journal_entries);
	if (ret)
		goto err;

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";
	ret = bch2_alloc_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

	if (c->opts.fsck ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "starting mark and sweep");
		err = "error in mark and sweep";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");
	}

	bch2_stripes_heap_start(c);

	clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	if (c->opts.norecovery)
		goto out;

	bch_verbose(c, "starting journal replay");
	err = "journal replay failed";
	ret = bch2_journal_replay(c);
	if (ret)
		goto err;
	bch_verbose(c, "journal replay done");

	if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
	    !c->opts.nochanges) {
		/*
		 * note that even when filesystem was clean there might be work
		 * to do here, if we ran gc (because of fsck) which recalculated
		 * oldest_gen:
		 */
		bch_verbose(c, "writing allocation info");
		err = "error writing out alloc info";
		ret = bch2_alloc_write_all(c, BTREE_INSERT_LAZY_RW);
		if (ret) {
			bch_err(c, "error writing alloc info");
			goto err;
		}
		bch_verbose(c, "alloc write done");
	}

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		bch2_fs_lazy_rw(c);

		err = "error creating root snapshot node";
		ret = bch2_fs_initialize_subvolumes(c);
		if (ret)
			goto err;
	}

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		/* set bi_subvol on root inode */
		err = "error upgrading root inode for subvolumes";
		ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				    bch2_fs_upgrade_for_subvolumes(&trans));
		if (ret)
			goto err;
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	/*
	 * With journal replay done, we can clear the journal seq blacklist
	 * table:
	 */
	BUG_ON(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
	if (le16_to_cpu(c->sb.version_min) >= bcachefs_metadata_version_btree_ptr_sectors_written)
		bch2_sb_resize_journal_seq_blacklist(&c->disk_sb, 0);

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
	    le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c);
		if (ret)
			goto err;

		ret = bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(&c->journal_entries);
	}
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	LIST_HEAD(journal);
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1, &journal);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	err = "error marking superblock and journal";
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			goto err;
		}

		ca->new_fs_bucket_idx = 0;
	}

	err = "error creating root snapshot node";
	ret = bch2_fs_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(c, &packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err(c, "error creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}