1 // SPDX-License-Identifier: GPL-2.0
5 #include "alloc_background.h"
7 #include "btree_update.h"
8 #include "btree_update_interior.h"
14 #include "fs-common.h"
16 #include "journal_io.h"
17 #include "journal_reclaim.h"
18 #include "journal_seq_blacklist.h"
24 #include "subvolume.h"
27 #include <linux/sort.h>
28 #include <linux/stat.h>
30 #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
32 /* for -o reconstruct_alloc: */
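/*
 * Dropping the BTREE_ID_alloc keys read from the journal means allocation
 * info isn't replayed; with BCH_COMPAT_alloc_info also cleared, it gets
 * rebuilt from scratch by the initial gc/fsck passes instead.
 */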
33 static void drop_alloc_keys(struct journal_keys *keys)
37 for (src = 0, dst = 0; src < keys->nr; src++)
38 if (keys->d[src].btree_id != BTREE_ID_alloc)
39 keys->d[dst++] = keys->d[src];
45 * Btree node pointers have a field to stash a pointer to the in-memory btree
46 * node; we need to zero out this field when reading in btree nodes, or when
47 * reading in keys from the journal:
49 static void zero_out_btree_mem_ptr(struct journal_keys *keys)
51 struct journal_key *i;
53 for (i = keys->d; i < keys->d + keys->nr; i++)
54 if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
55 bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
58 /* iterate over keys read from the journal: */
60 static int __journal_key_cmp(enum btree_id l_btree_id,
63 const struct journal_key *r)
65 return (cmp_int(l_btree_id, r->btree_id) ?:
66 cmp_int(l_level, r->level) ?:
67 bpos_cmp(l_pos, r->k->k.p));
70 static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
72 return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
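/*
 * journal_keys is kept as a gap buffer: d[] has @size slots, @nr of which
 * hold keys, and the unused slots form a single gap starting at @gap.  A
 * logical index in [0, nr) maps to an array position by skipping the gap:
 *
 *   e.g. size 8, nr 6, gap 2:
 *
 *     array pos: 0  1  [2  3]  4  5  6  7
 *     idx:       0  1   gap    2  3  4  5
 */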
75 static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
77 size_t gap_size = keys->size - keys->nr;
84 static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
86 return keys->d + idx_to_pos(keys, idx);
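/*
 * Returns the array position (not the logical index) of the first key
 * comparing >= the search key; the BUG_ON()s below assert these lower-bound
 * semantics:
 */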
89 size_t bch2_journal_key_search(struct journal_keys *keys,
90 enum btree_id id, unsigned level,
93 size_t l = 0, r = keys->nr, m;
96 m = l + ((r - l) >> 1);
97 if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
103 BUG_ON(l < keys->nr &&
104 __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);
107 __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
109 return idx_to_pos(keys, l);
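/*
 * Returns the first non-overwritten journal key at or after @pos in the given
 * btree and level, or NULL if there isn't one:
 */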
112 struct bkey_i *bch2_journal_keys_peek(struct bch_fs *c, enum btree_id btree_id,
113 unsigned level, struct bpos pos)
115 struct journal_keys *keys = &c->journal_keys;
116 size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);
118 while (idx < keys->size &&
119 keys->d[idx].overwritten) {
121 if (idx == keys->gap)
122 idx += keys->size - keys->nr;
125 if (idx < keys->size &&
126 keys->d[idx].btree_id == btree_id &&
127 keys->d[idx].level == level)
128 return keys->d[idx].k;
132 static void journal_iters_fix(struct bch_fs *c)
134 struct journal_keys *keys = &c->journal_keys;
135 /* The key we just inserted is immediately before the gap: */
136 struct journal_key *n = &keys->d[keys->gap - 1];
137 size_t gap_end = keys->gap + (keys->size - keys->nr);
138 struct btree_and_journal_iter *iter;
141 * If an iterator points one after the key we just inserted,
142 * and the key we just inserted compares >= the iterator's position,
143 * decrement the iterator so it points at the key we just inserted:
145 list_for_each_entry(iter, &c->journal_iters, journal.list)
146 if (iter->journal.idx == gap_end &&
148 iter->b->c.btree_id == n->btree_id &&
149 iter->b->c.level == n->level &&
150 bpos_cmp(n->k->k.p, iter->unpacked.p) >= 0)
151 iter->journal.idx = keys->gap - 1;
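/*
 * Iterators store array positions, so when the gap moves they have to be
 * remapped: positions after the old gap shift down by the gap size, positions
 * at or past the new gap shift back up, and each iterator ends up pointing at
 * the same key as before:
 */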
154 static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
156 struct journal_keys *keys = &c->journal_keys;
157 struct journal_iter *iter;
158 size_t gap_size = keys->size - keys->nr;
160 list_for_each_entry(iter, &c->journal_iters, list) {
161 if (iter->idx > old_gap)
162 iter->idx -= gap_size;
163 if (iter->idx >= new_gap)
164 iter->idx += gap_size;
168 int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
169 unsigned level, struct bkey_i *k)
171 struct journal_key n = {
177 * Ensure these keys are done last by journal replay, to unblock
180 .journal_seq = U32_MAX,
182 struct journal_keys *keys = &c->journal_keys;
183 size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
185 BUG_ON(test_bit(BCH_FS_RW, &c->flags));
187 if (idx < keys->size &&
188 journal_key_cmp(&n, &keys->d[idx]) == 0) {
189 if (keys->d[idx].allocated)
190 kfree(keys->d[idx].k);
196 idx -= keys->size - keys->nr;
198 if (keys->nr == keys->size) {
199 struct journal_keys new_keys = {
201 .size = max(keys->size, 8UL) * 2,
202 .journal_seq_base = keys->journal_seq_base,
205 new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
207 bch_err(c, "%s: error allocating new key array (size %zu)",
208 __func__, new_keys.size);
212 /* Since @keys was full, there was no gap: */
213 memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
217 /* And now the gap is at the end: */
218 keys->gap = keys->nr;
221 journal_iters_move_gap(c, keys->gap, idx);
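/*
 * Shift keys so the unused gap lands at @idx; the new key then goes into the
 * first gap slot, and the gap shrinks by one as keys->nr grows:
 */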
223 move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
227 keys->d[keys->gap++] = n;
229 journal_iters_fix(c);
235 * Can only be used from the recovery thread while we're still RO - can't be
236 * used once we've gone RW, as journal_keys is at that point used by multiple
239 int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
240 unsigned level, struct bkey_i *k)
245 n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
250 ret = bch2_journal_key_insert_take(c, id, level, n);
256 int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
257 unsigned level, struct bpos pos)
259 struct bkey_i whiteout;
261 bkey_init(&whiteout.k);
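/*
 * An empty (KEY_TYPE_deleted) key at @pos acts as a whiteout: when inserted
 * into journal_keys it overrides whatever the journal had at that position.
 */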
264 return bch2_journal_key_insert(c, id, level, &whiteout);
267 void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
268 unsigned level, struct bpos pos)
270 struct journal_keys *keys = &c->journal_keys;
271 size_t idx = bch2_journal_key_search(keys, btree, level, pos);
273 if (idx < keys->size &&
274 keys->d[idx].btree_id == btree &&
275 keys->d[idx].level == level &&
276 !bpos_cmp(keys->d[idx].k->k.p, pos))
277 keys->d[idx].overwritten = true;
280 static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
282 struct journal_key *k = iter->keys->d + iter->idx;
284 while (k < iter->keys->d + iter->keys->nr &&
285 k->btree_id == iter->btree_id &&
286 k->level == iter->level) {
291 k = iter->keys->d + iter->idx;
297 static void bch2_journal_iter_advance(struct journal_iter *iter)
299 if (iter->idx < iter->keys->size) {
301 if (iter->idx == iter->keys->gap)
302 iter->idx += iter->keys->size - iter->keys->nr;
306 static void bch2_journal_iter_exit(struct journal_iter *iter)
308 list_del(&iter->list);
311 static void bch2_journal_iter_init(struct bch_fs *c,
312 struct journal_iter *iter,
313 enum btree_id id, unsigned level,
318 iter->keys = &c->journal_keys;
319 iter->idx = bch2_journal_key_search(&c->journal_keys, id, level, pos);
322 static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
324 return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
325 iter->b, &iter->unpacked);
328 static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
330 bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
333 void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
335 switch (iter->last) {
339 bch2_journal_iter_advance_btree(iter);
342 bch2_journal_iter_advance(&iter->journal);
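/*
 * Merge keys from the btree node with keys for the same node still sitting in
 * the journal: return whichever source has the key at the lower position; on
 * a tie the journal key wins and the btree side is advanced past it, since
 * journal keys are newer than what's been written to the btree.  Whiteouts
 * are skipped, and a key past the node's max_key ends iteration:
 */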
349 struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
354 struct bkey_s_c btree_k =
355 bch2_journal_iter_peek_btree(iter);
356 struct bkey_s_c journal_k =
357 bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));
359 if (btree_k.k && journal_k.k) {
360 int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);
363 bch2_journal_iter_advance_btree(iter);
365 iter->last = cmp < 0 ? btree : journal;
366 } else if (btree_k.k) {
368 } else if (journal_k.k) {
369 iter->last = journal;
372 return bkey_s_c_null;
375 ret = iter->last == journal ? journal_k : btree_k;
378 bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
379 iter->journal.idx = iter->journal.keys->nr;
381 return bkey_s_c_null;
384 if (!bkey_deleted(ret.k))
387 bch2_btree_and_journal_iter_advance(iter);
393 struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
395 bch2_btree_and_journal_iter_advance(iter);
397 return bch2_btree_and_journal_iter_peek(iter);
400 void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
402 bch2_journal_iter_exit(&iter->journal);
405 void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
408 struct btree_node_iter node_iter,
411 memset(iter, 0, sizeof(*iter));
414 iter->node_iter = node_iter;
415 bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
416 INIT_LIST_HEAD(&iter->journal.list);
420 * this version is used by btree_gc before the filesystem has gone RW and
421 * multithreaded, so uses the journal_iters list:
423 void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
427 struct btree_node_iter node_iter;
429 bch2_btree_node_iter_init_from_start(&node_iter, b);
430 __bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
431 list_add(&iter->journal.list, &c->journal_iters);
434 /* sort and dedup all keys in the journal: */
436 void bch2_journal_entries_free(struct bch_fs *c)
438 struct journal_replay **i;
439 struct genradix_iter iter;
441 genradix_for_each(&c->journal_entries, iter, i)
443 kvpfree(*i, offsetof(struct journal_replay, j) +
444 vstruct_bytes(&(*i)->j));
445 genradix_free(&c->journal_entries);
449 * When keys compare equal, oldest compares first:
451 static int journal_sort_key_cmp(const void *_l, const void *_r)
453 const struct journal_key *l = _l;
454 const struct journal_key *r = _r;
456 return journal_key_cmp(l, r) ?:
457 cmp_int(l->journal_seq, r->journal_seq) ?:
458 cmp_int(l->journal_offset, r->journal_offset);
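/*
 * i.e. sort by btree, level and position; duplicates then sort oldest to
 * newest by journal sequence number and offset, which lets the dedup pass in
 * journal_keys_sort() keep just the newest version of each key.
 */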
461 void bch2_journal_keys_free(struct journal_keys *keys)
463 struct journal_key *i;
465 move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
466 keys->gap = keys->nr;
468 for (i = keys->d; i < keys->d + keys->nr; i++)
474 keys->nr = keys->gap = keys->size = 0;
477 static int journal_keys_sort(struct bch_fs *c)
479 struct genradix_iter iter;
480 struct journal_replay *i, **_i;
481 struct jset_entry *entry;
482 struct bkey_i *k, *_n;
483 struct journal_keys *keys = &c->journal_keys;
484 struct journal_key *src, *dst;
487 genradix_for_each(&c->journal_entries, iter, _i) {
493 if (!keys->journal_seq_base)
494 keys->journal_seq_base = le64_to_cpu(i->j.seq);
496 for_each_jset_key(k, _n, entry, &i->j)
503 keys->size = roundup_pow_of_two(nr_keys);
505 keys->d = kvmalloc(sizeof(keys->d[0]) * keys->size, GFP_KERNEL);
509 genradix_for_each(&c->journal_entries, iter, _i) {
515 BUG_ON(le64_to_cpu(i->j.seq) - keys->journal_seq_base > U32_MAX);
517 for_each_jset_key(k, _n, entry, &i->j)
518 keys->d[keys->nr++] = (struct journal_key) {
519 .btree_id = entry->btree_id,
520 .level = entry->level,
522 .journal_seq = le64_to_cpu(i->j.seq) -
523 keys->journal_seq_base,
524 .journal_offset = k->_data - i->j._data,
528 sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);
531 while (src < keys->d + keys->nr) {
532 while (src + 1 < keys->d + keys->nr &&
533 src[0].btree_id == src[1].btree_id &&
534 src[0].level == src[1].level &&
535 !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
541 keys->nr = dst - keys->d;
542 keys->gap = keys->nr;
546 /* journal replay: */
548 static void replay_now_at(struct journal *j, u64 seq)
550 BUG_ON(seq < j->replay_journal_seq);
551 BUG_ON(seq > j->replay_journal_seq_end);
553 while (j->replay_journal_seq < seq)
554 bch2_journal_pin_put(j, j->replay_journal_seq++);
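/*
 * Dropping the pin on each sequence number as replay passes it lets journal
 * reclaim free that part of the journal while replay is still in progress.
 */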
557 static int bch2_journal_replay_key(struct btree_trans *trans,
558 struct journal_key *k)
560 struct btree_iter iter;
561 unsigned iter_flags =
563 BTREE_ITER_NOT_EXTENTS;
566 if (!k->level && k->btree_id == BTREE_ID_alloc)
567 iter_flags |= BTREE_ITER_CACHED;
569 bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
570 BTREE_MAX_DEPTH, k->level,
572 ret = bch2_btree_iter_traverse(&iter);
576 /* Must be checked with btree locked: */
580 ret = bch2_trans_update(trans, &iter, k->k, BTREE_TRIGGER_NORUN);
582 bch2_trans_iter_exit(trans, &iter);
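/*
 * Note: leaf-level alloc keys go through the btree key cache
 * (BTREE_ITER_CACHED), and updates are queued with BTREE_TRIGGER_NORUN -
 * accounting for these keys has already been handled earlier in recovery, so
 * the usual triggers are skipped here.
 */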
586 static int journal_sort_seq_cmp(const void *_l, const void *_r)
588 const struct journal_key *l = *((const struct journal_key **)_l);
589 const struct journal_key *r = *((const struct journal_key **)_r);
591 return cmp_int(l->journal_seq, r->journal_seq);
594 static int bch2_journal_replay(struct bch_fs *c)
596 struct journal_keys *keys = &c->journal_keys;
597 struct journal_key **keys_sorted, *k;
598 struct journal *j = &c->journal;
602 move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
603 keys->gap = keys->nr;
605 keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
609 for (i = 0; i < keys->nr; i++)
610 keys_sorted[i] = &keys->d[i];
612 sort(keys_sorted, keys->nr,
613 sizeof(keys_sorted[0]),
614 journal_sort_seq_cmp, NULL);
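/*
 * Replay in the order keys were written to the journal rather than in key
 * order, so replay_now_at() can release journal pins as replay progresses:
 */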
617 replay_now_at(j, keys->journal_seq_base);
619 for (i = 0; i < keys->nr; i++) {
625 replay_now_at(j, keys->journal_seq_base + k->journal_seq);
627 ret = bch2_trans_do(c, NULL, NULL,
628 BTREE_INSERT_LAZY_RW|
631 ? BTREE_INSERT_JOURNAL_REPLAY|JOURNAL_WATERMARK_reserved
633 bch2_journal_replay_key(&trans, k));
635 bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
636 ret, bch2_btree_ids[k->btree_id], k->level);
641 replay_now_at(j, j->replay_journal_seq_end);
642 j->replay_journal_seq = 0;
644 bch2_journal_set_replay_done(j);
645 bch2_journal_flush_all_pins(j);
646 ret = bch2_journal_error(j);
648 if (keys->nr && !ret)
649 bch2_journal_log_msg(&c->journal, "journal replay finished");
655 /* journal replay early: */
657 static int journal_replay_entry_early(struct bch_fs *c,
658 struct jset_entry *entry)
662 switch (entry->type) {
663 case BCH_JSET_ENTRY_btree_root: {
664 struct btree_root *r;
666 if (entry->btree_id >= BTREE_ID_NR) {
667 bch_err(c, "filesystem has unknown btree type %u",
672 r = &c->btree_roots[entry->btree_id];
675 r->level = entry->level;
676 bkey_copy(&r->key, &entry->start[0]);
684 case BCH_JSET_ENTRY_usage: {
685 struct jset_entry_usage *u =
686 container_of(entry, struct jset_entry_usage, entry);
688 switch (entry->btree_id) {
689 case BCH_FS_USAGE_reserved:
690 if (entry->level < BCH_REPLICAS_MAX)
691 c->usage_base->persistent_reserved[entry->level] =
694 case BCH_FS_USAGE_inodes:
695 c->usage_base->nr_inodes = le64_to_cpu(u->v);
697 case BCH_FS_USAGE_key_version:
698 atomic64_set(&c->key_version,
705 case BCH_JSET_ENTRY_data_usage: {
706 struct jset_entry_data_usage *u =
707 container_of(entry, struct jset_entry_data_usage, entry);
709 ret = bch2_replicas_set_usage(c, &u->r,
713 case BCH_JSET_ENTRY_dev_usage: {
714 struct jset_entry_dev_usage *u =
715 container_of(entry, struct jset_entry_dev_usage, entry);
716 struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
717 unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
719 ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);
721 for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
722 ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
723 ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
724 ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
729 case BCH_JSET_ENTRY_blacklist: {
730 struct jset_entry_blacklist *bl_entry =
731 container_of(entry, struct jset_entry_blacklist, entry);
733 ret = bch2_journal_seq_blacklist_add(c,
734 le64_to_cpu(bl_entry->seq),
735 le64_to_cpu(bl_entry->seq) + 1);
738 case BCH_JSET_ENTRY_blacklist_v2: {
739 struct jset_entry_blacklist_v2 *bl_entry =
740 container_of(entry, struct jset_entry_blacklist_v2, entry);
742 ret = bch2_journal_seq_blacklist_add(c,
743 le64_to_cpu(bl_entry->start),
744 le64_to_cpu(bl_entry->end) + 1);
747 case BCH_JSET_ENTRY_clock: {
748 struct jset_entry_clock *clock =
749 container_of(entry, struct jset_entry_clock, entry);
751 atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
758 static int journal_replay_early(struct bch_fs *c,
759 struct bch_sb_field_clean *clean)
761 struct jset_entry *entry;
765 for (entry = clean->start;
766 entry != vstruct_end(&clean->field);
767 entry = vstruct_next(entry)) {
768 ret = journal_replay_entry_early(c, entry);
773 struct genradix_iter iter;
774 struct journal_replay *i, **_i;
776 genradix_for_each(&c->journal_entries, iter, _i) {
782 vstruct_for_each(&i->j, entry) {
783 ret = journal_replay_entry_early(c, entry);
790 bch2_fs_usage_initialize(c);
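/*
 * This "early" pass applies only accounting and metadata entries - btree
 * roots, usage counters, blacklists, clocks - from the superblock clean
 * section and from every journal entry directly to in-memory state, before
 * any btree nodes are read; replay of ordinary keys into the btrees happens
 * later, in bch2_journal_replay().
 */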
795 /* sb clean section: */
797 static struct bkey_i *btree_root_find(struct bch_fs *c,
798 struct bch_sb_field_clean *clean,
800 enum btree_id id, unsigned *level)
803 struct jset_entry *entry, *start, *end;
806 start = clean->start;
807 end = vstruct_end(&clean->field);
810 end = vstruct_last(j);
813 for (entry = start; entry < end; entry = vstruct_next(entry))
814 if (entry->type == BCH_JSET_ENTRY_btree_root &&
815 entry->btree_id == id)
821 return ERR_PTR(-EINVAL);
824 *level = entry->level;
828 static int verify_superblock_clean(struct bch_fs *c,
829 struct bch_sb_field_clean **cleanp,
833 struct bch_sb_field_clean *clean = *cleanp;
834 struct printbuf buf1 = PRINTBUF;
835 struct printbuf buf2 = PRINTBUF;
838 if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
839 "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
840 le64_to_cpu(clean->journal_seq),
841 le64_to_cpu(j->seq))) {
847 for (i = 0; i < BTREE_ID_NR; i++) {
848 struct bkey_i *k1, *k2;
849 unsigned l1 = 0, l2 = 0;
851 k1 = btree_root_find(c, clean, NULL, i, &l1);
852 k2 = btree_root_find(c, NULL, j, i, &l2);
857 printbuf_reset(&buf1);
858 printbuf_reset(&buf2);
861 bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
863 pr_buf(&buf1, "(none)");
866 bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
868 pr_buf(&buf2, "(none)");
870 mustfix_fsck_err_on(!k1 || !k2 ||
873 k1->k.u64s != k2->k.u64s ||
874 memcmp(k1, k2, bkey_bytes(k1)) ||
876 "superblock btree root %u doesn't match journal after clean shutdown\n"
878 "journal: l=%u %s\n", i,
883 printbuf_exit(&buf2);
884 printbuf_exit(&buf1);
888 static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
890 struct bch_sb_field_clean *clean, *sb_clean;
893 mutex_lock(&c->sb_lock);
894 sb_clean = bch2_sb_get_clean(c->disk_sb.sb);
896 if (fsck_err_on(!sb_clean, c,
897 "superblock marked clean but clean section not present")) {
898 SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
900 mutex_unlock(&c->sb_lock);
904 clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
907 mutex_unlock(&c->sb_lock);
908 return ERR_PTR(-ENOMEM);
911 ret = bch2_sb_clean_validate_late(c, clean, READ);
913 mutex_unlock(&c->sb_lock);
917 mutex_unlock(&c->sb_lock);
921 mutex_unlock(&c->sb_lock);
925 static int read_btree_roots(struct bch_fs *c)
930 for (i = 0; i < BTREE_ID_NR; i++) {
931 struct btree_root *r = &c->btree_roots[i];
936 if (i == BTREE_ID_alloc &&
937 c->opts.reconstruct_alloc) {
938 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
943 __fsck_err(c, i == BTREE_ID_alloc
944 ? FSCK_CAN_IGNORE : 0,
945 "invalid btree root %s",
947 if (i == BTREE_ID_alloc)
948 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
951 ret = bch2_btree_root_read(c, i, &r->key, r->level);
953 __fsck_err(c, i == BTREE_ID_alloc
954 ? FSCK_CAN_IGNORE : 0,
955 "error reading btree root %s",
957 if (i == BTREE_ID_alloc)
958 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
962 for (i = 0; i < BTREE_ID_NR; i++)
963 if (!c->btree_roots[i].b)
964 bch2_btree_root_alloc(c, i);
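/*
 * Create the root snapshot node and the root subvolume pointing at it; used
 * both when formatting a new filesystem and when upgrading one from before
 * subvolume/snapshot support (metadata version snapshot_2):
 */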
969 static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
971 struct bkey_i_snapshot root_snapshot;
972 struct bkey_i_subvolume root_volume;
975 bkey_snapshot_init(&root_snapshot.k_i);
976 root_snapshot.k.p.offset = U32_MAX;
977 root_snapshot.v.flags = 0;
978 root_snapshot.v.parent = 0;
979 root_snapshot.v.subvol = BCACHEFS_ROOT_SUBVOL;
980 root_snapshot.v.pad = 0;
981 SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
983 ret = bch2_btree_insert(c, BTREE_ID_snapshots,
990 bkey_subvolume_init(&root_volume.k_i);
991 root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
992 root_volume.v.flags = 0;
993 root_volume.v.snapshot = cpu_to_le32(U32_MAX);
994 root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
996 ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
1005 static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
1007 struct btree_iter iter;
1009 struct bch_inode_unpacked inode;
1012 bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
1013 SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
1014 k = bch2_btree_iter_peek_slot(&iter);
1019 if (!bkey_is_inode(k.k)) {
1020 bch_err(trans->c, "root inode not found");
1025 ret = bch2_inode_unpack(k, &inode);
1028 inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
1030 ret = bch2_inode_write(trans, &iter, &inode);
1032 bch2_trans_iter_exit(trans, &iter);
1036 int bch2_fs_recovery(struct bch_fs *c)
1038 const char *err = "cannot allocate memory";
1039 struct bch_sb_field_clean *clean = NULL;
1040 struct jset *last_journal_entry = NULL;
1041 u64 blacklist_seq, journal_seq;
1042 bool write_sb = false;
1046 clean = read_superblock_clean(c);
1047 ret = PTR_ERR_OR_ZERO(clean);
1052 bch_info(c, "recovering from clean shutdown, journal seq %llu",
1053 le64_to_cpu(clean->journal_seq));
1055 bch_info(c, "recovering from unclean shutdown");
1057 if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
1058 bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
1064 !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
1065 bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
1070 if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
1071 bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
1076 if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
1077 bch_info(c, "alloc_v2 feature bit not set, fsck required");
1078 c->opts.fsck = true;
1079 c->opts.fix_errors = FSCK_OPT_YES;
1082 if (!c->replicas.entries ||
1083 c->opts.rebuild_replicas) {
1084 bch_info(c, "building replicas info");
1085 set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
1088 if (!c->opts.nochanges) {
1089 if (c->sb.version < bcachefs_metadata_version_new_data_types) {
1090 bch_info(c, "version prior to new_data_types, upgrade and fsck required");
1091 c->opts.version_upgrade = true;
1092 c->opts.fsck = true;
1093 c->opts.fix_errors = FSCK_OPT_YES;
1097 ret = bch2_blacklist_table_initialize(c);
1099 bch_err(c, "error initializing blacklist table");
1103 if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
1104 struct genradix_iter iter;
1105 struct journal_replay **i;
1107 bch_verbose(c, "starting journal read");
1108 ret = bch2_journal_read(c, &blacklist_seq, &journal_seq);
1112 genradix_for_each_reverse(&c->journal_entries, iter, i)
1113 if (*i && !(*i)->ignore) {
1114 last_journal_entry = &(*i)->j;
1118 if (mustfix_fsck_err_on(c->sb.clean &&
1119 last_journal_entry &&
1120 !journal_entry_empty(last_journal_entry), c,
1121 "filesystem marked clean but journal not empty")) {
1122 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
1123 SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
1124 c->sb.clean = false;
1127 if (!last_journal_entry) {
1128 fsck_err_on(!c->sb.clean, c, "no journal entries found");
1132 ret = journal_keys_sort(c);
1136 if (c->sb.clean && last_journal_entry) {
1137 ret = verify_superblock_clean(c, &clean,
1138 last_journal_entry);
1145 bch_err(c, "no superblock clean section found");
1146 ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
1150 blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
1153 if (c->opts.read_journal_only)
1156 if (c->opts.reconstruct_alloc) {
1157 c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
1158 drop_alloc_keys(&c->journal_keys);
1161 zero_out_btree_mem_ptr(&c->journal_keys);
1163 ret = journal_replay_early(c, clean);
1168 * After an unclean shutdown, skip the next few journal sequence
1169 * numbers as they may have been referenced by btree writes that
1170 * happened before their corresponding journal writes - those btree
1171 * writes need to be ignored, by skipping and blacklisting the next few
1172 * journal sequence numbers:
1177 if (blacklist_seq != journal_seq) {
1178 ret = bch2_journal_seq_blacklist_add(c,
1179 blacklist_seq, journal_seq);
1181 bch_err(c, "error creating new journal seq blacklist entry");
1186 ret = bch2_fs_journal_start(&c->journal, journal_seq);
1190 ret = read_btree_roots(c);
1194 bch_verbose(c, "starting alloc read");
1195 err = "error reading allocation information";
1197 down_read(&c->gc_lock);
1198 ret = bch2_alloc_read(c);
1199 up_read(&c->gc_lock);
1203 bch_verbose(c, "alloc read done");
1205 bch_verbose(c, "starting stripes_read");
1206 err = "error reading stripes";
1207 ret = bch2_stripes_read(c);
1210 bch_verbose(c, "stripes_read done");
1213 * If we're not running fsck, this ensures bch2_fsck_err() calls are
1214 * instead interpreted as bch2_inconsistent_err() calls:
1217 set_bit(BCH_FS_FSCK_DONE, &c->flags);
1220 !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
1221 !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
1222 test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
1223 bool metadata_only = c->opts.norecovery;
1225 bch_info(c, "checking allocations");
1226 err = "error checking allocations";
1227 ret = bch2_gc(c, true, metadata_only);
1230 bch_verbose(c, "done checking allocations");
1234 bch_info(c, "checking need_discard and freespace btrees");
1235 err = "error checking need_discard and freespace btrees";
1236 ret = bch2_check_alloc_info(c);
1240 ret = bch2_check_lrus(c, true);
1243 bch_verbose(c, "done checking need_discard and freespace btrees");
1246 bch2_stripes_heap_start(c);
1248 clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
1249 set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
1250 set_bit(BCH_FS_MAY_GO_RW, &c->flags);
1253 * Skip past versions that might have been used (as nonces) but whose
1254 * pointers hadn't yet been written:
1256 if (c->sb.encryption_type && !c->sb.clean)
1257 atomic64_add(1 << 16, &c->key_version);
1259 if (c->opts.norecovery)
1262 bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
1263 err = "journal replay failed";
1264 ret = bch2_journal_replay(c);
1267 if (c->opts.verbose || !c->sb.clean)
1268 bch_info(c, "journal replay done");
1270 err = "error initializing freespace";
1271 ret = bch2_fs_freespace_init(c);
1276 bch_info(c, "checking alloc to lru refs");
1277 err = "error checking alloc to lru refs";
1278 ret = bch2_check_alloc_to_lru_refs(c);
1282 ret = bch2_check_lrus(c, true);
1285 bch_verbose(c, "done checking alloc to lru refs");
1288 if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
1291 err = "error creating root snapshot node";
1292 ret = bch2_fs_initialize_subvolumes(c);
1297 bch_verbose(c, "reading snapshots table");
1298 err = "error reading snapshots table";
1299 ret = bch2_fs_snapshots_start(c);
1302 bch_verbose(c, "reading snapshots done");
1304 if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
1305 /* set bi_subvol on root inode */
1306 err = "error upgrading root inode for subvolumes";
1307 ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
1308 bch2_fs_upgrade_for_subvolumes(&trans));
1314 bch_info(c, "starting fsck");
1315 err = "error in fsck";
1316 ret = bch2_fsck_full(c);
1319 bch_verbose(c, "fsck done");
1320 } else if (!c->sb.clean) {
1321 bch_verbose(c, "checking for deleted inodes");
1322 err = "error in recovery";
1323 ret = bch2_fsck_walk_inodes_only(c);
1326 bch_verbose(c, "check inodes done");
1329 if (enabled_qtypes(c)) {
1330 bch_verbose(c, "reading quotas");
1331 ret = bch2_fs_quota_read(c);
1334 bch_verbose(c, "quotas done");
1337 mutex_lock(&c->sb_lock);
1338 if (c->opts.version_upgrade) {
1339 c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
1340 c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
1344 if (!test_bit(BCH_FS_ERROR, &c->flags)) {
1345 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
1350 !test_bit(BCH_FS_ERROR, &c->flags) &&
1351 !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
1352 SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
1353 SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
1358 bch2_write_super(c);
1359 mutex_unlock(&c->sb_lock);
1361 if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
1362 !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
1363 le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
1364 struct bch_move_stats stats;
1366 bch_move_stats_init(&stats, "recovery");
1368 bch_info(c, "scanning for old btree nodes");
1369 ret = bch2_fs_read_write(c);
1373 ret = bch2_scan_old_btree_nodes(c, &stats);
1376 bch_info(c, "scanning for old btree nodes done");
1379 if (c->journal_seq_blacklist_table &&
1380 c->journal_seq_blacklist_table->nr > 128)
1381 queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
1385 set_bit(BCH_FS_FSCK_DONE, &c->flags);
1386 bch2_flush_fsck_errs(c);
1388 if (!c->opts.keep_journal) {
1389 bch2_journal_keys_free(&c->journal_keys);
1390 bch2_journal_entries_free(c);
1394 bch_err(c, "Error in recovery: %s (%i)", err, ret);
1396 bch_verbose(c, "ret %i", ret);
1400 bch2_fs_emergency_read_only(c);
1404 int bch2_fs_initialize(struct bch_fs *c)
1406 struct bch_inode_unpacked root_inode, lostfound_inode;
1407 struct bkey_inode_buf packed_inode;
1408 struct qstr lostfound = QSTR("lost+found");
1409 const char *err = "cannot allocate memory";
1414 bch_notice(c, "initializing new filesystem");
1416 mutex_lock(&c->sb_lock);
1417 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
1418 c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);
1420 if (c->opts.version_upgrade) {
1421 c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
1422 c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
1423 bch2_write_super(c);
1425 mutex_unlock(&c->sb_lock);
1427 set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
1428 set_bit(BCH_FS_MAY_GO_RW, &c->flags);
1429 set_bit(BCH_FS_FSCK_DONE, &c->flags);
1431 for (i = 0; i < BTREE_ID_NR; i++)
1432 bch2_btree_root_alloc(c, i);
1434 for_each_online_member(ca, c, i)
1435 bch2_dev_usage_init(ca);
1437 err = "unable to allocate journal buckets";
1438 for_each_online_member(ca, c, i) {
1439 ret = bch2_dev_journal_alloc(ca);
1441 percpu_ref_put(&ca->io_ref);
1447 * journal_res_get() will crash if called before this has
1448 * set up the journal.pin FIFO and journal.cur pointer:
1450 bch2_fs_journal_start(&c->journal, 1);
1451 bch2_journal_set_replay_done(&c->journal);
1453 err = "error going read-write";
1454 ret = bch2_fs_read_write_early(c);
1459 * Write out the superblock and journal buckets, now that we can do
1462 bch_verbose(c, "marking superblocks");
1463 err = "error marking superblock and journal";
1464 for_each_member_device(ca, c, i) {
1465 ret = bch2_trans_mark_dev_sb(c, ca);
1467 percpu_ref_put(&ca->ref);
1471 ca->new_fs_bucket_idx = 0;
1474 bch_verbose(c, "initializing freespace");
1475 err = "error initializing freespace";
1476 ret = bch2_fs_freespace_init(c);
1480 err = "error creating root snapshot node";
1481 ret = bch2_fs_initialize_subvolumes(c);
1485 bch_verbose(c, "reading snapshots table");
1486 err = "error reading snapshots table";
1487 ret = bch2_fs_snapshots_start(c);
1490 bch_verbose(c, "reading snapshots done");
1492 bch2_inode_init(c, &root_inode, 0, 0,
1493 S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
1494 root_inode.bi_inum = BCACHEFS_ROOT_INO;
1495 root_inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;
1496 bch2_inode_pack(c, &packed_inode, &root_inode);
1497 packed_inode.inode.k.p.snapshot = U32_MAX;
1499 err = "error creating root directory";
1500 ret = bch2_btree_insert(c, BTREE_ID_inodes,
1501 &packed_inode.inode.k_i,
1506 bch2_inode_init_early(c, &lostfound_inode);
1508 err = "error creating lost+found";
1509 ret = bch2_trans_do(c, NULL, NULL, 0,
1510 bch2_create_trans(&trans,
1511 BCACHEFS_ROOT_SUBVOL_INUM,
1512 &root_inode, &lostfound_inode,
1514 0, 0, S_IFDIR|0700, 0,
1515 NULL, NULL, (subvol_inum) { 0 }, 0));
1517 bch_err(c, "error creating lost+found");
1521 if (enabled_qtypes(c)) {
1522 ret = bch2_fs_quota_read(c);
1527 err = "error writing first journal entry";
1528 ret = bch2_journal_flush(&c->journal);
1532 mutex_lock(&c->sb_lock);
1533 SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
1534 SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
1536 bch2_write_super(c);
1537 mutex_unlock(&c->sb_lock);
1541 pr_err("Error initializing new filesystem: %s (%i)", err, ret);