// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos,	r->k->k.p));
}

static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}

static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	return idx >= keys->gap
		? idx + gap_size
		: idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->d + idx_to_pos(keys, idx);
}
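
/*
 * journal_keys is kept as a gap buffer: d[0..size) holds nr sorted keys, with
 * the (size - nr) unused slots forming a hole that starts at ->gap. A logical
 * index in [0, nr) maps to an array position by skipping that hole, which
 * makes inserts at or near the gap O(1) instead of O(nr).
 *
 * Illustrative sketch (made-up numbers, not from any real filesystem): with
 * size = 8, nr = 6 and gap = 2 the array looks like
 *
 *	d: [k0][k1][..][..][k2][k3][k4][k5]
 *
 * so idx_to_pos(keys, 1) == 1, while idx_to_pos(keys, 2) == 4.
 */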

static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}

static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}

struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos,
					   struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while (*idx < keys->nr &&
	       (k = idx_to_key(keys, *idx),
		k->btree_id == btree_id &&
		k->level == level &&
		bpos_cmp(k->k->k.p, end_pos) <= 0)) {
		if (bpos_cmp(k->k->k.p, pos) >= 0 &&
		    !k->overwritten)
			return k->k;

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	return NULL;
}

struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}
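
/*
 * Sketch of how the *idx cookie is meant to be used (illustrative, not from
 * the original file): zero it before the first lookup, then leave it alone so
 * later calls can resume from the cached index instead of repeating the
 * binary search:
 *
 *	size_t idx = 0;
 *	struct bkey_i *k;
 *
 *	while ((k = bch2_journal_keys_peek_upto(c, BTREE_ID_extents, 0,
 *						pos, end_pos, &idx))) {
 *		// use k, then advance past it:
 *		pos = bpos_successor(k->k.p);
 *		idx++;
 *	}
 */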

static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct btree_and_journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle that:
	 */
	list_for_each_entry(iter, &c->journal_iters, journal.list)
		if (iter->journal.idx == gap_end)
			iter->journal.idx = keys->gap - 1;
}

static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}
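
/*
 * Iterators store raw array positions, not logical indices: when the gap
 * moves, every position between the old and new gap location shifts by
 * gap_size, so live iterators have to be rebased the same way the keys
 * themselves are about to be shifted by move_gap().
 */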

int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_RW, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr	= keys->nr,
			.size	= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;

		/* And now the gap is at the end: */
		keys->gap = keys->nr;
	}

	journal_iters_move_gap(c, keys->gap, idx);

	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
	keys->gap = idx;

	keys->nr++;
	keys->d[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}
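
/*
 * Note on ownership (the "_take" in bch2_journal_key_insert_take() above): on
 * success the journal keys array owns @k, which must be heap allocated - it's
 * freed either when the key is overwritten by a later insert or by
 * bch2_journal_keys_free().
 */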

/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've got RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}

void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->d[idx].btree_id	== btree &&
	    keys->d[idx].level		== level &&
	    !bpos_cmp(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}

struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->keys->d + iter->idx;

	while (k < iter->keys->d + iter->keys->size &&
	       k->btree_id	== iter->btree_id &&
	       k->level		== iter->level) {
		if (!k->overwritten)
			return bkey_i_to_s_c(k->k);

		bch2_journal_iter_advance(iter);
		k = iter->keys->d + iter->idx;
	}

	return bkey_s_c_null;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (!bpos_cmp(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}

struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k, ret;
again:
	if (iter->at_end)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_cmp(btree_k.k->p, iter->pos) < 0)
		bch2_journal_iter_advance_btree(iter);

	while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
	       bpos_cmp(journal_k.k->p, iter->pos) < 0)
		bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_cmp(journal_k.k->p, btree_k.k->p) <= 0)
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_cmp(ret.k->p, iter->b->data->max_key) > 0)
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}
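
/*
 * The merge rule above: at equal positions the journal key wins (<= in the
 * comparison), since anything in the journal is newer than what's in the
 * btree node on disk - this is what lets recovery see updates that were
 * journalled but never written back to the btree.
 */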

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						  struct bch_fs *c,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	iter->node_iter = node_iter;
	bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
	INIT_LIST_HEAD(&iter->journal.list);
	iter->pos = b->data->min_key;
	iter->at_end = false;
}

/*
 * this version is used by btree_gc before filesystem has gone RW and
 * multithreaded, so uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
	list_add(&iter->journal.list, &c->journal_iters);
}
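
/*
 * Registering on c->journal_iters is what lets bch2_journal_key_insert()
 * fix up live iterators when it moves the gap - only safe single threaded,
 * which is why only the pre-RW btree_gc path uses this variant.
 */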

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct bch_fs *c)
{
	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		if (*i)
			kvpfree(*i, offsetof(struct journal_replay, j) +
				vstruct_bytes(&(*i)->j));
	genradix_free(&c->journal_entries);
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = keys->gap = keys->size = 0;
}

static int journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	if (!nr_keys)
		return 0;

	keys->size = roundup_pow_of_two(nr_keys);

	keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
	if (!keys->d)
		return -ENOMEM;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		for_each_jset_key(k, _n, entry, &i->j)
			keys->d[keys->nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq),
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys->d;
	while (src < keys->d + keys->nr) {
		while (src + 1 < keys->d + keys->nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys->nr = dst - keys->d;
	keys->gap = keys->nr;
	return 0;
}
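
/*
 * Since journal_sort_key_cmp() sorts equal positions oldest first, the dedup
 * loop above keeps the *last* key of each run of duplicates - i.e. the newest
 * version of each (btree_id, level, pos) wins.
 */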

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	int ret;

	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, BTREE_TRIGGER_NORUN);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
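
/*
 * BTREE_TRIGGER_NORUN: a key being replayed already went through triggers
 * when it was originally committed and journalled, so replaying it into the
 * btree must not run them (and account its disk usage) a second time.
 */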

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	size_t i;
	int ret;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
	if (!keys_sorted)
		return -ENOMEM;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		replay_now_at(j, k->journal_seq);

		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|
				    BTREE_INSERT_NOFAIL|
				    (!k->allocated
				     ? BTREE_INSERT_JOURNAL_REPLAY|JOURNAL_WATERMARK_reserved
				     : 0),
				    bch2_journal_replay_key(&trans, k));
		if (ret) {
			bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
				ret, bch2_btree_ids[k->btree_id], k->level);
			goto err;
		}
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);

	if (keys->nr && !ret)
		bch2_journal_log_msg(&c->journal, "journal replay finished");
err:
	kvfree(keys_sorted);
	return ret;
}
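
/*
 * Replay order matters in two ways: keys go in journal-sequence order so
 * replay_now_at() can drop journal pins as it advances, and keys inserted by
 * recovery itself (k->allocated, journal_seq == U32_MAX) sort last and are
 * replayed as ordinary updates rather than with the replay/reserved
 * watermark, since they were never in the journal to begin with.
 */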

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}
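
/*
 * "Early" replay handles the non-btree-key journal entries (btree roots,
 * usage counters, blacklists, IO clocks) - these have to be applied before
 * the btree is usable, whereas ordinary keys are replayed later by
 * bch2_journal_replay() once the btree is up.
 */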

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		printbuf_reset(&buf1);
		printbuf_reset(&buf2);

		if (k1)
			bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
		else
			prt_printf(&buf1, "(none)");

		if (k2)
			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
		else
			prt_printf(&buf2, "(none)");

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(&k1->k)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, buf1.buf,
			l2, buf2.buf);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	ret = bch2_sb_clean_validate_late(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
		return true;
	default:
		return false;
	}
}

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c,
				   btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot	root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags	= 0;
	root_snapshot.v.parent	= 0;
	root_snapshot.v.subvol	= cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.pad	= 0;
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	ret = bch2_btree_insert(c, BTREE_ID_snapshots,
				&root_snapshot.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags	= 0;
	root_volume.v.snapshot	= cpu_to_le32(U32_MAX);
	root_volume.v.inode	= cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
				&root_volume.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	return 0;
}

static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -ENOENT;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
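
/*
 * Rough shape of recovery as implemented below: read the superblock clean
 * section and/or the journal, sort and dedup journal keys, replay "early"
 * entries (roots, usage, blacklists), read btree roots and alloc info, then -
 * with journal keys overlaid - run whatever fsck passes are required before
 * going read-write.
 */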

int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret = 0;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	else
		bch_info(c, "recovering from unclean shutdown");

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->opts.nochanges) {
		if (c->sb.version < bcachefs_metadata_version_backpointers) {
			bch_info(c, "version prior to backpointers, upgrade and fsck required");
			c->opts.version_upgrade	= true;
			c->opts.fsck		= true;
			c->opts.fix_errors	= FSCK_OPT_YES;
		} else if (c->sb.version < bcachefs_metadata_version_inode_v3) {
			bch_info(c, "version prior to inode_v3, upgrade required");
			c->opts.version_upgrade	= true;
		}
	}

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		ret = journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}

		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	/*
	 * note: cmd_list_journal needs the blacklist table fully up to date so
	 * it can asterisk ignored journal entries:
	 */
	if (c->opts.read_journal_only)
		goto out;

	ret = bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";

	down_read(&c->gc_lock);
	ret = bch2_alloc_read(c);
	up_read(&c->gc_lock);

	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	bch2_stripes_heap_start(c);

	if (c->opts.fsck) {
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "checking allocations");
		err = "error checking allocations";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
		bch_verbose(c, "done checking allocations");

		set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

		bch_info(c, "checking need_discard and freespace btrees");
		err = "error checking need_discard and freespace btrees";
		ret = bch2_check_alloc_info(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking need_discard and freespace btrees");

		if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
			err = "error creating root snapshot node";
			ret = bch2_fs_initialize_subvolumes(c);
			if (ret)
				goto err;
		}

		bch_verbose(c, "reading snapshots table");
		err = "error reading snapshots table";
		ret = bch2_fs_snapshots_start(c);
		if (ret)
			goto err;
		bch_verbose(c, "reading snapshots done");

		set_bit(BCH_FS_MAY_GO_RW, &c->flags);

		bch_info(c, "starting journal replay, %zu keys", c->journal_keys.nr);
		err = "journal replay failed";
		ret = bch2_journal_replay(c);
		if (ret)
			goto err;
		if (c->opts.verbose || !c->sb.clean)
			bch_info(c, "journal replay done");

		bch_info(c, "checking lrus");
		err = "error checking lrus";
		ret = bch2_check_lrus(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking lrus");
		set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);

		bch_info(c, "checking backpointers to alloc keys");
		err = "error checking backpointers to alloc keys";
		ret = bch2_check_btree_backpointers(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking backpointers to alloc keys");

		bch_info(c, "checking backpointers to extents");
		err = "error checking backpointers to extents";
		ret = bch2_check_backpointers_to_extents(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking backpointers to extents");

		bch_info(c, "checking extents to backpointers");
		err = "error checking extents to backpointers";
		ret = bch2_check_extents_to_backpointers(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking extents to backpointers");
		set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);

		bch_info(c, "checking alloc to lru refs");
		err = "error checking alloc to lru refs";
		ret = bch2_check_alloc_to_lru_refs(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking alloc to lru refs");
		set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
	} else {
		set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
		set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
		set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
		set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
		set_bit(BCH_FS_FSCK_DONE, &c->flags);

		if (c->opts.norecovery)
			goto out;

		if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
			err = "error creating root snapshot node";
			ret = bch2_fs_initialize_subvolumes(c);
			if (ret)
				goto err;
		}

		bch_verbose(c, "reading snapshots table");
		err = "error reading snapshots table";
		ret = bch2_fs_snapshots_start(c);
		if (ret)
			goto err;
		bch_verbose(c, "reading snapshots done");

		set_bit(BCH_FS_MAY_GO_RW, &c->flags);

		bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
		err = "journal replay failed";
		ret = bch2_journal_replay(c);
		if (ret)
			goto err;
		if (c->opts.verbose || !c->sb.clean)
			bch_info(c, "journal replay done");
	}

	err = "error initializing freespace";
	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		/* set bi_subvol on root inode */
		err = "error upgrading root inode for subvolumes";
		ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				    bch2_fs_upgrade_for_subvolumes(&trans));
		if (ret)
			goto err;
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
	    le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c);
		if (ret)
			goto err;

		ret = bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(c);
	}
	kfree(clean);

	if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	if (ret)
		bch_err(c, "Error in recovery: %s (%s)", err, bch2_err_str(ret));
	else
		bch_verbose(c, "ret %s", bch2_err_str(ret));
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}
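
/*
 * Format-time initialization: unlike recovery this starts from nothing, so it
 * allocates fresh btree roots and journal buckets, then creates the initial
 * metadata (root snapshot/subvolume, root directory, lost+found) through the
 * normal btree update paths.
 */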

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	if (c->sb.version < bcachefs_metadata_version_inode_v3)
		c->opts.version_upgrade = true;

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
	set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
	set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);
	set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	set_bit(BCH_FS_FSCK_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_online_member(ca, c, i)
		bch2_dev_usage_init(ca);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	err = "error marking superblock and journal";
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			goto err;
		}

		ca->new_fs_bucket_idx = 0;
	}

	bch_verbose(c, "initializing freespace");
	err = "error initializing freespace";
	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	err = "error creating root snapshot node";
	ret = bch2_fs_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err(c, "error creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}