// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }
/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}
/*
 * Btree node pointers have a field to stack a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}
/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos,	r->k->k.p));
}

static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}
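/*
 * journal_keys is a gap buffer: an array of keys->size slots holding keys->nr
 * keys, with the unused slots collected into a single gap at keys->gap.
 * Translating a logical index to an array position just means skipping the
 * gap; an illustrative layout (for exposition, not from the original source):
 *
 *	d: [0 .. gap-1] [ <- (size - nr) empty slots -> ] [gap .. nr-1]
 */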
static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	return idx >= keys->gap ? idx + gap_size : idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->d + idx_to_pos(keys, idx);
}
static size_t __bch2_journal_key_search(struct journal_keys *keys,
					enum btree_id id, unsigned level,
					struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return l;
}
static size_t bch2_journal_key_search(struct journal_keys *keys,
				      enum btree_id id, unsigned level,
				      struct bpos pos)
{
	return idx_to_pos(keys, __bch2_journal_key_search(keys, id, level, pos));
}
struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos,
					   struct bpos end_pos, size_t *idx)
{
	struct journal_keys *keys = &c->journal_keys;
	unsigned iters = 0;
	struct journal_key *k;
search:
	if (!*idx)
		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);

	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
			return NULL;

		if (__journal_key_cmp(btree_id, level, pos, k) <= 0 &&
		    !k->overwritten)
			return k->k;

		(*idx)++;
		iters++;
		if (iters == 10) {
			*idx = 0;
			goto search;
		}
	}

	return NULL;
}
struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	size_t idx = 0;

	return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos, &idx);
}
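/*
 * Illustrative use (hypothetical caller, for exposition only): during
 * recovery, btree iteration overlays journal keys on what's on disk, e.g.
 *
 *	size_t idx = 0;
 *	struct bkey_i *k = bch2_journal_keys_peek_upto(c, BTREE_ID_extents, 0,
 *				pos, end_pos, &idx);
 *
 * which returns the first non-overwritten journal key in [pos, end_pos],
 * or NULL if there is none.
 */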
static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct btree_and_journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted, decrement
	 * the iterator so it points at the key we just inserted - if the
	 * decrement was unnecessary, bch2_btree_and_journal_iter_peek() will
	 * handle its own advancing:
	 */
	list_for_each_entry(iter, &c->journal_iters, journal.list)
		if (iter->journal.idx == gap_end)
			iter->journal.idx = keys->gap - 1;
}
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}
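/*
 * Worked example (illustrative): size = 8, nr = 6, so gap_size = 2. Moving
 * the gap from index 6 to index 2 shifts the keys at array positions 2..5 up
 * to 4..7; an iterator at position 3 is left alone by the "> old_gap" check,
 * then bumped to 5 by the ">= new_gap" check, following its key.
 */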
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_RW, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr	= keys->nr,
			.size	= max_t(size_t, keys->size, 8) * 2,
		};

		new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -BCH_ERR_ENOMEM_journal_key_insert;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		keys->d		= new_keys.d;
		keys->nr	= new_keys.nr;
		keys->size	= new_keys.size;

		/* And now the gap is at the end: */
		keys->gap = keys->nr;
	}

	journal_iters_move_gap(c, keys->gap, idx);

	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
	keys->gap = idx;

	keys->nr++;
	keys->d[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}
/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've got RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_key_insert;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}
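/*
 * Example (illustrative only): dropping a stale alloc key at @pos during
 * recovery, before the filesystem goes RW:
 *
 *	ret = bch2_journal_key_delete(c, BTREE_ID_alloc, 0, pos);
 *
 * The inserted whiteout shadows any older version of the key during journal
 * replay.
 */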
void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->d[idx].btree_id	== btree &&
	    keys->d[idx].level		== level &&
	    bpos_eq(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}
static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->keys->d + iter->idx;

	while (k < iter->keys->d + iter->keys->size &&
	       k->btree_id	== iter->btree_id &&
	       k->level		== iter->level) {
		if (!k->overwritten)
			return bkey_i_to_s_c(k->k);

		bch2_journal_iter_advance(iter);
		k = iter->keys->d + iter->idx;
	}

	return bkey_s_c_null;
}
static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);
}
static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	if (bpos_eq(iter->pos, SPOS_MAX))
		iter->at_end = true;
	else
		iter->pos = bpos_successor(iter->pos);
}
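/*
 * Merge a btree node iterator with an iterator over journal keys: on ties,
 * the journal key takes precedence, since it's newer than what's in the
 * btree node, and deleted keys (whiteouts) are skipped entirely.
 */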
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c btree_k, journal_k, ret;
again:
	if (iter->at_end)
		return bkey_s_c_null;

	while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
	       bpos_lt(btree_k.k->p, iter->pos))
		bch2_journal_iter_advance_btree(iter);

	while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
	       bpos_lt(journal_k.k->p, iter->pos))
		bch2_journal_iter_advance(&iter->journal);

	ret = journal_k.k &&
		(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
		? journal_k
		: btree_k;

	if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
		ret = bkey_s_c_null;

	if (ret.k) {
		iter->pos = ret.k->p;
		if (bkey_deleted(ret.k)) {
			bch2_btree_and_journal_iter_advance(iter);
			goto again;
		}
	} else {
		iter->pos = SPOS_MAX;
		iter->at_end = true;
	}

	return ret;
}
void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						  struct bch_fs *c,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	iter->node_iter = node_iter;
	bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
	INIT_LIST_HEAD(&iter->journal.list);
	iter->pos = b->data->min_key;
	iter->at_end = false;
}

/*
 * this version is used by btree_gc before filesystem has gone RW and
 * multithreaded, so uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
	list_add(&iter->journal.list, &c->journal_iters);
}
/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct bch_fs *c)
{
	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		if (*i)
			kvpfree(*i, offsetof(struct journal_replay, j) +
				vstruct_bytes(&(*i)->j));
	genradix_free(&c->journal_entries);
}
/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}
void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = keys->gap = keys->size = 0;
}
static void __journal_keys_sort(struct journal_keys *keys)
{
	struct journal_key *src, *dst;

	sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys->d;
	while (src < keys->d + keys->nr) {
		while (src + 1 < keys->d + keys->nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       bpos_eq(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys->nr = dst - keys->d;
}
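/*
 * Note: after sorting, duplicates are adjacent with the oldest first; the
 * inner loop above skips ahead to the last (newest) duplicate, so replay
 * sees only the most recent version of each key.
 */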
static int journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct jset_entry *entry;
	struct bkey_i *k;
	struct journal_keys *keys = &c->journal_keys;
	size_t nr_keys = 0, nr_read = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		for_each_jset_key(k, entry, &i->j)
			nr_keys++;
	}

	if (!nr_keys)
		return 0;

	keys->size = roundup_pow_of_two(nr_keys);

	keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
	if (!keys->d) {
		bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys); trying slowpath",
			nr_keys);

		do {
			keys->size >>= 1;
			keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
		} while (!keys->d && keys->size > nr_keys / 8);

		if (!keys->d) {
			bch_err(c, "Failed to allocate %zu size buffer for sorted journal keys; exiting",
				keys->size);
			return -BCH_ERR_ENOMEM_journal_keys_sort;
		}
	}

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		cond_resched();

		for_each_jset_key(k, entry, &i->j) {
			if (keys->nr == keys->size) {
				__journal_keys_sort(keys);

				if (keys->nr > keys->size * 7 / 8) {
					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu/%zu",
						keys->nr, keys->size, nr_read, nr_keys);
					return -BCH_ERR_ENOMEM_journal_keys_sort;
				}
			}

			keys->d[keys->nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq),
				.journal_offset	= k->_data - i->j._data,
			};

			nr_read++;
		}
	}

	__journal_keys_sort(keys);
	keys->gap = keys->nr;

	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_keys, keys->nr);
	return 0;
}
/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}
static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	unsigned update_flags = BTREE_TRIGGER_NORUN;
	int ret;

	/*
	 * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
	 * keep the key cache coherent with the underlying btree. Nothing
	 * besides the allocator is doing updates yet so we don't need key cache
	 * coherency for non-alloc btrees, and key cache fills for snapshots
	 * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
	 * the snapshots recovery pass runs.
	 */
	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;
	else
		update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}
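/*
 * Keys are replayed in journal-sequence order, not btree order, so that
 * journal pins can be dropped as replay advances (via replay_now_at()),
 * letting journal reclaim make forward progress while replay is running.
 */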
static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	u64 start_seq	= c->journal_replay_seq_start;
	u64 end_seq	= c->journal_replay_seq_end;
	size_t i;
	int ret;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
	if (!keys_sorted)
		return -BCH_ERR_ENOMEM_journal_replay;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	if (keys->nr) {
		ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
					   keys->nr, start_seq, end_seq);
		if (ret)
			goto err;
	}

	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		replay_now_at(j, k->journal_seq);

		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|
				    BTREE_INSERT_NOFAIL|
				    (!k->allocated
				     ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim
				     : 0),
			     bch2_journal_replay_key(&trans, k));
		if (ret) {
			bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
				bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret));
			goto err;
		}
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);

	if (keys->nr && !ret)
		bch2_journal_log_msg(c, "journal replay finished");
err:
	kvfree(keys_sorted);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
			ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
			if (ret)
				return ret;
		}

		r = bch2_btree_id_root(c, entry->btree_id);

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}
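/*
 * Walk every jset entry - from the superblock clean section if present,
 * otherwise from the journal itself - before any keys are replayed, so that
 * btree roots, usage info, clocks and blacklists are set up first:
 */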
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}
/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}
static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		printbuf_reset(&buf1);
		printbuf_reset(&buf2);

		if (k1)
			bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
		else
			prt_printf(&buf1, "(none)");

		if (k2)
			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
		else
			prt_printf(&buf2, "(none)");

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(&k1->k)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, buf1.buf,
			l2, buf2.buf);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}
static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
	}

	ret = bch2_sb_clean_validate_late(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}
static bool btree_id_is_alloc(enum btree_id id)
{
	switch (id) {
	case BTREE_ID_alloc:
	case BTREE_ID_backpointers:
	case BTREE_ID_need_discard:
	case BTREE_ID_freespace:
	case BTREE_ID_bucket_gens:
		return true;
	default:
		return false;
	}
}
static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c); i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		if (btree_id_is_alloc(i) &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c,
				   btree_id_is_alloc(i)
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (btree_id_is_alloc(i))
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			ret = 0;
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->b) {
			r->alive = false;
			r->level = 0;
			bch2_btree_root_alloc(c, i);
		}
	}
fsck_err:
	return ret;
}
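/*
 * A new filesystem starts with a single snapshot tree (id 1) whose root
 * snapshot is U32_MAX, owned by the root subvolume (BCACHEFS_ROOT_SUBVOL),
 * which in turn points at the root inode:
 */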
static int bch2_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot_tree	root_tree;
	struct bkey_i_snapshot		root_snapshot;
	struct bkey_i_subvolume		root_volume;
	int ret;

	bkey_snapshot_tree_init(&root_tree.k_i);
	root_tree.k.p.offset		= 1;
	root_tree.v.master_subvol	= cpu_to_le32(1);
	root_tree.v.root_snapshot	= cpu_to_le32(U32_MAX);

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags	= 0;
	root_snapshot.v.parent	= 0;
	root_snapshot.v.subvol	= cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
	root_snapshot.v.tree	= cpu_to_le32(1);
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags	= 0;
	root_volume.v.snapshot	= cpu_to_le32(U32_MAX);
	root_volume.v.inode	= cpu_to_le64(BCACHEFS_ROOT_INO);

	ret =   bch2_btree_insert(c, BTREE_ID_snapshot_trees,
				  &root_tree.k_i,
				  NULL, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_snapshots,
				  &root_snapshot.k_i,
				  NULL, NULL, 0) ?:
		bch2_btree_insert(c, BTREE_ID_subvolumes,
				  &root_volume.k_i,
				  NULL, NULL, 0);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
			       SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -BCH_ERR_ENOENT_inode;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
/* set bi_subvol on root inode */
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				__bch2_fs_upgrade_for_subvolumes(&trans));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
static void check_version_upgrade(struct bch_fs *c)
{
	unsigned latest_compatible = bch2_version_compatible(c->sb.version);
	unsigned latest_version	= bcachefs_metadata_version_current;
	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
	unsigned new_version = 0;
	u64 recovery_passes;

	if (old_version < bcachefs_metadata_required_upgrade_below) {
		if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
		    latest_compatible < bcachefs_metadata_required_upgrade_below)
			new_version = latest_version;
		else
			new_version = latest_compatible;
	} else {
		switch (c->opts.version_upgrade) {
		case BCH_VERSION_UPGRADE_compatible:
			new_version = latest_compatible;
			break;
		case BCH_VERSION_UPGRADE_incompatible:
			new_version = latest_version;
			break;
		case BCH_VERSION_UPGRADE_none:
			new_version = old_version;
			break;
		}
	}

	if (new_version > old_version) {
		struct printbuf buf = PRINTBUF;

		if (old_version < bcachefs_metadata_required_upgrade_below)
			prt_str(&buf, "Version upgrade required:\n");

		if (old_version != c->sb.version) {
			prt_str(&buf, "Version upgrade from ");
			bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
			prt_str(&buf, " to ");
			bch2_version_to_text(&buf, c->sb.version);
			prt_str(&buf, " incomplete\n");
		}

		prt_printf(&buf, "Doing %s version upgrade from ",
			   BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
			   ? "incompatible" : "compatible");
		bch2_version_to_text(&buf, old_version);
		prt_str(&buf, " to ");
		bch2_version_to_text(&buf, new_version);
		prt_newline(&buf);

		recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version);
		if (recovery_passes) {
			prt_str(&buf, "fsck required");

			c->recovery_passes_explicit |= recovery_passes;
			c->opts.fix_errors = FSCK_FIX_yes;
		}

		bch_info(c, "%s", buf.buf);

		mutex_lock(&c->sb_lock);
		bch2_sb_upgrade(c, new_version);
		mutex_unlock(&c->sb_lock);

		printbuf_exit(&buf);
	}
}
static int bch2_check_allocations(struct bch_fs *c)
{
	return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	return 0;
}
struct recovery_pass_fn {
	int		(*fn)(struct bch_fs *);
	const char	*name;
	unsigned	when;
};

static struct recovery_pass_fn recovery_passes[] = {
#define x(_fn, _when)	{ .fn = bch2_##_fn, .name = #_fn, .when = _when },
	BCH_RECOVERY_PASSES()
#undef x
};
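/*
 * BCH_RECOVERY_PASSES() is an x-macro list; with x() defined as above, each
 * entry expands to one initializer - e.g. (illustrative, assuming a pass
 * named journal_replay that runs with PASS_ALWAYS):
 *
 *	{ .fn = bch2_journal_replay, .name = "journal_replay", .when = PASS_ALWAYS },
 */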
u64 bch2_fsck_recovery_passes(void)
{
	u64 ret = 0;

	for (unsigned i = 0; i < ARRAY_SIZE(recovery_passes); i++)
		if (recovery_passes[i].when & PASS_FSCK)
			ret |= BIT_ULL(i);
	return ret;
}
static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	struct recovery_pass_fn *p = recovery_passes + c->curr_recovery_pass;

	if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
		return false;
	if (c->recovery_passes_explicit & BIT_ULL(pass))
		return true;
	if ((p->when & PASS_FSCK) && c->opts.fsck)
		return true;
	if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
		return true;
	if (p->when & PASS_ALWAYS)
		return true;
	return false;
}
static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
	int ret = 0;

	c->curr_recovery_pass = pass;

	if (should_run_recovery_pass(c, pass)) {
		struct recovery_pass_fn *p = recovery_passes + pass;

		if (!(p->when & PASS_SILENT))
			printk(KERN_INFO bch2_log_msg(c, "%s..."), p->name);
		ret = p->fn(c);
		if (ret)
			return ret;
		if (!(p->when & PASS_SILENT))
			printk(KERN_CONT " done\n");
	}

	return ret;
}
static int bch2_run_recovery_passes(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < ARRAY_SIZE(recovery_passes)) {
		ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
		if (bch2_err_matches(ret, BCH_ERR_restart_recovery))
			continue;
		if (ret)
			break;
		c->curr_recovery_pass++;
	}

	return ret;
}
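/*
 * A pass can rewind c->curr_recovery_pass and return
 * -BCH_ERR_restart_recovery to re-run earlier passes; the loop above then
 * continues without incrementing, re-reading curr_recovery_pass instead of
 * stepping past the pass that asked for the restart.
 */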
int bch2_fs_recovery(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq = 0, blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret = 0;

	if (c->sb.clean) {
		clean = read_superblock_clean(c);
		ret = PTR_ERR_OR_ZERO(clean);
		if (ret)
			goto err;

		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	} else {
		bch_info(c, "recovering from unclean shutdown");
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
		ret = -EINVAL;
		goto err;
	}

	if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery))
		check_version_upgrade(c);

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}
	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		/*
		 * note: cmd_list_journal needs the blacklist table fully up to date so
		 * it can asterisk ignored journal entries:
		 */
		if (c->opts.read_journal_only)
			goto out;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			if (clean)
				goto use_clean;

			genradix_for_each_reverse(&c->journal_entries, iter, i)
				if (*i) {
					last_journal_entry = &(*i)->j;
					(*i)->ignore = false;
					break;
				}
		}

		ret = journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = -BCH_ERR_fsck_repair_impossible;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}
	c->journal_replay_seq_start	= last_seq;
	c->journal_replay_seq_end	= blacklist_seq - 1;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret =   bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
					     blacklist_seq, journal_seq) ?:
			bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret =   bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
				     journal_seq, last_seq, blacklist_seq - 1) ?:
		bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	if (c->opts.reconstruct_alloc)
		bch2_journal_log_msg(c, "dropping alloc info");

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	if (c->opts.fsck &&
	    (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
	     BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
		c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);
	ret = bch2_run_recovery_passes(c);
	if (ret)
		goto err;

	/* If we fixed errors, verify that fs is actually clean now: */
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    test_bit(BCH_FS_ERRORS_FIXED, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
		clear_bit(BCH_FS_ERRORS_FIXED, &c->flags);

		c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

		ret = bch2_run_recovery_passes(c);
		if (ret)
			goto err;

		if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) ||
		    test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
			bch_err(c, "Second fsck run was not clean");
			set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
		}

		set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}
	mutex_lock(&c->sb_lock);
	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) {
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
	    c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch2_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret =   bch2_fs_read_write(c) ?:
			bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal &&
	    test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(c);
	}
	kfree(clean);

	if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
		bch2_fs_read_write_early(c);
		bch2_delete_dead_snapshots_async(c);
	}

	if (ret)
		bch_err_fn(c, ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}
int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	struct bch_dev *ca;
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	bch2_sb_maybe_downgrade(c);

	if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
		bch2_sb_upgrade(c, bcachefs_metadata_version_current);
		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	c->curr_recovery_pass = ARRAY_SIZE(recovery_passes);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_online_member(ca, c, i)
		bch2_dev_usage_init(ca);

	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			goto err;
		}

		ca->new_fs_bucket_idx = 0;
	}

	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	ret = bch2_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	ret = bch2_snapshots_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret) {
		bch_err_msg(c, ret, "creating root directory");
		goto err;
	}

	bch2_inode_init_early(c, &lostfound_inode);

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err_msg(c, ret, "creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	if (ret) {
		bch_err_msg(c, ret, "writing first journal entry");
		goto err;
	}

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	bch_err_fn(c, ret);
	return ret;
}