// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_journal_iter.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "errcode.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "logged_ops.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "sb-clean.h"
#include "snapshot.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
        size_t src, dst;

        for (src = 0, dst = 0; src < keys->nr; src++)
                if (keys->d[src].btree_id != BTREE_ID_alloc)
                        keys->d[dst++] = keys->d[src];

        keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
        struct journal_key *i;

        for (i = keys->d; i < keys->d + keys->nr; i++)
                if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
                        bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* journal replay: */

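/*
 * Advance the journal's replay position to @seq, dropping journal pins for
 * every sequence number we've finished replaying:
 */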
static void replay_now_at(struct journal *j, u64 seq)
{
        BUG_ON(seq < j->replay_journal_seq);

        seq = min(seq, j->replay_journal_seq_end);

        while (j->replay_journal_seq < seq)
                bch2_journal_pin_put(j, j->replay_journal_seq++);
}

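/*
 * Replay a single key from the journal into its btree: level 0 alloc keys go
 * through the key cache, everything else is inserted directly, with triggers
 * disabled (BTREE_TRIGGER_NORUN). Keys already marked as overwritten are
 * skipped once the btree is locked.
 */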
static int bch2_journal_replay_key(struct btree_trans *trans,
                                   struct journal_key *k)
{
        struct btree_iter iter;
        unsigned iter_flags =
                BTREE_ITER_INTENT|
                BTREE_ITER_NOT_EXTENTS;
        unsigned update_flags = BTREE_TRIGGER_NORUN;
        int ret;

        /*
         * BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
         * keep the key cache coherent with the underlying btree. Nothing
         * besides the allocator is doing updates yet so we don't need key cache
         * coherency for non-alloc btrees, and key cache fills for snapshots
         * btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
         * the snapshots recovery pass runs.
         */
        if (!k->level && k->btree_id == BTREE_ID_alloc)
                iter_flags |= BTREE_ITER_CACHED;
        else
                update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;

        bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
                                  BTREE_MAX_DEPTH, k->level,
                                  iter_flags);
        ret = bch2_btree_iter_traverse(&iter);
        if (ret)
                goto out;

        /* Must be checked with btree locked: */
        if (k->overwritten)
                goto out;

        ret = bch2_trans_update(trans, &iter, k->k, update_flags);
out:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

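/* Sort keys by journal sequence number, so replay preserves commit order: */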
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = *((const struct journal_key **)_l);
        const struct journal_key *r = *((const struct journal_key **)_r);

        return cmp_int(l->journal_seq, r->journal_seq);
}

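/*
 * Replay every key from the journal in journal order, then mark replay done
 * and flush journal pins:
 */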
static int bch2_journal_replay(struct bch_fs *c)
{
        struct journal_keys *keys = &c->journal_keys;
        struct journal_key **keys_sorted, *k;
        struct journal *j = &c->journal;
        u64 start_seq   = c->journal_replay_seq_start;
        u64 end_seq     = c->journal_replay_seq_end;
        size_t i;
        int ret;

        move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
        keys->gap = keys->nr;

        keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
        if (!keys_sorted)
                return -BCH_ERR_ENOMEM_journal_replay;

        for (i = 0; i < keys->nr; i++)
                keys_sorted[i] = &keys->d[i];

        sort(keys_sorted, keys->nr,
             sizeof(keys_sorted[0]),
             journal_sort_seq_cmp, NULL);

        if (keys->nr) {
                ret = bch2_journal_log_msg(c, "Starting journal replay (%zu keys in entries %llu-%llu)",
                                           keys->nr, start_seq, end_seq);
                if (ret)
                        goto err;
        }

        for (i = 0; i < keys->nr; i++) {
                k = keys_sorted[i];

                cond_resched();

                replay_now_at(j, k->journal_seq);

                ret = bch2_trans_do(c, NULL, NULL,
                                    BTREE_INSERT_LAZY_RW|
                                    BTREE_INSERT_NOFAIL|
                                    (!k->allocated
                                     ? BTREE_INSERT_JOURNAL_REPLAY|BCH_WATERMARK_reclaim
                                     : 0),
                             bch2_journal_replay_key(trans, k));
                if (ret) {
                        bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
                                bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret));
                        goto err;
                }
        }

        replay_now_at(j, j->replay_journal_seq_end);
        j->replay_journal_seq = 0;

        bch2_journal_set_replay_done(j);
        bch2_journal_flush_all_pins(j);
        ret = bch2_journal_error(j);

        if (keys->nr && !ret)
                bch2_journal_log_msg(c, "journal replay finished");
err:
        kvfree(keys_sorted);

        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

/* journal replay early: */

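/*
 * Process a single journal entry that must be handled before full journal
 * replay: btree roots, usage counters, blacklisted sequence numbers and IO
 * clocks are all picked up here:
 */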
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r;

                while (entry->btree_id >= c->btree_roots_extra.nr + BTREE_ID_NR) {
                        ret = darray_push(&c->btree_roots_extra, (struct btree_root) { NULL });
                        if (ret)
                                return ret;
                }

                r = bch2_btree_id_root(c, entry->btree_id);

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (entry->btree_id) {
                case BCH_FS_USAGE_reserved:
                        if (entry->level < BCH_REPLICAS_MAX)
                                c->usage_base->persistent_reserved[entry->level] =
                                        le64_to_cpu(u->v);
                        break;
                case BCH_FS_USAGE_inodes:
                        c->usage_base->nr_inodes = le64_to_cpu(u->v);
                        break;
                case BCH_FS_USAGE_key_version:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->v));
                        break;
                }

                break;
        }
        case BCH_JSET_ENTRY_data_usage: {
                struct jset_entry_data_usage *u =
                        container_of(entry, struct jset_entry_data_usage, entry);

                ret = bch2_replicas_set_usage(c, &u->r,
                                              le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_dev_usage: {
                struct jset_entry_dev_usage *u =
                        container_of(entry, struct jset_entry_dev_usage, entry);
                struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
                unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

                ca->usage_base->buckets_ec              = le64_to_cpu(u->buckets_ec);

                for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
                        ca->usage_base->d[i].buckets    = le64_to_cpu(u->d[i].buckets);
                        ca->usage_base->d[i].sectors    = le64_to_cpu(u->d[i].sectors);
                        ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
                }

                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
                struct jset_entry_blacklist *bl_entry =
                        container_of(entry, struct jset_entry_blacklist, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->seq),
                                le64_to_cpu(bl_entry->seq) + 1);
                break;
        }
        case BCH_JSET_ENTRY_blacklist_v2: {
                struct jset_entry_blacklist_v2 *bl_entry =
                        container_of(entry, struct jset_entry_blacklist_v2, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->start),
                                le64_to_cpu(bl_entry->end) + 1);
                break;
        }
        case BCH_JSET_ENTRY_clock: {
                struct jset_entry_clock *clock =
                        container_of(entry, struct jset_entry_clock, entry);

                atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
        }
        }

        return ret;
}

static int journal_replay_early(struct bch_fs *c,
                                struct bch_sb_field_clean *clean)
{
        struct jset_entry *entry;
        int ret;

        if (clean) {
                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                return ret;
                }
        } else {
                struct genradix_iter iter;
                struct journal_replay *i, **_i;

                genradix_for_each(&c->journal_entries, iter, _i) {
                        i = *_i;

                        if (!i || i->ignore)
                                continue;

                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        return ret;
                        }
                }
        }

        bch2_fs_usage_initialize(c);

        return 0;
}

/* sb clean section: */

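/*
 * Btrees that hold allocation info: errors here are less serious, since the
 * allocation info can be rebuilt (see -o reconstruct_alloc):
 */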
static bool btree_id_is_alloc(enum btree_id id)
{
        switch (id) {
        case BTREE_ID_alloc:
        case BTREE_ID_backpointers:
        case BTREE_ID_need_discard:
        case BTREE_ID_freespace:
        case BTREE_ID_bucket_gens:
                return true;
        default:
                return false;
        }
}

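/*
 * Read in every btree root found in the superblock/journal; unreadable roots
 * for alloc btrees are tolerated (allocation info will be reconstructed), and
 * btrees with no root at all get a fresh empty one:
 */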
static int read_btree_roots(struct bch_fs *c)
{
        unsigned i;
        int ret = 0;

        for (i = 0; i < btree_id_nr_alive(c); i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);

                if (!r->alive)
                        continue;

                if (btree_id_is_alloc(i) &&
                    c->opts.reconstruct_alloc) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        continue;
                }

                if (r->error) {
                        __fsck_err(c, btree_id_is_alloc(i)
                                   ? FSCK_CAN_IGNORE : 0,
                                   "invalid btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                }

                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        fsck_err(c,
                                 "error reading btree root %s",
                                 bch2_btree_ids[i]);
                        if (btree_id_is_alloc(i))
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        ret = 0;
                }
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);

                if (!r->b) {
                        r->alive = false;
                        r->level = 0;
                        bch2_btree_root_alloc(c, i);
                }
        }
fsck_err:
        return ret;
}

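/*
 * Write out the keys describing the initial snapshot tree, root snapshot and
 * root subvolume:
 */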
static int bch2_initialize_subvolumes(struct bch_fs *c)
{
        struct bkey_i_snapshot_tree     root_tree;
        struct bkey_i_snapshot          root_snapshot;
        struct bkey_i_subvolume         root_volume;
        int ret;

        bkey_snapshot_tree_init(&root_tree.k_i);
        root_tree.k.p.offset            = 1;
        root_tree.v.master_subvol       = cpu_to_le32(1);
        root_tree.v.root_snapshot       = cpu_to_le32(U32_MAX);

        bkey_snapshot_init(&root_snapshot.k_i);
        root_snapshot.k.p.offset = U32_MAX;
        root_snapshot.v.flags   = 0;
        root_snapshot.v.parent  = 0;
        root_snapshot.v.subvol  = cpu_to_le32(BCACHEFS_ROOT_SUBVOL);
        root_snapshot.v.tree    = cpu_to_le32(1);
        SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

        bkey_subvolume_init(&root_volume.k_i);
        root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
        root_volume.v.flags     = 0;
        root_volume.v.snapshot  = cpu_to_le32(U32_MAX);
        root_volume.v.inode     = cpu_to_le64(BCACHEFS_ROOT_INO);

        ret =   bch2_btree_insert(c, BTREE_ID_snapshot_trees,   &root_tree.k_i, NULL, 0) ?:
                bch2_btree_insert(c, BTREE_ID_snapshots,        &root_snapshot.k_i, NULL, 0) ?:
                bch2_btree_insert(c, BTREE_ID_subvolumes,       &root_volume.k_i, NULL, 0);
        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

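/*
 * Transactional helper for bch2_fs_upgrade_for_subvolumes() below: looks up
 * the root inode and sets bi_subvol on it:
 */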
static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bch_inode_unpacked inode;
        int ret;

        k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
                               SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
        ret = bkey_err(k);
        if (ret)
                return ret;

        if (!bkey_is_inode(k.k)) {
                bch_err(trans->c, "root inode not found");
                ret = -BCH_ERR_ENOENT_inode;
                goto err;
        }

        ret = bch2_inode_unpack(k, &inode);
        BUG_ON(ret);

        inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

        ret = bch2_inode_write(trans, &iter, &inode);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

/* set bi_subvol on root inode */
noinline_for_stack
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
        int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
                                __bch2_fs_upgrade_for_subvolumes(trans));
        if (ret)
                bch_err_fn(c, ret);
        return ret;
}

const char * const bch2_recovery_passes[] = {
#define x(_fn, _when)   #_fn,
        BCH_RECOVERY_PASSES()
#undef x
        NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
        return bch2_gc(c, true, c->opts.norecovery);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
        set_bit(BCH_FS_MAY_GO_RW, &c->flags);
        return 0;
}

struct recovery_pass_fn {
        int             (*fn)(struct bch_fs *);
        unsigned        when;
};

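/*
 * Table of recovery pass functions, generated from BCH_RECOVERY_PASSES(): each
 * x(name, when) entry expands to { .fn = bch2_name, .when = when }, e.g. an
 * x(journal_replay, ...) entry resolves to bch2_journal_replay() above.
 */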
static struct recovery_pass_fn recovery_pass_fns[] = {
#define x(_fn, _when)   { .fn = bch2_##_fn, .when = _when },
        BCH_RECOVERY_PASSES()
#undef x
};

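/*
 * Decide whether to upgrade the on disk format: below
 * bcachefs_metadata_required_upgrade_below an upgrade is mandatory, otherwise
 * it's governed by the version_upgrade option. If we do upgrade, also schedule
 * any recovery passes the new version requires and enable fix_errors:
 */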
static void check_version_upgrade(struct bch_fs *c)
{
        unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
        unsigned latest_version = bcachefs_metadata_version_current;
        unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
        unsigned new_version = 0;
        u64 recovery_passes;

        if (old_version < bcachefs_metadata_required_upgrade_below) {
                if (c->opts.version_upgrade == BCH_VERSION_UPGRADE_incompatible ||
                    latest_compatible < bcachefs_metadata_required_upgrade_below)
                        new_version = latest_version;
                else
                        new_version = latest_compatible;
        } else {
                switch (c->opts.version_upgrade) {
                case BCH_VERSION_UPGRADE_compatible:
                        new_version = latest_compatible;
                        break;
                case BCH_VERSION_UPGRADE_incompatible:
                        new_version = latest_version;
                        break;
                case BCH_VERSION_UPGRADE_none:
                        new_version = old_version;
                        break;
                }
        }

        if (new_version > old_version) {
                struct printbuf buf = PRINTBUF;

                if (old_version < bcachefs_metadata_required_upgrade_below)
                        prt_str(&buf, "Version upgrade required:\n");

                if (old_version != c->sb.version) {
                        prt_str(&buf, "Version upgrade from ");
                        bch2_version_to_text(&buf, c->sb.version_upgrade_complete);
                        prt_str(&buf, " to ");
                        bch2_version_to_text(&buf, c->sb.version);
                        prt_str(&buf, " incomplete\n");
                }

                prt_printf(&buf, "Doing %s version upgrade from ",
                           BCH_VERSION_MAJOR(old_version) != BCH_VERSION_MAJOR(new_version)
                           ? "incompatible" : "compatible");
                bch2_version_to_text(&buf, old_version);
                prt_str(&buf, " to ");
                bch2_version_to_text(&buf, new_version);
                prt_newline(&buf);

                recovery_passes = bch2_upgrade_recovery_passes(c, old_version, new_version);
                if (recovery_passes) {
                        if ((recovery_passes & RECOVERY_PASS_ALL_FSCK) == RECOVERY_PASS_ALL_FSCK)
                                prt_str(&buf, "fsck required");
                        else {
                                prt_str(&buf, "running recovery passes: ");
                                prt_bitflags(&buf, bch2_recovery_passes, recovery_passes);
                        }

                        c->recovery_passes_explicit |= recovery_passes;
                        c->opts.fix_errors = FSCK_FIX_yes;
                }

                bch_info(c, "%s", buf.buf);

                mutex_lock(&c->sb_lock);
                bch2_sb_upgrade(c, new_version);
                mutex_unlock(&c->sb_lock);

                printbuf_exit(&buf);
        }
}

u64 bch2_fsck_recovery_passes(void)
{
        u64 ret = 0;

        for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++)
                if (recovery_pass_fns[i].when & PASS_FSCK)
                        ret |= BIT_ULL(i);
        return ret;
}

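/*
 * A pass runs if it was explicitly requested, if fsck is enabled and it's a
 * PASS_FSCK pass, if the filesystem was unclean and it's a PASS_UNCLEAN pass,
 * or if it's marked PASS_ALWAYS; norecovery limits us to the passes up to and
 * including snapshots_read:
 */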
static bool should_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
        struct recovery_pass_fn *p = recovery_pass_fns + c->curr_recovery_pass;

        if (c->opts.norecovery && pass > BCH_RECOVERY_PASS_snapshots_read)
                return false;
        if (c->recovery_passes_explicit & BIT_ULL(pass))
                return true;
        if ((p->when & PASS_FSCK) && c->opts.fsck)
                return true;
        if ((p->when & PASS_UNCLEAN) && !c->sb.clean)
                return true;
        if (p->when & PASS_ALWAYS)
                return true;
        return false;
}

static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
        int ret;

        c->curr_recovery_pass = pass;

        if (should_run_recovery_pass(c, pass)) {
                struct recovery_pass_fn *p = recovery_pass_fns + pass;

                if (!(p->when & PASS_SILENT))
                        printk(KERN_INFO bch2_log_msg(c, "%s..."),
                               bch2_recovery_passes[pass]);
                ret = p->fn(c);
                if (ret)
                        return ret;
                if (!(p->when & PASS_SILENT))
                        printk(KERN_CONT " done\n");

                c->recovery_passes_complete |= BIT_ULL(pass);
        }

        return 0;
}

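/*
 * Run each recovery pass in order; BCH_ERR_restart_recovery makes the loop
 * re-read curr_recovery_pass instead of advancing to the next pass:
 */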
static int bch2_run_recovery_passes(struct bch_fs *c)
{
        int ret = 0;

        while (c->curr_recovery_pass < ARRAY_SIZE(recovery_pass_fns)) {
                ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
                if (bch2_err_matches(ret, BCH_ERR_restart_recovery))
                        continue;
                if (ret)
                        break;
                c->curr_recovery_pass++;
        }

        return ret;
}

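/*
 * Main recovery path: read the superblock clean section and/or the journal,
 * set up journal replay, run the recovery passes, then record the results in
 * the superblock:
 */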
int bch2_fs_recovery(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean = NULL;
        struct jset *last_journal_entry = NULL;
        u64 last_seq = 0, blacklist_seq, journal_seq;
        bool write_sb = false;
        int ret = 0;

        if (c->sb.clean) {
                clean = bch2_read_superblock_clean(c);
                ret = PTR_ERR_OR_ZERO(clean);
                if (ret)
                        goto err;

                bch_info(c, "recovering from clean shutdown, journal seq %llu",
                         le64_to_cpu(clean->journal_seq));
        } else {
                bch_info(c, "recovering from unclean shutdown");
        }

        if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
                bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
                ret = -EINVAL;
                goto err;
        }

        if (!c->sb.clean &&
            !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
                bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
                ret = -EINVAL;
                goto err;
        }

        if (c->opts.fsck || !(c->opts.nochanges && c->opts.norecovery))
                check_version_upgrade(c);

        if (c->opts.fsck && c->opts.norecovery) {
                bch_err(c, "cannot select both norecovery and fsck");
                ret = -EINVAL;
                goto err;
        }

        ret = bch2_blacklist_table_initialize(c);
        if (ret) {
                bch_err(c, "error initializing blacklist table");
                goto err;
        }

        if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
                struct genradix_iter iter;
                struct journal_replay **i;

                bch_verbose(c, "starting journal read");
                ret = bch2_journal_read(c, &last_seq, &blacklist_seq, &journal_seq);
                if (ret)
                        goto err;

                /*
                 * note: cmd_list_journal needs the blacklist table fully up to date so
                 * it can asterisk ignored journal entries:
                 */
                if (c->opts.read_journal_only)
                        goto out;

                genradix_for_each_reverse(&c->journal_entries, iter, i)
                        if (*i && !(*i)->ignore) {
                                last_journal_entry = &(*i)->j;
                                break;
                        }

                if (mustfix_fsck_err_on(c->sb.clean &&
                                        last_journal_entry &&
                                        !journal_entry_empty(last_journal_entry), c,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                        c->sb.clean = false;
                }

                if (!last_journal_entry) {
                        fsck_err_on(!c->sb.clean, c, "no journal entries found");
                        if (clean)
                                goto use_clean;

                        genradix_for_each_reverse(&c->journal_entries, iter, i)
                                if (*i) {
                                        last_journal_entry = &(*i)->j;
                                        (*i)->ignore = false;
                                        break;
                                }
                }

                ret = bch2_journal_keys_sort(c);
                if (ret)
                        goto err;

                if (c->sb.clean && last_journal_entry) {
                        ret = bch2_verify_superblock_clean(c, &clean,
                                                      last_journal_entry);
                        if (ret)
                                goto err;
                }
        } else {
use_clean:
                if (!clean) {
                        bch_err(c, "no superblock clean section found");
                        ret = -BCH_ERR_fsck_repair_impossible;
                        goto err;
                }
                blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
        }

        c->journal_replay_seq_start     = last_seq;
        c->journal_replay_seq_end       = blacklist_seq - 1;

        if (c->opts.reconstruct_alloc) {
                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                drop_alloc_keys(&c->journal_keys);
        }

        zero_out_btree_mem_ptr(&c->journal_keys);

        ret = journal_replay_early(c, clean);
        if (ret)
                goto err;

        /*
         * After an unclean shutdown, skip the next few journal sequence
         * numbers as they may have been referenced by btree writes that
         * happened before their corresponding journal writes - those btree
         * writes need to be ignored, by skipping and blacklisting the next few
         * journal sequence numbers:
         */
        if (!c->sb.clean)
                journal_seq += 8;

        if (blacklist_seq != journal_seq) {
                ret =   bch2_journal_log_msg(c, "blacklisting entries %llu-%llu",
                                             blacklist_seq, journal_seq) ?:
                        bch2_journal_seq_blacklist_add(c,
                                        blacklist_seq, journal_seq);
                if (ret) {
                        bch_err(c, "error creating new journal seq blacklist entry");
                        goto err;
                }
        }

        ret =   bch2_journal_log_msg(c, "starting journal at entry %llu, replaying %llu-%llu",
                                     journal_seq, last_seq, blacklist_seq - 1) ?:
                bch2_fs_journal_start(&c->journal, journal_seq);
        if (ret)
                goto err;

        if (c->opts.reconstruct_alloc)
                bch2_journal_log_msg(c, "dropping alloc info");

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        ret = read_btree_roots(c);
        if (ret)
                goto err;

        if (c->opts.fsck &&
            (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
             BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
                c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

        ret = bch2_run_recovery_passes(c);
        if (ret)
                goto err;

        /* If we fixed errors, verify that fs is actually clean now: */
        if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
            test_bit(BCH_FS_ERRORS_FIXED, &c->flags) &&
            !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags) &&
            !test_bit(BCH_FS_ERROR, &c->flags)) {
                bch_info(c, "Fixed errors, running fsck a second time to verify fs is clean");
                clear_bit(BCH_FS_ERRORS_FIXED, &c->flags);

                c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;

                ret = bch2_run_recovery_passes(c);
                if (ret)
                        goto err;

                if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags) ||
                    test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
                        bch_err(c, "Second fsck run was not clean");
                        set_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags);
                }

                set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
        }

        if (enabled_qtypes(c)) {
                bch_verbose(c, "reading quotas");
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
                bch_verbose(c, "quotas done");
        }

        mutex_lock(&c->sb_lock);
        if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) != c->sb.version) {
                SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, c->sb.version);
                write_sb = true;
        }

        if (!test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
                write_sb = true;
        }

        if (c->opts.fsck &&
            !test_bit(BCH_FS_ERROR, &c->flags) &&
            !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
                SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
                SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
                write_sb = true;
        }

        if (write_sb)
                bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
            c->sb.version_min < bcachefs_metadata_version_btree_ptr_sectors_written) {
                struct bch_move_stats stats;

                bch2_move_stats_init(&stats, "recovery");

                bch_info(c, "scanning for old btree nodes");
                ret =   bch2_fs_read_write(c) ?:
                        bch2_scan_old_btree_nodes(c, &stats);
                if (ret)
                        goto err;
                bch_info(c, "scanning for old btree nodes done");
        }

        if (c->journal_seq_blacklist_table &&
            c->journal_seq_blacklist_table->nr > 128)
                queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

        ret = 0;
out:
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
        bch2_flush_fsck_errs(c);

        if (!c->opts.keep_journal &&
            test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) {
                bch2_journal_keys_free(&c->journal_keys);
                bch2_journal_entries_free(c);
        }
        kfree(clean);

        if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
                bch2_fs_read_write_early(c);
                bch2_delete_dead_snapshots_async(c);
        }

        if (ret)
                bch_err_fn(c, ret);
        return ret;
err:
fsck_err:
        bch2_fs_emergency_read_only(c);
        goto out;
}

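/*
 * Initialize a brand new filesystem: allocate btree roots and journal buckets,
 * mark superblocks, then create the root subvolume, root directory and
 * lost+found:
 */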
int bch2_fs_initialize(struct bch_fs *c)
{
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct qstr lostfound = QSTR("lost+found");
        struct bch_dev *ca;
        unsigned i;
        int ret;

        bch_notice(c, "initializing new filesystem");

        mutex_lock(&c->sb_lock);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

        bch2_sb_maybe_downgrade(c);

        if (c->opts.version_upgrade != BCH_VERSION_UPGRADE_none) {
                bch2_sb_upgrade(c, bcachefs_metadata_version_current);
                SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
                bch2_write_super(c);
        }
        mutex_unlock(&c->sb_lock);

        c->curr_recovery_pass = ARRAY_SIZE(recovery_pass_fns);
        set_bit(BCH_FS_MAY_GO_RW, &c->flags);
        set_bit(BCH_FS_FSCK_DONE, &c->flags);

        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);

        for_each_online_member(ca, c, i)
                bch2_dev_usage_init(ca);

        for_each_online_member(ca, c, i) {
                ret = bch2_dev_journal_alloc(ca);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        goto err;
                }
        }

        /*
         * journal_res_get() will crash if called before this has
         * set up the journal.pin FIFO and journal.cur pointer:
         */
        bch2_fs_journal_start(&c->journal, 1);
        bch2_journal_set_replay_done(&c->journal);

        ret = bch2_fs_read_write_early(c);
        if (ret)
                goto err;

        /*
         * Write out the superblock and journal buckets, now that we can do
         * btree updates
         */
        bch_verbose(c, "marking superblocks");
        for_each_member_device(ca, c, i) {
                ret = bch2_trans_mark_dev_sb(c, ca);
                if (ret) {
                        percpu_ref_put(&ca->ref);
                        goto err;
                }

                ca->new_fs_bucket_idx = 0;
        }

        ret = bch2_fs_freespace_init(c);
        if (ret)
                goto err;

        ret = bch2_initialize_subvolumes(c);
        if (ret)
                goto err;

        bch_verbose(c, "reading snapshots table");
        ret = bch2_snapshots_read(c);
        if (ret)
                goto err;
        bch_verbose(c, "reading snapshots done");

        bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755, 0, NULL);
        root_inode.bi_inum      = BCACHEFS_ROOT_INO;
        root_inode.bi_subvol    = BCACHEFS_ROOT_SUBVOL;
        bch2_inode_pack(&packed_inode, &root_inode);
        packed_inode.inode.k.p.snapshot = U32_MAX;

        ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
        if (ret) {
                bch_err_msg(c, ret, "creating root directory");
                goto err;
        }

        bch2_inode_init_early(c, &lostfound_inode);

        ret = bch2_trans_do(c, NULL, NULL, 0,
                bch2_create_trans(trans,
                                  BCACHEFS_ROOT_SUBVOL_INUM,
                                  &root_inode, &lostfound_inode,
                                  &lostfound,
                                  0, 0, S_IFDIR|0700, 0,
                                  NULL, NULL, (subvol_inum) { 0 }, 0));
        if (ret) {
                bch_err_msg(c, ret, "creating lost+found");
                goto err;
        }

        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
        }

        ret = bch2_journal_flush(&c->journal);
        if (ret) {
                bch_err_msg(c, ret, "writing first journal entry");
                goto err;
        }

        mutex_lock(&c->sb_lock);
        SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return 0;
err:
        bch_err_fn(c, ret);
        return ret;
}