#include "bcachefs.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* sort and dedup all keys in the journal: */

static void journal_entries_free(struct list_head *list)
{
        while (!list_empty(list)) {
                struct journal_replay *i =
                        list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kvpfree(i, offsetof(struct journal_replay, j) +
                        vstruct_bytes(&i->j));
        }
}

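/*
 * Sort order for deduplication: by btree, then position; ties sort the newer
 * journal entry (higher seq, then higher offset within the entry) last, so
 * later updates take precedence:
 */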
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return cmp_int(l->btree_id, r->btree_id) ?:
                bkey_cmp(l->pos, r->pos) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->journal_offset, r->journal_offset);
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->btree_id, r->btree_id) ?:
                bkey_cmp(l->pos, r->pos);
}

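/*
 * Trimming a key changes its start position, which can leave the array
 * momentarily unsorted; bubble the key forward until sort order is restored:
 */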
static void journal_keys_sift(struct journal_keys *keys, struct journal_key *i)
{
        while (i + 1 < keys->d + keys->nr &&
               journal_sort_key_cmp(i, i + 1) > 0) {
                swap(i[0], i[1]);
                i++;
        }
}

static void journal_keys_free(struct journal_keys *keys)
{
        struct journal_key *i;

        for_each_journal_key(*keys, i)
                if (i->allocated)
                        kfree(i->k);
        kvfree(keys->d);
        keys->d = NULL;
        keys->nr = 0;
}

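/*
 * Flatten every key from every journal entry into one sorted array, then
 * resolve overlaps: where two keys overlap, the key from the newer journal
 * entry wins and the older key is trimmed around it - split in two (with a
 * newly allocated copy) if the newer key lands in its middle:
 */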
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
        struct journal_replay *p;
        struct jset_entry *entry;
        struct bkey_i *k, *_n;
        struct journal_keys keys = { NULL }, keys_deduped = { NULL };
        struct journal_key *i;
        size_t nr_keys = 0;

        list_for_each_entry(p, journal_entries, list)
                for_each_jset_key(k, _n, entry, &p->j)
                        nr_keys++;

        keys.journal_seq_base = keys_deduped.journal_seq_base =
                le64_to_cpu(list_first_entry(journal_entries,
                                             struct journal_replay,
                                             list)->j.seq);

        keys.d = kvmalloc(sizeof(keys.d[0]) * nr_keys, GFP_KERNEL);
        if (!keys.d)
                goto err;

        keys_deduped.d = kvmalloc(sizeof(keys.d[0]) * nr_keys * 2, GFP_KERNEL);
        if (!keys_deduped.d)
                goto err;

        list_for_each_entry(p, journal_entries, list)
                for_each_jset_key(k, _n, entry, &p->j)
                        keys.d[keys.nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .pos            = bkey_start_pos(&k->k),
                                .k              = k,
                                .journal_seq    = le64_to_cpu(p->j.seq) -
                                        keys.journal_seq_base,
                                .journal_offset = k->_data - p->j._data,
                        };

        sort(keys.d, nr_keys, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

        i = keys.d;
        while (i < keys.d + keys.nr) {
                if (i + 1 < keys.d + keys.nr &&
                    i[0].btree_id == i[1].btree_id &&
                    !bkey_cmp(i[0].pos, i[1].pos)) {
                        if (bkey_cmp(i[0].k->k.p, i[1].k->k.p) <= 0) {
                                i++;
                        } else {
                                bch2_cut_front(i[1].k->k.p, i[0].k);
                                i[0].pos = i[1].k->k.p;
                                journal_keys_sift(&keys, i);
                        }
                        continue;
                }

                if (i + 1 < keys.d + keys.nr &&
                    i[0].btree_id == i[1].btree_id &&
                    bkey_cmp(i[0].k->k.p, bkey_start_pos(&i[1].k->k)) > 0) {
                        if ((cmp_int(i[0].journal_seq, i[1].journal_seq) ?:
                             cmp_int(i[0].journal_offset, i[1].journal_offset)) < 0) {
                                if (bkey_cmp(i[0].k->k.p, i[1].k->k.p) <= 0) {
                                        bch2_cut_back(bkey_start_pos(&i[1].k->k), &i[0].k->k);
                                } else {
                                        struct bkey_i *split =
                                                kmalloc(bkey_bytes(i[0].k), GFP_KERNEL);

                                        if (!split)
                                                goto err;

                                        bkey_copy(split, i[0].k);
                                        bch2_cut_back(bkey_start_pos(&i[1].k->k), &split->k);
                                        keys_deduped.d[keys_deduped.nr++] = (struct journal_key) {
                                                .btree_id       = i[0].btree_id,
                                                .allocated      = true,
                                                .pos            = bkey_start_pos(&split->k),
                                                .k              = split,
                                                .journal_seq    = i[0].journal_seq,
                                                .journal_offset = i[0].journal_offset,
                                        };

                                        bch2_cut_front(i[1].k->k.p, i[0].k);
                                        i[0].pos = i[1].k->k.p;
                                        journal_keys_sift(&keys, i);
                                        continue;
                                }
                        } else {
                                if (bkey_cmp(i[0].k->k.p, i[1].k->k.p) >= 0) {
                                        i[1] = i[0];
                                        i++;
                                        continue;
                                } else {
                                        bch2_cut_front(i[0].k->k.p, i[1].k);
                                        i[1].pos = i[0].k->k.p;
                                        journal_keys_sift(&keys, i + 1);
                                        continue;
                                }
                        }
                }

                keys_deduped.d[keys_deduped.nr++] = *i++;
        }

        kvfree(keys.d);
        return keys_deduped;
err:
        journal_keys_free(&keys_deduped);
        kvfree(keys.d);
        return (struct journal_keys) { NULL };
}

/* journal replay: */

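/* Advance the replay cursor, dropping journal pins for entries we've passed: */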
static void replay_now_at(struct journal *j, u64 seq)
{
        BUG_ON(seq < j->replay_journal_seq);
        BUG_ON(seq > j->replay_journal_seq_end);

        while (j->replay_journal_seq < seq)
                bch2_journal_pin_put(j, j->replay_journal_seq++);
}

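/*
 * Extents can't simply be reinserted at replay: they may have to be broken up
 * into portions that are atomic with respect to what's already in the btree,
 * and splitting a compressed extent requires a disk reservation:
 */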
static int bch2_extent_replay_key(struct bch_fs *c, struct bkey_i *k)
{
        struct btree_trans trans;
        struct btree_iter *iter, *split_iter;
        /*
         * We might cause compressed extents to be split, so we need to pass in
         * a disk_reservation:
         */
        struct disk_reservation disk_res =
                bch2_disk_reservation_init(c, 0);
        struct bkey_i *split;
        bool split_compressed = false;
        int ret;

        bch2_trans_init(&trans, c);
        bch2_trans_preload_iters(&trans);
retry:
        bch2_trans_begin(&trans);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
                                   bkey_start_pos(&k->k),
                                   BTREE_ITER_INTENT);

        do {
                ret = bch2_btree_iter_traverse(iter);
                if (ret)
                        goto err;

                split_iter = bch2_trans_copy_iter(&trans, iter);
                ret = PTR_ERR_OR_ZERO(split_iter);
                if (ret)
                        goto err;

                split = bch2_trans_kmalloc(&trans, bkey_bytes(&k->k));
                ret = PTR_ERR_OR_ZERO(split);
                if (ret)
                        goto err;

                if (!split_compressed &&
                    bch2_extent_is_compressed(bkey_i_to_s_c(k)) &&
                    !bch2_extent_is_atomic(k, split_iter)) {
                        ret = bch2_disk_reservation_add(c, &disk_res,
                                        k->k.size *
                                        bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(k)),
                                        BCH_DISK_RESERVATION_NOFAIL);
                        BUG_ON(ret);

                        split_compressed = true;
                }

                bkey_copy(split, k);
                bch2_cut_front(split_iter->pos, split);
                bch2_extent_trim_atomic(split, split_iter);

                bch2_trans_update(&trans, BTREE_INSERT_ENTRY(split_iter, split));
                bch2_btree_iter_set_pos(iter, split->k.p);
        } while (bkey_cmp(iter->pos, k->k.p) < 0);

        if (split_compressed) {
                memset(&trans.fs_usage_deltas.fs_usage, 0,
                       sizeof(trans.fs_usage_deltas.fs_usage));
                trans.fs_usage_deltas.top = trans.fs_usage_deltas.d;

                ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k), false,
                                          -((s64) k->k.size),
                                          &trans.fs_usage_deltas) ?:
                      bch2_trans_commit(&trans, &disk_res, NULL,
                                        BTREE_INSERT_ATOMIC|
                                        BTREE_INSERT_NOFAIL|
                                        BTREE_INSERT_LAZY_RW|
                                        BTREE_INSERT_NOMARK_OVERWRITES|
                                        BTREE_INSERT_NO_CLEAR_REPLICAS);
        } else {
                ret = bch2_trans_commit(&trans, &disk_res, NULL,
                                        BTREE_INSERT_ATOMIC|
                                        BTREE_INSERT_NOFAIL|
                                        BTREE_INSERT_LAZY_RW|
                                        BTREE_INSERT_JOURNAL_REPLAY|
                                        BTREE_INSERT_NOMARK);
        }
err:
        if (ret == -EINTR)
                goto retry;

        bch2_disk_reservation_put(c, &disk_res);

        return bch2_trans_exit(&trans) ?: ret;
}

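/*
 * Replay keys in journal sequence order; alloc and extent keys need their own
 * replay paths, everything else is a plain btree insert:
 */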
static int bch2_journal_replay(struct bch_fs *c,
                               struct journal_keys keys)
{
        struct journal *j = &c->journal;
        struct journal_key *i;
        int ret;

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

        for_each_journal_key(keys, i) {
                replay_now_at(j, keys.journal_seq_base + i->journal_seq);

                switch (i->btree_id) {
                case BTREE_ID_ALLOC:
                        ret = bch2_alloc_replay_key(c, i->k);
                        break;
                case BTREE_ID_EXTENTS:
                        ret = bch2_extent_replay_key(c, i->k);
                        break;
                default:
                        ret = bch2_btree_insert(c, i->btree_id, i->k,
                                                NULL, NULL,
                                                BTREE_INSERT_NOFAIL|
                                                BTREE_INSERT_LAZY_RW|
                                                BTREE_INSERT_JOURNAL_REPLAY|
                                                BTREE_INSERT_NOMARK);
                        break;
                }

                if (ret) {
                        bch_err(c, "journal replay: error %d while replaying key",
                                ret);
                        return ret;
                }

                cond_resched();
        }

        replay_now_at(j, j->replay_journal_seq_end);
        j->replay_journal_seq = 0;

        bch2_journal_set_replay_done(j);
        bch2_journal_flush_all_pins(j);
        return bch2_journal_error(j);
}

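/* The journal counts as empty if its last entry has nothing to replay: */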
static bool journal_empty(struct list_head *journal)
{
        return list_empty(journal) ||
                journal_entry_empty(&list_last_entry(journal,
                                        struct journal_replay, list)->j);
}

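/*
 * Check the entries we read for gaps in the sequence numbers, and for entries
 * whose sequence numbers should have been blacklisted:
 */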
static int
verify_journal_entries_not_blacklisted_or_missing(struct bch_fs *c,
                                                  struct list_head *journal)
{
        struct journal_replay *i =
                list_last_entry(journal, struct journal_replay, list);
        u64 start_seq   = le64_to_cpu(i->j.last_seq);
        u64 end_seq     = le64_to_cpu(i->j.seq);
        u64 seq         = start_seq;
        int ret = 0;

        list_for_each_entry(i, journal, list) {
                fsck_err_on(seq != le64_to_cpu(i->j.seq), c,
                        "journal entries %llu-%llu missing! (replaying %llu-%llu)",
                        seq, le64_to_cpu(i->j.seq) - 1,
                        start_seq, end_seq);

                seq = le64_to_cpu(i->j.seq);

                fsck_err_on(bch2_journal_seq_is_blacklisted(c, seq, false), c,
                            "found blacklisted journal entry %llu", seq);

                do {
                        seq++;
                } while (bch2_journal_seq_is_blacklisted(c, seq, false));
        }
fsck_err:
        return ret;
}

/* journal replay early: */

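/*
 * Some entry types describe state that has to be reconstructed before btree
 * replay - btree roots, fs usage, and journal seq blacklists:
 */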
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r = &c->btree_roots[entry->btree_id];

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (entry->btree_id) {
                case FS_USAGE_RESERVED:
                        if (entry->level < BCH_REPLICAS_MAX)
                                c->usage_base->persistent_reserved[entry->level] =
                                        le64_to_cpu(u->v);
                        break;
                case FS_USAGE_INODES:
                        c->usage_base->nr_inodes = le64_to_cpu(u->v);
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->v));
                        break;
                }

                break;
        }
        case BCH_JSET_ENTRY_data_usage: {
                struct jset_entry_data_usage *u =
                        container_of(entry, struct jset_entry_data_usage, entry);
                ret = bch2_replicas_set_usage(c, &u->r,
                                              le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
                struct jset_entry_blacklist *bl_entry =
                        container_of(entry, struct jset_entry_blacklist, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->seq),
                                le64_to_cpu(bl_entry->seq) + 1);
                break;
        }
        case BCH_JSET_ENTRY_blacklist_v2: {
                struct jset_entry_blacklist_v2 *bl_entry =
                        container_of(entry, struct jset_entry_blacklist_v2, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->start),
                                le64_to_cpu(bl_entry->end) + 1);
                break;
        }
        }

        return ret;
}

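/*
 * Apply the early entries from either the superblock clean section (clean
 * shutdown) or every journal entry read, and restore the bucket clock hands:
 */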
static int journal_replay_early(struct bch_fs *c,
                                struct bch_sb_field_clean *clean,
                                struct list_head *journal)
{
        struct jset_entry *entry;
        int ret;

        if (clean) {
                c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);

                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                return ret;
                }
        } else {
                struct journal_replay *i =
                        list_last_entry(journal, struct journal_replay, list);

                c->bucket_clock[READ].hand = le16_to_cpu(i->j.read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(i->j.write_clock);

                list_for_each_entry(i, journal, list)
                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        return ret;
                        }
        }

        bch2_fs_usage_initialize(c);

        return 0;
}

/* sb clean section: */

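/*
 * Find the root for btree @id in either the superblock clean section or a
 * journal entry:
 */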
static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

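/*
 * After a clean shutdown the superblock's clean section and the last journal
 * entry must agree; if the sequence numbers don't match, the clean section is
 * discarded and recovery proceeds from the journal:
 */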
static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean **cleanp,
                                   struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        int ret = 0;

        if (!c->sb.clean || !j)
                return 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        mustfix_fsck_err_on(j->read_clock != clean->read_clock, c,
                        "superblock read clock doesn't match journal after clean shutdown");
        mustfix_fsck_err_on(j->write_clock != clean->write_clock, c,
                        "superblock write clock doesn't match journal after clean shutdown");

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(k1)) ||
                                    l1 != l2, c,
                        "superblock btree root doesn't match journal after clean shutdown");
        }
fsck_err:
        return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

        if (fsck_err_on(!sb_clean, c,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-ENOMEM);
        }

        if (le16_to_cpu(c->disk_sb.sb->version) <
            bcachefs_metadata_version_bkey_renumber)
                bch2_sb_clean_renumber(clean, READ);

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

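/*
 * Read in the btree roots collected by journal_replay_early(); a bad alloc
 * btree root is only FSCK_CAN_IGNORE, since alloc info can be reconstructed:
 */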
static int read_btree_roots(struct bch_fs *c)
{
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_root *r = &c->btree_roots[i];

                if (!r->alive)
                        continue;

                if (i == BTREE_ID_ALLOC &&
                    test_reconstruct_alloc(c)) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                        continue;
                }

                if (r->error) {
                        __fsck_err(c, i == BTREE_ID_ALLOC
                                   ? FSCK_CAN_IGNORE : 0,
                                   "invalid btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_ALLOC)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                }

                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        __fsck_err(c, i == BTREE_ID_ALLOC
                                   ? FSCK_CAN_IGNORE : 0,
                                   "error reading btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_ALLOC)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                }
        }

        for (i = 0; i < BTREE_ID_NR; i++)
                if (!c->btree_roots[i].b)
                        bch2_btree_root_alloc(c, i);
fsck_err:
        return ret;
}

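/*
 * Main recovery path: read the clean section and/or journal, replay early
 * entries, start the journal, read btree roots and alloc info, run gc/fsck as
 * needed, then replay the journal into the btrees:
 */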
int bch2_fs_recovery(struct bch_fs *c)
{
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL;
        u64 journal_seq;
        LIST_HEAD(journal_entries);
        struct journal_keys journal_keys = { NULL };
        bool wrote = false, write_sb = false;
        int ret;

        if (c->sb.clean)
                clean = read_superblock_clean(c);
        ret = PTR_ERR_OR_ZERO(clean);
        if (ret) {
                /* don't kfree() an ERR_PTR in the err path: */
                clean = NULL;
                goto err;
        }

        if (c->sb.clean)
                bch_info(c, "recovering from clean shutdown, journal seq %llu",
                         le64_to_cpu(clean->journal_seq));

        if (!c->replicas.entries) {
                bch_info(c, "building replicas info");
                set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        }

        if (!c->sb.clean || c->opts.fsck) {
                struct jset *j;

                ret = bch2_journal_read(c, &journal_entries);
                if (ret)
                        goto err;

                if (mustfix_fsck_err_on(c->sb.clean && !journal_empty(&journal_entries), c,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                        c->sb.clean = false;
                }

                if (!c->sb.clean && list_empty(&journal_entries)) {
                        bch_err(c, "no journal entries found");
                        ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
                        goto err;
                }

                journal_keys = journal_keys_sort(&journal_entries);
                if (!journal_keys.d) {
                        ret = -ENOMEM;
                        goto err;
                }

                j = &list_last_entry(&journal_entries,
                                     struct journal_replay, list)->j;

                ret = verify_superblock_clean(c, &clean, j);
                if (ret)
                        goto err;

                journal_seq = le64_to_cpu(j->seq) + 1;
        } else {
                journal_seq = le64_to_cpu(clean->journal_seq) + 1;
        }

        ret = journal_replay_early(c, clean, &journal_entries);
        if (ret)
                goto err;

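        /*
         * After an unclean shutdown, journal entries with the next few
         * sequence numbers may exist but not have been read (e.g. written to
         * a device that's currently missing) - blacklist those sequence
         * numbers so any such entries found later are known to be stale:
         */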
        if (!c->sb.clean) {
                ret = bch2_journal_seq_blacklist_add(c,
                                                     journal_seq,
                                                     journal_seq + 4);
                if (ret) {
                        bch_err(c, "error creating new journal seq blacklist entry");
                        goto err;
                }

                journal_seq += 4;
        }

        ret = bch2_blacklist_table_initialize(c);
        if (ret)
                goto err;

        ret = verify_journal_entries_not_blacklisted_or_missing(c,
                                                &journal_entries);
        if (ret)
                goto err;

        ret = bch2_fs_journal_start(&c->journal, journal_seq,
                                    &journal_entries);
        if (ret)
                goto err;

        ret = read_btree_roots(c);
        if (ret)
                goto err;

        bch_verbose(c, "starting alloc read");
        err = "error reading allocation information";
        ret = bch2_alloc_read(c, &journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "alloc read done");

        bch_verbose(c, "starting stripes_read");
        err = "error reading stripes";
        ret = bch2_stripes_read(c, &journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "stripes_read done");

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

        if ((c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) &&
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA))) {
                /*
                 * interior btree node updates aren't consistent with the
                 * journal; after an unclean shutdown we have to walk all
                 * pointers to metadata:
                 */
                bch_info(c, "starting metadata mark and sweep");
                err = "error in mark and sweep";
                ret = bch2_gc(c, NULL, true, true);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");
        }

        if (c->opts.fsck ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
            test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
                bch_info(c, "starting mark and sweep");
                err = "error in mark and sweep";
                ret = bch2_gc(c, &journal_keys, true, false);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");
        }

        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        if (c->opts.norecovery)
                goto out;

        bch_verbose(c, "starting journal replay");
        err = "journal replay failed";
        ret = bch2_journal_replay(c, journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "journal replay done");

        if (!c->opts.nochanges) {
                /*
                 * note that even when filesystem was clean there might be work
                 * to do here, if we ran gc (because of fsck) which recalculated
                 * oldest_gen:
                 */
                bch_verbose(c, "writing allocation info");
                err = "error writing out alloc info";
                ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW, &wrote) ?:
                        bch2_alloc_write(c, BTREE_INSERT_LAZY_RW, &wrote);
                if (ret) {
                        bch_err(c, "error writing alloc info");
                        goto err;
                }
                bch_verbose(c, "alloc write done");
        }

        if (!c->sb.clean) {
                if (!(c->sb.features & (1ULL << BCH_FEATURE_ATOMIC_NLINK))) {
                        bch_info(c, "checking inode link counts");
                        err = "error in recovery";
                        ret = bch2_fsck_inode_nlink(c);
                        if (ret)
                                goto err;
                        bch_verbose(c, "check inodes done");
                } else {
                        bch_verbose(c, "checking for deleted inodes");
                        err = "error in recovery";
                        ret = bch2_fsck_walk_inodes_only(c);
                        if (ret)
                                goto err;
                        bch_verbose(c, "check inodes done");
                }
        }

        if (c->opts.fsck) {
                bch_info(c, "starting fsck");
                err = "error in fsck";
                ret = bch2_fsck_full(c);
                if (ret)
                        goto err;
                bch_verbose(c, "fsck done");
        }

        if (enabled_qtypes(c)) {
                bch_verbose(c, "reading quotas");
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
                bch_verbose(c, "quotas done");
        }

        mutex_lock(&c->sb_lock);
        if (c->opts.version_upgrade) {
                if (c->sb.version < bcachefs_metadata_version_new_versioning)
                        c->disk_sb.sb->version_min =
                                cpu_to_le16(bcachefs_metadata_version_min);
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
                write_sb = true;
        }

        if (!test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
                write_sb = true;
        }

        if (c->opts.fsck &&
            !test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;
                SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
                write_sb = true;
        }

        if (write_sb)
                bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (c->journal_seq_blacklist_table &&
            c->journal_seq_blacklist_table->nr > 128)
                queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
out:
        ret = 0;
err:
fsck_err:
        bch2_flush_fsck_errs(c);
        journal_keys_free(&journal_keys);
        journal_entries_free(&journal_entries);
        kfree(clean);
        if (ret)
                bch_err(c, "Error in recovery: %s (%i)", err, ret);
        else
                bch_verbose(c, "ret %i", ret);
        return ret;
}

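/*
 * Format-time initialization of a new filesystem: allocate journal buckets
 * and btree roots, create the root and lost+found directories, then write out
 * the first journal entry and superblock:
 */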
int bch2_fs_initialize(struct bch_fs *c)
{
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct bch_hash_info root_hash_info;
        struct qstr lostfound = QSTR("lost+found");
        const char *err = "cannot allocate memory";
        struct bch_dev *ca;
        LIST_HEAD(journal);
        unsigned i;
        int ret;

        bch_notice(c, "initializing new filesystem");

        mutex_lock(&c->sb_lock);
        for_each_online_member(ca, c, i)
                bch2_mark_dev_superblock(c, ca, 0);
        mutex_unlock(&c->sb_lock);

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);

        err = "unable to allocate journal buckets";
        for_each_online_member(ca, c, i) {
                ret = bch2_dev_journal_alloc(ca);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        goto err;
                }
        }

        /*
         * journal_res_get() will crash if called before this has
         * set up the journal.pin FIFO and journal.cur pointer:
         */
        bch2_fs_journal_start(&c->journal, 1, &journal);
        bch2_journal_set_replay_done(&c->journal);

        err = "error going read write";
        ret = __bch2_fs_read_write(c, true);
        if (ret)
                goto err;

        bch2_inode_init(c, &root_inode, 0, 0,
                        S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
        root_inode.bi_inum = BCACHEFS_ROOT_INO;
        root_inode.bi_nlink++; /* lost+found */
        bch2_inode_pack(&packed_inode, &root_inode);

        err = "error creating root directory";
        ret = bch2_btree_insert(c, BTREE_ID_INODES,
                                &packed_inode.inode.k_i,
                                NULL, NULL, 0);
        if (ret)
                goto err;

        bch2_inode_init(c, &lostfound_inode, 0, 0,
                        S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0,
                        &root_inode);
        lostfound_inode.bi_inum = BCACHEFS_ROOT_INO + 1;
        bch2_inode_pack(&packed_inode, &lostfound_inode);

        err = "error creating lost+found";
        ret = bch2_btree_insert(c, BTREE_ID_INODES,
                                &packed_inode.inode.k_i,
                                NULL, NULL, 0);
        if (ret)
                goto err;

        root_hash_info = bch2_hash_info_init(c, &root_inode);

        ret = bch2_dirent_create(c, BCACHEFS_ROOT_INO, &root_hash_info, DT_DIR,
                                 &lostfound, lostfound_inode.bi_inum, NULL,
                                 BTREE_INSERT_NOFAIL);
        if (ret)
                goto err;

        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
        }

        err = "error writing first journal entry";
        ret = bch2_journal_meta(&c->journal);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        c->disk_sb.sb->version = c->disk_sb.sb->version_min =
                cpu_to_le16(bcachefs_metadata_version_current);
        c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;

        SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return 0;
err:
        pr_err("Error initializing new filesystem: %s (%i)", err, ret);
        return ret;
}