// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
        size_t src, dst;

        for (src = 0, dst = 0; src < keys->nr; src++)
                if (keys->d[src].btree_id != BTREE_ID_alloc)
                        keys->d[dst++] = keys->d[src];

        keys->nr = dst;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id      l_btree_id,
                             unsigned           l_level,
                             struct bpos        l_pos,
                             struct journal_key *r)
{
        return (cmp_int(l_btree_id,     r->btree_id) ?:
                cmp_int(l_level,        r->level) ?:
                bpos_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
        return (cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bpos_cmp(l->k->k.p,     r->k->k.p));
}

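/*
 * Binary search for the first key >= the given (btree id, level, pos) triple;
 * returns journal_keys->nr if there is none. The BUG_ON()s assert the
 * lower-bound invariant on the result:
 */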
static size_t journal_key_search(struct journal_keys *journal_keys,
                                 enum btree_id id, unsigned level,
                                 struct bpos pos)
{
        size_t l = 0, r = journal_keys->nr, m;

        while (l < r) {
                m = l + ((r - l) >> 1);
                if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
                        l = m + 1;
                else
                        r = m;
        }

        BUG_ON(l < journal_keys->nr &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

        BUG_ON(l &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

        return l;
}

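/*
 * Fix up a live iterator after a key is inserted into the array at @idx:
 * iterators positioned past the insertion point are advanced by one, and an
 * iterator exactly at the insertion point is advanced if the new key sorts at
 * or before the position it last visited, so no key is skipped or revisited:
 */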
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
        struct bkey_i *n = iter->keys->d[idx].k;
        struct btree_and_journal_iter *biter =
                container_of(iter, struct btree_and_journal_iter, journal);

        if (iter->idx > idx ||
            (iter->idx == idx &&
             biter->last &&
             bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
                iter->idx++;
}

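/*
 * Insert @k into the sorted array of keys read from the journal, replacing any
 * existing key at the same (btree id, level, pos). The array is grown
 * (doubled) as needed, and live iterators over it are fixed up. @k is marked
 * as allocated, so it's freed along with the journal keys:
 */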
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bkey_i *k)
{
        struct journal_key n = {
                .btree_id       = id,
                .level          = level,
                .k              = k,
                .allocated      = true
        };
        struct journal_keys *keys = &c->journal_keys;
        struct journal_iter *iter;
        unsigned idx = journal_key_search(keys, id, level, k->k.p);

        if (idx < keys->nr &&
            journal_key_cmp(&n, &keys->d[idx]) == 0) {
                if (keys->d[idx].allocated)
                        kfree(keys->d[idx].k);
                keys->d[idx] = n;
                return 0;
        }

        if (keys->nr == keys->size) {
                struct journal_keys new_keys = {
                        .nr                     = keys->nr,
                        .size                   = keys->size * 2,
                        .journal_seq_base       = keys->journal_seq_base,
                };

                new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
                if (!new_keys.d) {
                        bch_err(c, "%s: error allocating new key array (size %zu)",
                                __func__, new_keys.size);
                        return -ENOMEM;
                }

                memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
                kvfree(keys->d);
                *keys = new_keys;
        }

        array_insert_item(keys->d, keys->nr, idx, n);

        list_for_each_entry(iter, &c->journal_iters, list)
                journal_iter_fix(c, iter, idx);

        return 0;
}

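/*
 * Delete by inserting a whiteout: since a whiteout carries no value,
 * allocating just sizeof(struct bkey) is sufficient. On success the whiteout
 * is owned (and eventually freed) by the journal keys; on failure we free it
 * here:
 */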
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bpos pos)
{
        struct bkey_i *whiteout =
                kmalloc(sizeof(struct bkey), GFP_KERNEL);
        int ret;

        if (!whiteout) {
                bch_err(c, "%s: error allocating new key", __func__);
                return -ENOMEM;
        }

        bkey_init(&whiteout->k);
        whiteout->k.p = pos;

        ret = bch2_journal_key_insert(c, id, level, whiteout);
        if (ret)
                kfree(whiteout);
        return ret;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
        struct journal_key *k = iter->idx < iter->keys->nr
                ? iter->keys->d + iter->idx : NULL;

        if (k &&
            k->btree_id == iter->btree_id &&
            k->level    == iter->level)
                return k->k;

        iter->idx = iter->keys->nr;
        return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
        if (iter->idx < iter->keys->nr)
                iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
        list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
                                   struct journal_iter *iter,
                                   enum btree_id id, unsigned level,
                                   struct bpos pos)
{
        iter->btree_id  = id;
        iter->level     = level;
        iter->keys      = &c->journal_keys;
        iter->idx       = journal_key_search(&c->journal_keys, id, level, pos);
        list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
        return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
                                                iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
        bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
        switch (iter->last) {
        case none:
                break;
        case btree:
                bch2_journal_iter_advance_btree(iter);
                break;
        case journal:
                bch2_journal_iter_advance(&iter->journal);
                break;
        }

        iter->last = none;
}

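/*
 * Peek the next key from the combined iterator: whichever of the btree node
 * iterator and journal iterator is at the lower position wins; when both are
 * at the same position the journal key (the newer version) takes precedence
 * and the btree side is advanced past the overwritten key. Deleted keys are
 * skipped, and iteration stops at the end of the node:
 */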
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
        struct bkey_s_c ret;

        while (1) {
                struct bkey_s_c btree_k         =
                        bch2_journal_iter_peek_btree(iter);
                struct bkey_s_c journal_k       =
                        bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

                if (btree_k.k && journal_k.k) {
                        int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

                        if (!cmp)
                                bch2_journal_iter_advance_btree(iter);

                        iter->last = cmp < 0 ? btree : journal;
                } else if (btree_k.k) {
                        iter->last = btree;
                } else if (journal_k.k) {
                        iter->last = journal;
                } else {
                        iter->last = none;
                        return bkey_s_c_null;
                }

                ret = iter->last == journal ? journal_k : btree_k;

                if (iter->b &&
                    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
                        iter->journal.idx = iter->journal.keys->nr;
                        iter->last = none;
                        return bkey_s_c_null;
                }

                if (!bkey_deleted(ret.k))
                        break;

                bch2_btree_and_journal_iter_advance(iter);
        }

        return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
        bch2_btree_and_journal_iter_advance(iter);

        return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
        bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
                                                struct bch_fs *c,
                                                struct btree *b)
{
        memset(iter, 0, sizeof(*iter));

        iter->b = b;
        bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
        bch2_journal_iter_init(c, &iter->journal,
                               b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

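/*
 * Note that @iter is passed by value: we advance a private copy to prefetch
 * upcoming child nodes without disturbing the caller's iterator:
 */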
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
                                           struct btree_and_journal_iter iter)
{
        unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
        struct bkey_s_c k;
        struct bkey_buf tmp;

        BUG_ON(!b->c.level);

        bch2_bkey_buf_init(&tmp);

        while (i < nr &&
               (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&tmp, c, k);

                bch2_btree_node_prefetch(c, NULL, tmp.k,
                                        b->c.btree_id, b->c.level - 1);

                bch2_btree_and_journal_iter_advance(&iter);
                i++;
        }

        bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
                                enum btree_id btree_id,
                                btree_walk_key_fn key_fn)
{
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        struct btree *child;
        int ret = 0;

        bch2_bkey_buf_init(&tmp);
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                if (b->c.level) {
                        bch2_bkey_buf_reassemble(&tmp, c, k);

                        child = bch2_btree_node_get_noiter(c, tmp.k,
                                                b->c.btree_id, b->c.level - 1,
                                                false);

                        ret = PTR_ERR_OR_ZERO(child);
                        if (ret)
                                break;

                        btree_and_journal_iter_prefetch(c, b, iter);

                        ret = bch2_btree_and_journal_walk_recurse(c, child,
                                        btree_id, key_fn);
                        six_unlock_read(&child->c.lock);
                } else {
                        ret = key_fn(c, k);
                }

                if (ret)
                        break;

                bch2_btree_and_journal_iter_advance(&iter);
        }

        bch2_btree_and_journal_iter_exit(&iter);
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

int bch2_btree_and_journal_walk(struct bch_fs *c, enum btree_id btree_id,
                                btree_walk_key_fn key_fn)
{
        struct btree *b = c->btree_roots[btree_id].b;
        int ret = 0;

        if (btree_node_fake(b))
                return 0;

        six_lock_read(&b->c.lock, NULL, NULL);
        ret = bch2_btree_and_journal_walk_recurse(c, b, btree_id, key_fn);
        six_unlock_read(&b->c.lock);

        return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
        while (!list_empty(list)) {
                struct journal_replay *i =
                        list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kvpfree(i, offsetof(struct journal_replay, j) +
                        vstruct_bytes(&i->j));
        }
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bpos_cmp(l->k->k.p, r->k->k.p) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
        struct journal_key *i;

        for (i = keys->d; i < keys->d + keys->nr; i++)
                if (i->allocated)
                        kfree(i->k);

        kvfree(keys->d);
        keys->d = NULL;
        keys->nr = 0;
}

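/*
 * Sort all keys read from the journal by (btree id, level, pos), then dedup:
 * where multiple versions of a key exist, only the newest (highest journal
 * sequence number) is kept. Sequence numbers are stored relative to
 * journal_seq_base so they fit in 32 bits:
 */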
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        struct bkey_i *k, *_n;
        struct journal_keys keys = { NULL };
        struct journal_key *src, *dst;
        size_t nr_keys = 0;

        if (list_empty(journal_entries))
                return keys;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                if (!keys.journal_seq_base)
                        keys.journal_seq_base = le64_to_cpu(i->j.seq);

                for_each_jset_key(k, _n, entry, &i->j)
                        nr_keys++;
        }

        keys.size = roundup_pow_of_two(nr_keys);

        keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
        if (!keys.d)
                goto err;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

                for_each_jset_key(k, _n, entry, &i->j)
                        keys.d[keys.nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .level          = entry->level,
                                .k              = k,
                                .journal_seq    = le64_to_cpu(i->j.seq) -
                                        keys.journal_seq_base,
                                .journal_offset = k->_data - i->j._data,
                        };
        }

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

        src = dst = keys.d;
        while (src < keys.d + keys.nr) {
                while (src + 1 < keys.d + keys.nr &&
                       src[0].btree_id  == src[1].btree_id &&
                       src[0].level     == src[1].level &&
                       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
                        src++;

                *dst++ = *src++;
        }

        keys.nr = dst - keys.d;
err:
        return keys;
}

/* journal replay: */

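/*
 * Mark replay progress up to @seq, dropping the journal pins for every
 * sequence number before it so those journal entries can be reclaimed:
 */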
static void replay_now_at(struct journal *j, u64 seq)
{
        BUG_ON(seq < j->replay_journal_seq);
        BUG_ON(seq > j->replay_journal_seq_end);

        while (j->replay_journal_seq < seq)
                bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
                                     enum btree_id id, unsigned level,
                                     struct bkey_i *k)
{
        struct btree_iter *iter;
        int ret;

        iter = bch2_trans_get_node_iter(trans, id, k->k.p,
                                        BTREE_MAX_DEPTH, level,
                                        BTREE_ITER_INTENT);

        /*
         * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
         * extent_handle_overwrites() and extent_update_to_keys() - but we don't
         * want that here, journal replay is supposed to treat extents like
         * regular keys:
         */
        BUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

        ret   = bch2_btree_iter_traverse(iter) ?:
                bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
        unsigned commit_flags = BTREE_INSERT_NOFAIL|
                BTREE_INSERT_LAZY_RW;

        if (!k->allocated)
                commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

        return bch2_trans_do(c, NULL, NULL, commit_flags,
                             __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
        struct btree_iter *iter;
        int ret;

        iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
                                   BTREE_ITER_CACHED|
                                   BTREE_ITER_CACHED_NOFILL|
                                   BTREE_ITER_INTENT);
        ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
        return bch2_trans_do(c, NULL, NULL,
                             BTREE_INSERT_NOFAIL|
                             BTREE_INSERT_USE_RESERVE|
                             BTREE_INSERT_LAZY_RW|
                             BTREE_INSERT_JOURNAL_REPLAY,
                        __bch2_alloc_replay_key(&trans, k));
}

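/*
 * Replay order: higher btree levels sort first (note the reversed level
 * comparison), then by journal sequence number, so that updates to interior
 * nodes are replayed before leaf node updates:
 */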
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(r->level,       l->level) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->btree_id,    r->btree_id) ?:
                bpos_cmp(l->k->k.p,     r->k->k.p);
}

static int bch2_journal_replay(struct bch_fs *c,
                               struct journal_keys keys)
{
        struct journal *j = &c->journal;
        struct journal_key *i;
        u64 seq;
        int ret;

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

        if (keys.nr)
                replay_now_at(j, keys.journal_seq_base);

        seq = j->replay_journal_seq;

        /*
         * First replay updates to the alloc btree - these will only update the
         * btree key cache:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (!i->level && i->btree_id == BTREE_ID_alloc) {
                        j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
                        ret = bch2_alloc_replay_key(c, i->k);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Next replay updates to interior btree nodes:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (i->level) {
                        j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
                        ret = bch2_journal_replay_key(c, i);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Now that the btree is in a consistent state, we can start journal
         * reclaim (which will flush entries from the btree key cache back
         * to the btree):
         */
        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
        journal_reclaim_kick(j);

        j->replay_journal_seq = seq;

        /*
         * Now replay leaf node updates:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (i->level || i->btree_id == BTREE_ID_alloc)
                        continue;

                replay_now_at(j, keys.journal_seq_base + i->journal_seq);

                ret = bch2_journal_replay_key(c, i);
                if (ret)
                        goto err;
        }

        replay_now_at(j, j->replay_journal_seq_end);
        j->replay_journal_seq = 0;

        bch2_journal_set_replay_done(j);
        bch2_journal_flush_all_pins(j);
        return bch2_journal_error(j);
err:
        bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
                ret, bch2_btree_ids[i->btree_id], i->level);
        return ret;
}

/* journal replay early: */

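/*
 * Replay the non-key journal entries - btree roots, usage and clock info, and
 * journal sequence blacklists - which are needed before the btree itself can
 * be read:
 */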
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r;

                if (entry->btree_id >= BTREE_ID_NR) {
                        bch_err(c, "filesystem has unknown btree type %u",
                                entry->btree_id);
                        return -EINVAL;
                }

                r = &c->btree_roots[entry->btree_id];

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (entry->btree_id) {
                case FS_USAGE_RESERVED:
                        if (entry->level < BCH_REPLICAS_MAX)
                                c->usage_base->persistent_reserved[entry->level] =
                                        le64_to_cpu(u->v);
                        break;
                case FS_USAGE_INODES:
                        c->usage_base->nr_inodes = le64_to_cpu(u->v);
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->v));
                        break;
                }

                break;
        }
        case BCH_JSET_ENTRY_data_usage: {
                struct jset_entry_data_usage *u =
                        container_of(entry, struct jset_entry_data_usage, entry);

                ret = bch2_replicas_set_usage(c, &u->r,
                                              le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_dev_usage: {
                struct jset_entry_dev_usage *u =
                        container_of(entry, struct jset_entry_dev_usage, entry);
                struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
                unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
                unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
                        sizeof(struct jset_entry_dev_usage_type);
                unsigned i;

                ca->usage_base->buckets_ec              = le64_to_cpu(u->buckets_ec);
                ca->usage_base->buckets_unavailable     = le64_to_cpu(u->buckets_unavailable);

                for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
                        ca->usage_base->d[i].buckets    = le64_to_cpu(u->d[i].buckets);
                        ca->usage_base->d[i].sectors    = le64_to_cpu(u->d[i].sectors);
                        ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
                }

                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
                struct jset_entry_blacklist *bl_entry =
                        container_of(entry, struct jset_entry_blacklist, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->seq),
                                le64_to_cpu(bl_entry->seq) + 1);
                break;
        }
        case BCH_JSET_ENTRY_blacklist_v2: {
                struct jset_entry_blacklist_v2 *bl_entry =
                        container_of(entry, struct jset_entry_blacklist_v2, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->start),
                                le64_to_cpu(bl_entry->end) + 1);
                break;
        }
        case BCH_JSET_ENTRY_clock: {
                struct jset_entry_clock *clock =
                        container_of(entry, struct jset_entry_clock, entry);

                atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
        }
        }

        return ret;
}

static int journal_replay_early(struct bch_fs *c,
                                struct bch_sb_field_clean *clean,
                                struct list_head *journal)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        int ret;

        if (clean) {
                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                return ret;
                }
        } else {
                list_for_each_entry(i, journal, list) {
                        if (i->ignore)
                                continue;

                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        return ret;
                        }
                }
        }

        bch2_fs_usage_initialize(c);

        return 0;
}

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean **cleanp,
                                   struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        int ret = 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                char buf1[200], buf2[200];
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(k1)) ||
                                    l1 != l2, c,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
                        l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
                        l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
        }
fsck_err:
        return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

        if (fsck_err_on(!sb_clean, c,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-ENOMEM);
        }

        ret = bch2_sb_clean_validate(c, clean, READ);
        if (ret) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(ret);
        }

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_root *r = &c->btree_roots[i];

                if (!r->alive)
                        continue;

                if (i == BTREE_ID_alloc &&
                    c->opts.reconstruct_alloc) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        continue;
                }

                if (r->error) {
                        __fsck_err(c, i == BTREE_ID_alloc
                                   ? FSCK_CAN_IGNORE : 0,
                                   "invalid btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                }

                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        __fsck_err(c, i == BTREE_ID_alloc
                                   ? FSCK_CAN_IGNORE : 0,
                                   "error reading btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                }
        }

        for (i = 0; i < BTREE_ID_NR; i++)
                if (!c->btree_roots[i].b)
                        bch2_btree_root_alloc(c, i);
fsck_err:
        return ret;
}

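/*
 * Recovery, in outline: read the clean section from the superblock and/or the
 * journal, replay the non-key entries early (btree roots, usage, blacklists),
 * read btree roots and allocation info (running mark and sweep if needed),
 * replay journal keys into the btree, then fsck if requested:
 */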
int bch2_fs_recovery(struct bch_fs *c)
{
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL;
        struct jset *last_journal_entry = NULL;
        u64 blacklist_seq, journal_seq;
        bool write_sb = false;
        int ret = 0;

        if (c->sb.clean)
                clean = read_superblock_clean(c);
        ret = PTR_ERR_OR_ZERO(clean);
        if (ret)
                goto err;

        if (c->sb.clean)
                bch_info(c, "recovering from clean shutdown, journal seq %llu",
                         le64_to_cpu(clean->journal_seq));

        if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
                bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
                ret = -EINVAL;
                goto err;
        }

        if (!c->sb.clean &&
            !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
                bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
                ret = -EINVAL;
                goto err;
        }

        if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
                bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
                ret = -EINVAL;
                goto err;
        }

        if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
                bch_info(c, "alloc_v2 feature bit not set, fsck required");
                c->opts.fsck = true;
                c->opts.fix_errors = FSCK_OPT_YES;
        }

        if (!c->replicas.entries ||
            c->opts.rebuild_replicas) {
                bch_info(c, "building replicas info");
                set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        }

        if (c->sb.version < bcachefs_metadata_version_inode_backpointers) {
                bch_info(c, "version prior to inode backpointers, upgrade and fsck required");
                c->opts.version_upgrade = true;
                c->opts.fsck            = true;
                c->opts.fix_errors      = FSCK_OPT_YES;
        }

        ret = bch2_blacklist_table_initialize(c);
        if (ret) {
                bch_err(c, "error initializing blacklist table");
                goto err;
        }

        if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
                struct journal_replay *i;

                ret = bch2_journal_read(c, &c->journal_entries,
                                        &blacklist_seq, &journal_seq);
                if (ret)
                        goto err;

                list_for_each_entry_reverse(i, &c->journal_entries, list)
                        if (!i->ignore) {
                                last_journal_entry = &i->j;
                                break;
                        }

                if (mustfix_fsck_err_on(c->sb.clean &&
                                        last_journal_entry &&
                                        !journal_entry_empty(last_journal_entry), c,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                        c->sb.clean = false;
                }

                if (!last_journal_entry) {
                        fsck_err_on(!c->sb.clean, c, "no journal entries found");
                        goto use_clean;
                }

                c->journal_keys = journal_keys_sort(&c->journal_entries);
                if (!c->journal_keys.d) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (c->sb.clean && last_journal_entry) {
                        ret = verify_superblock_clean(c, &clean,
                                                      last_journal_entry);
                        if (ret)
                                goto err;
                }
        } else {
use_clean:
                if (!clean) {
                        bch_err(c, "no superblock clean section found");
                        ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
                        goto err;
                }
                blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
        }

        if (c->opts.reconstruct_alloc) {
                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                drop_alloc_keys(&c->journal_keys);
        }

        ret = journal_replay_early(c, clean, &c->journal_entries);
        if (ret)
                goto err;

        /*
         * After an unclean shutdown, skip and blacklist the next few journal
         * sequence numbers: they may have been referenced by btree writes that
         * happened before their corresponding journal writes, and those btree
         * writes need to be ignored:
         */
        if (!c->sb.clean)
                journal_seq += 8;

        if (blacklist_seq != journal_seq) {
                ret = bch2_journal_seq_blacklist_add(c,
                                        blacklist_seq, journal_seq);
                if (ret) {
                        bch_err(c, "error creating new journal seq blacklist entry");
                        goto err;
                }
        }

        ret = bch2_fs_journal_start(&c->journal, journal_seq,
                                    &c->journal_entries);
        if (ret)
                goto err;

        ret = read_btree_roots(c);
        if (ret)
                goto err;

        bch_verbose(c, "starting alloc read");
        err = "error reading allocation information";
        ret = bch2_alloc_read(c);
        if (ret)
                goto err;
        bch_verbose(c, "alloc read done");

        bch_verbose(c, "starting stripes_read");
        err = "error reading stripes";
        ret = bch2_stripes_read(c);
        if (ret)
                goto err;
        bch_verbose(c, "stripes_read done");

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

        if (c->opts.fsck ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
            test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
                bool metadata_only = c->opts.norecovery;

                bch_info(c, "starting mark and sweep");
                err = "error in mark and sweep";
                ret = bch2_gc(c, true, metadata_only);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");
        }

        bch2_stripes_heap_start(c);

        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        if (c->opts.norecovery)
                goto out;

        bch_verbose(c, "starting journal replay");
        err = "journal replay failed";
        ret = bch2_journal_replay(c, c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "journal replay done");

        if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
            !c->opts.nochanges) {
                /*
                 * note that even when filesystem was clean there might be work
                 * to do here, if we ran gc (because of fsck) which recalculated
                 * oldest_gen:
                 */
                bch_verbose(c, "writing allocation info");
                err = "error writing out alloc info";
                ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
                        bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
                if (ret) {
                        bch_err(c, "error writing alloc info");
                        goto err;
                }
                bch_verbose(c, "alloc write done");
        }

        if (c->opts.fsck) {
                bch_info(c, "starting fsck");
                err = "error in fsck";
                ret = bch2_fsck_full(c);
                if (ret)
                        goto err;
                bch_verbose(c, "fsck done");
        } else if (!c->sb.clean) {
                bch_verbose(c, "checking for deleted inodes");
                err = "error in recovery";
                ret = bch2_fsck_walk_inodes_only(c);
                if (ret)
                        goto err;
                bch_verbose(c, "check inodes done");
        }

        if (enabled_qtypes(c)) {
                bch_verbose(c, "reading quotas");
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
                bch_verbose(c, "quotas done");
        }

        if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
                struct bch_move_stats stats = { 0 };

                bch_info(c, "scanning for old btree nodes");
                ret = bch2_fs_read_write(c);
                if (ret)
                        goto err;

                ret = bch2_scan_old_btree_nodes(c, &stats);
                if (ret)
                        goto err;
                bch_info(c, "scanning for old btree nodes done");
        }

        mutex_lock(&c->sb_lock);
        if (c->opts.version_upgrade) {
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
                c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
                write_sb = true;
        }

        if (!test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
                write_sb = true;
        }

        if (c->opts.fsck &&
            !test_bit(BCH_FS_ERROR, &c->flags) &&
            !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
                SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
                SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
                write_sb = true;
        }

        if (write_sb)
                bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (c->journal_seq_blacklist_table &&
            c->journal_seq_blacklist_table->nr > 128)
                queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

        ret = 0;
out:
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
        bch2_flush_fsck_errs(c);

        if (!c->opts.keep_journal) {
                bch2_journal_keys_free(&c->journal_keys);
                bch2_journal_entries_free(&c->journal_entries);
        }
        kfree(clean);
        if (ret)
                bch_err(c, "Error in recovery: %s (%i)", err, ret);
        else
                bch_verbose(c, "ret %i", ret);
        return ret;
err:
fsck_err:
        bch2_fs_emergency_read_only(c);
        goto out;
}

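/*
 * Initialize a freshly formatted filesystem: mark the superblock and journal
 * buckets, allocate btree roots, start the journal, and create the root
 * directory and lost+found:
 */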
int bch2_fs_initialize(struct bch_fs *c)
{
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct qstr lostfound = QSTR("lost+found");
        const char *err = "cannot allocate memory";
        struct bch_dev *ca;
        LIST_HEAD(journal);
        unsigned i;
        int ret;

        bch_notice(c, "initializing new filesystem");

        mutex_lock(&c->sb_lock);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

        if (c->opts.version_upgrade) {
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
                c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
                bch2_write_super(c);
        }

        for_each_online_member(ca, c, i)
                bch2_mark_dev_superblock(c, ca, 0);
        mutex_unlock(&c->sb_lock);

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);

        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

        err = "unable to allocate journal buckets";
        for_each_online_member(ca, c, i) {
                ret = bch2_dev_journal_alloc(ca);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        goto err;
                }
        }

        /*
         * journal_res_get() will crash if called before this has
         * set up the journal.pin FIFO and journal.cur pointer:
         */
        bch2_fs_journal_start(&c->journal, 1, &journal);
        bch2_journal_set_replay_done(&c->journal);

        err = "error going read-write";
        ret = bch2_fs_read_write_early(c);
        if (ret)
                goto err;

        /*
         * Write out the superblock and journal buckets, now that we can do
         * btree updates
         */
        err = "error marking superblock and journal";
        for_each_member_device(ca, c, i) {
                ret = bch2_trans_mark_dev_sb(c, ca);
                if (ret) {
                        percpu_ref_put(&ca->ref);
                        goto err;
                }
        }

        bch2_inode_init(c, &root_inode, 0, 0,
                        S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
        root_inode.bi_inum = BCACHEFS_ROOT_INO;
        bch2_inode_pack(c, &packed_inode, &root_inode);
        packed_inode.inode.k.p.snapshot = U32_MAX;

        err = "error creating root directory";
        ret = bch2_btree_insert(c, BTREE_ID_inodes,
                                &packed_inode.inode.k_i,
                                NULL, NULL, 0);
        if (ret)
                goto err;

        bch2_inode_init_early(c, &lostfound_inode);

        err = "error creating lost+found";
        ret = bch2_trans_do(c, NULL, NULL, 0,
                bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
                                  &root_inode, &lostfound_inode,
                                  &lostfound,
                                  0, 0, S_IFDIR|0700, 0,
                                  NULL, NULL));
        if (ret) {
                bch_err(c, "error creating lost+found");
                goto err;
        }

        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
        }

        err = "error writing first journal entry";
        ret = bch2_journal_meta(&c->journal);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return 0;
err:
        pr_err("Error initializing new filesystem: %s (%i)", err, ret);
        return ret;
}