// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

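/* build a struct qstr from a C string; only .len and .name are filled in: */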
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
	return (cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p));
}

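/*
 * Binary search: returns the index of the first key that sorts >=
 * (id, level, pos), i.e. the position where such a key would be inserted:
 */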
static size_t journal_key_search(struct journal_keys *journal_keys,
				 enum btree_id id, unsigned level,
				 struct bpos pos)
{
	size_t l = 0, r = journal_keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < journal_keys->nr &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

	return l;
}

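/*
 * A key was just inserted at @idx: bump the position of any live iterator
 * that's at or past the insertion point, so it keeps pointing at the same key
 * and doesn't return the new key out of order:
 */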
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
	struct bkey_i *n = iter->keys->d[idx].k;
	struct btree_and_journal_iter *biter =
		container_of(iter, struct btree_and_journal_iter, journal);

	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
	     bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
		iter->idx++;
}

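/*
 * Insert a key into the in-memory journal key overlay: a key at the same
 * (btree, level, pos) is replaced; otherwise the key is inserted in sorted
 * order, doubling the backing array if it's full:
 */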
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true
	};
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	unsigned idx = journal_key_search(keys, id, level, k->k.p);

	if (idx < keys->nr &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= keys->size * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;
	}

	array_insert_item(keys->d, keys->nr, idx, n);

	list_for_each_entry(iter, &c->journal_iters, list)
		journal_iter_fix(c, iter, idx);

	return 0;
}

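/*
 * Deletes are represented by inserting a whiteout - an empty key whose
 * deleted bit is set - at the given position:
 */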
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i *whiteout =
		kmalloc(sizeof(struct bkey), GFP_KERNEL);
	int ret;

	if (!whiteout) {
		bch_err(c, "%s: error allocating new key", __func__);
		return -ENOMEM;
	}

	bkey_init(&whiteout->k);
	whiteout->k.p = pos;

	ret = bch2_journal_key_insert(c, id, level, whiteout);
	if (ret)
		kfree(whiteout);
	return ret;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->idx < iter->keys->nr
		? iter->keys->d + iter->idx : NULL;

	if (k &&
	    k->btree_id == iter->btree_id &&
	    k->level    == iter->level)
		return k->k;

	iter->idx = iter->keys->nr;
	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->nr)
		iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= journal_key_search(&c->journal_keys, id, level, pos);
	list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}

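/*
 * Peek the merge of the btree node iterator and the journal key overlay: when
 * both have a key at the same position, the journal key wins and the btree
 * key is skipped, since the journal contents are newer than the on disk
 * btree:
 */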
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k		=
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k	=
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
	bch2_journal_iter_init(c, &iter->journal,
			       b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

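/*
 * Note: @iter is passed by value, so prefetching advances a copy and leaves
 * the caller's iterator position untouched:
 */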
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
					   struct btree_and_journal_iter iter)
{
	unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
	struct bkey_s_c k;
	struct bkey_buf tmp;

	BUG_ON(!b->c.level);

	bch2_bkey_buf_init(&tmp);

	while (i < nr &&
	       (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&tmp, c, k);

		bch2_btree_node_prefetch(c, NULL, tmp.k,
					b->c.btree_id, b->c.level - 1);

		bch2_btree_and_journal_iter_advance(&iter);
		i++;
	}

	bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
				enum btree_id btree_id,
				btree_walk_key_fn key_fn)
{
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	struct btree *child;
	int ret = 0;

	bch2_bkey_buf_init(&tmp);
	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		if (b->c.level) {
			bch2_bkey_buf_reassemble(&tmp, c, k);

			child = bch2_btree_node_get_noiter(c, tmp.k,
						b->c.btree_id, b->c.level - 1,
						false);

			ret = PTR_ERR_OR_ZERO(child);
			if (ret)
				break;

			btree_and_journal_iter_prefetch(c, b, iter);

			ret = bch2_btree_and_journal_walk_recurse(c, child,
					btree_id, key_fn);
			six_unlock_read(&child->c.lock);
		} else {
			ret = key_fn(c, k);
		}

		if (ret)
			break;

		bch2_btree_and_journal_iter_advance(&iter);
	}

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

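/*
 * Example usage (a sketch - walk_key is a hypothetical callback matching
 * btree_walk_key_fn): visit every leaf key in a btree, with keys from the
 * journal overlaid:
 *
 *	static int walk_key(struct bch_fs *c, struct bkey_s_c k)
 *	{
 *		return 0;
 *	}
 *
 *	ret = bch2_btree_and_journal_walk(c, BTREE_ID_extents, walk_key);
 */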
int bch2_btree_and_journal_walk(struct bch_fs *c, enum btree_id btree_id,
				btree_walk_key_fn key_fn)
{
	struct btree *b = c->btree_roots[btree_id].b;
	int ret = 0;

	if (btree_node_fake(b))
		return 0;

	six_lock_read(&b->c.lock, NULL, NULL);
	ret = bch2_btree_and_journal_walk_recurse(c, b, btree_id, key_fn);
	six_unlock_read(&b->c.lock);

	return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p) ?:
		cmp_int(l->journal_seq,	r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}

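/*
 * Two passes over the journal entries: the first counts keys so the array can
 * be sized up front, the second fills it in; then sort, and dedup - keeping
 * only the newest version of each (btree, level, pos):
 */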
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL };
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	if (list_empty(journal_entries))
		return keys;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		if (!keys.journal_seq_base)
			keys.journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	keys.size = roundup_pow_of_two(nr_keys);

	keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
	if (!keys.d)
		goto err;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

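	/* duplicates sort oldest first - keep only the newest of each run: */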
	src = dst = keys.d;
	while (src < keys.d + keys.nr) {
		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys.nr = dst - keys.d;
err:
	return keys;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
				     enum btree_id id, unsigned level,
				     struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_node_iter(trans, id, k->k.p,
					BTREE_MAX_DEPTH, level,
					BTREE_ITER_INTENT|
					BTREE_ITER_NOT_EXTENTS);
	ret   = bch2_btree_iter_traverse(iter) ?:
		bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

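/*
 * Keys that came from the journal don't need to be journalled again on
 * replay; keys that recovery allocated itself (k->allocated - e.g. whiteouts)
 * do:
 */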
static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
	unsigned commit_flags = BTREE_INSERT_NOFAIL|
		BTREE_INSERT_LAZY_RW;

	if (!k->allocated)
		commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

	return bch2_trans_do(c, NULL, NULL, commit_flags,
			     __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
				   BTREE_ITER_CACHED|
				   BTREE_ITER_CACHED_NOFILL|
				   BTREE_ITER_INTENT);
	ret   = bch2_btree_iter_traverse(iter) ?:
		bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	return bch2_trans_do(c, NULL, NULL,
			     BTREE_INSERT_NOFAIL|
			     BTREE_INSERT_USE_RESERVE|
			     BTREE_INSERT_LAZY_RW|
			     BTREE_INSERT_JOURNAL_REPLAY,
			__bch2_alloc_replay_key(&trans, k));
}

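/*
 * Replay order: higher (interior) btree levels sort first, then journal
 * order:
 */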
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(r->level,	l->level) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->btree_id,	r->btree_id) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p);
}

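/*
 * Replay runs in three passes - alloc keys (which only update the btree key
 * cache), then interior node updates, then leaf node updates - with journal
 * reclaim started in between; see the comments below:
 */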
static int bch2_journal_replay(struct bch_fs *c,
			       struct journal_keys keys)
{
	struct journal *j = &c->journal;
	struct journal_key *i;
	u64 seq;
	int ret;

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

	if (keys.nr)
		replay_now_at(j, keys.journal_seq_base);

	seq = j->replay_journal_seq;

	/*
	 * First replay updates to the alloc btree - these will only update the
	 * btree key cache:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (!i->level && i->btree_id == BTREE_ID_alloc) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_alloc_replay_key(c, i->k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Next replay updates to interior btree nodes:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_journal_replay_key(c, i);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now that the btree is in a consistent state, we can start journal
	 * reclaim (which will be flushing entries from the btree key cache
	 * back to the btree):
	 */
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
	journal_reclaim_kick(j);

	j->replay_journal_seq = seq;

	/*
	 * Now replay leaf node updates:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level || i->btree_id == BTREE_ID_alloc)
			continue;

		replay_now_at(j, keys.journal_seq_base + i->journal_seq);

		ret = bch2_journal_replay_key(c, i);
		if (ret)
			goto err;
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	return bch2_journal_error(j);
err:
	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
		ret, bch2_btree_ids[i->btree_id], i->level);
	return ret;
}

/* journal replay early: */

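/*
 * Apply the non-key journal entries - btree roots, fs and device usage,
 * blacklists, IO clocks - taken either from the superblock's clean section or
 * from the journal itself:
 */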
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case FS_USAGE_RESERVED:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case FS_USAGE_INODES:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case FS_USAGE_KEY_VERSION:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
		unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
			sizeof(struct jset_entry_dev_usage_type);
		unsigned i;

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);
		ca->usage_base->buckets_unavailable	= le64_to_cpu(u->buckets_unavailable);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry(i, journal, list) {
			if (i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

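/*
 * Cross-check the clean section against the last journal entry: on a journal
 * sequence number mismatch the clean section is discarded and we recover from
 * the journal; mismatched btree roots are a fixable fsck error:
 */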
static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		char buf1[200], buf2[200];
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
			l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
	}
fsck_err:
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	ret = bch2_sb_clean_validate(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_alloc &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret = 0;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->replicas.entries ||
	    c->opts.rebuild_replicas) {
		bch_info(c, "building replicas info");
		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	}

	if (c->sb.version < bcachefs_metadata_version_inode_backpointers) {
		bch_info(c, "version prior to inode backpointers, upgrade and fsck required");
		c->opts.version_upgrade	= true;
		c->opts.fsck		= true;
		c->opts.fix_errors	= FSCK_OPT_YES;
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct journal_replay *i;

		ret = bch2_journal_read(c, &c->journal_entries,
					&blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		list_for_each_entry_reverse(i, &c->journal_entries, list)
			if (!i->ignore) {
				last_journal_entry = &i->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		c->journal_keys = journal_keys_sort(&c->journal_entries);
		if (!c->journal_keys.d) {
			ret = -ENOMEM;
			goto err;
		}

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	ret = journal_replay_early(c, clean, &c->journal_entries);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers: they may have been referenced by btree writes that happened
	 * before their corresponding journal writes - those btree writes need
	 * to be ignored, which we do by skipping and blacklisting those
	 * sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq,
				    &c->journal_entries);
	if (ret)
		goto err;

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";
	ret = bch2_alloc_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

	if (c->opts.fsck ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "starting mark and sweep");
		err = "error in mark and sweep";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");
	}

	bch2_stripes_heap_start(c);

	clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	/*
	 * Skip past versions that might have been used (as nonces), but hadn't
	 * had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	if (c->opts.norecovery)
		goto out;

	bch_verbose(c, "starting journal replay");
	err = "journal replay failed";
	ret = bch2_journal_replay(c, c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "journal replay done");

	if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
	    !c->opts.nochanges) {
		/*
		 * Note that even when the filesystem was clean there might be
		 * work to do here, if we ran gc (because of fsck) which
		 * recalculated oldest_gen:
		 */
		bch_verbose(c, "writing allocation info");
		err = "error writing out alloc info";
		ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
			bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
		if (ret) {
			bch_err(c, "error writing alloc info");
			goto err;
		}
		bch_verbose(c, "alloc write done");
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		struct bch_move_stats stats = { 0 };

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c);
		if (ret)
			goto err;

		ret = bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(&c->journal_entries);
	}
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	LIST_HEAD(journal);
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		bch2_write_super(c);
	}

	for_each_online_member(ca, c, i)
		bch2_mark_dev_superblock(c, ca, 0);
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1, &journal);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	err = "error marking superblock and journal";
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			goto err;
		}
	}

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	bch2_inode_pack(c, &packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL));
	if (ret) {
		bch_err(c, "error creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}