// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
        size_t src, dst;

        for (src = 0, dst = 0; src < keys->nr; src++)
                if (keys->d[src].btree_id != BTREE_ID_alloc)
                        keys->d[dst++] = keys->d[src];

        keys->nr = dst;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id      l_btree_id,
                             unsigned           l_level,
                             struct bpos        l_pos,
                             struct journal_key *r)
{
        return (cmp_int(l_btree_id,     r->btree_id) ?:
                cmp_int(l_level,        r->level) ?:
                bkey_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
        return (cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bkey_cmp(l->k->k.p,     r->k->k.p));
}

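/*
 * Binary search for the first key >= (btree_id, level, pos); returns
 * keys->nr if no such key exists:
 */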
static size_t journal_key_search(struct journal_keys *journal_keys,
                                 enum btree_id id, unsigned level,
                                 struct bpos pos)
{
        size_t l = 0, r = journal_keys->nr, m;

        while (l < r) {
                m = l + ((r - l) >> 1);
                if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
                        l = m + 1;
                else
                        r = m;
        }

        BUG_ON(l < journal_keys->nr &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

        BUG_ON(l &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

        return l;
}

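/*
 * Fix up live iterators after a key is inserted into the journal keys array:
 * an iterator positioned past the insertion point (or at it, if it already
 * returned a key at that position) has its index bumped so it neither skips
 * nor revisits a key:
 */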
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
        struct bkey_i *n = iter->keys->d[idx].k;
        struct btree_and_journal_iter *biter =
                container_of(iter, struct btree_and_journal_iter, journal);

        if (iter->idx > idx ||
            (iter->idx == idx &&
             biter->last &&
             bkey_cmp(n->k.p, biter->unpacked.p) <= 0))
                iter->idx++;
}

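/*
 * Insert a key into the in-memory journal key array, overriding whatever the
 * journal and btree contain at that position: an existing key at the same
 * (btree_id, level, pos) is replaced, the array is doubled if full, and live
 * iterators are fixed up to account for the insertion:
 */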
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bkey_i *k)
{
        struct journal_key n = {
                .btree_id       = id,
                .level          = level,
                .k              = k,
                .allocated      = true
        };
        struct journal_keys *keys = &c->journal_keys;
        struct journal_iter *iter;
        unsigned idx = journal_key_search(keys, id, level, k->k.p);

        if (idx < keys->nr &&
            journal_key_cmp(&n, &keys->d[idx]) == 0) {
                if (keys->d[idx].allocated)
                        kfree(keys->d[idx].k);
                keys->d[idx] = n;
                return 0;
        }

        if (keys->nr == keys->size) {
                struct journal_keys new_keys = {
                        .nr                     = keys->nr,
                        .size                   = keys->size * 2,
                        .journal_seq_base       = keys->journal_seq_base,
                };

                new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
                if (!new_keys.d) {
                        bch_err(c, "%s: error allocating new key array (size %zu)",
                                __func__, new_keys.size);
                        return -ENOMEM;
                }

                memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
                kvfree(keys->d);
                *keys = new_keys;
        }

        array_insert_item(keys->d, keys->nr, idx, n);

        list_for_each_entry(iter, &c->journal_iters, list)
                journal_iter_fix(c, iter, idx);

        return 0;
}

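/*
 * Delete at @pos by inserting a whiteout (a key of type KEY_TYPE_deleted)
 * that overrides whatever the journal and btree contain at that position.
 * Illustrative sketch of a call site (hypothetical, not from this file):
 *
 *      ret = bch2_journal_key_delete(c, BTREE_ID_alloc, 0, k->k.p);
 */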
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bpos pos)
{
        struct bkey_i *whiteout =
                kmalloc(sizeof(struct bkey), GFP_KERNEL);
        int ret;

        if (!whiteout) {
                bch_err(c, "%s: error allocating new key", __func__);
                return -ENOMEM;
        }

        bkey_init(&whiteout->k);
        whiteout->k.p = pos;

        ret = bch2_journal_key_insert(c, id, level, whiteout);
        if (ret)
                kfree(whiteout);
        return ret;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
        struct journal_key *k = iter->idx < iter->keys->nr
                ? iter->keys->d + iter->idx : NULL;

        if (k &&
            k->btree_id == iter->btree_id &&
            k->level    == iter->level)
                return k->k;

        iter->idx = iter->keys->nr;
        return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
        if (iter->idx < iter->keys->nr)
                iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
        list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
                                   struct journal_iter *iter,
                                   enum btree_id id, unsigned level,
                                   struct bpos pos)
{
        iter->btree_id  = id;
        iter->level     = level;
        iter->keys      = &c->journal_keys;
        iter->idx       = journal_key_search(&c->journal_keys, id, level, pos);
        list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
        return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
                                                iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
        bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
        switch (iter->last) {
        case none:
                break;
        case btree:
                bch2_journal_iter_advance_btree(iter);
                break;
        case journal:
                bch2_journal_iter_advance(&iter->journal);
                break;
        }

        iter->last = none;
}

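/*
 * Peek at the next key in the merged view of the btree node and the journal
 * keys: when both have a key at the same position the journal key is newer
 * and wins, and the btree iterator is advanced past the overridden key:
 */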
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
        struct bkey_s_c ret;

        while (1) {
                struct bkey_s_c btree_k         =
                        bch2_journal_iter_peek_btree(iter);
                struct bkey_s_c journal_k       =
                        bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

                if (btree_k.k && journal_k.k) {
                        int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);

                        if (!cmp)
                                bch2_journal_iter_advance_btree(iter);

                        iter->last = cmp < 0 ? btree : journal;
                } else if (btree_k.k) {
                        iter->last = btree;
                } else if (journal_k.k) {
                        iter->last = journal;
                } else {
                        iter->last = none;
                        return bkey_s_c_null;
                }

                ret = iter->last == journal ? journal_k : btree_k;

                if (iter->b &&
                    bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
                        iter->journal.idx = iter->journal.keys->nr;
                        iter->last = none;
                        return bkey_s_c_null;
                }

                if (!bkey_deleted(ret.k))
                        break;

                bch2_btree_and_journal_iter_advance(iter);
        }

        return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
        bch2_btree_and_journal_iter_advance(iter);

        return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
        bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
                                                struct bch_fs *c,
                                                struct btree *b)
{
        memset(iter, 0, sizeof(*iter));

        iter->b = b;
        bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
        bch2_journal_iter_init(c, &iter->journal,
                               b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

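/*
 * Note: @iter is deliberately passed by value, so the prefetch loop advances
 * a private copy without disturbing the caller's position:
 */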
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
                                           struct btree_and_journal_iter iter)
{
        unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
        struct bkey_s_c k;
        struct bkey_buf tmp;

        BUG_ON(!b->c.level);

        bch2_bkey_buf_init(&tmp);

        while (i < nr &&
               (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&tmp, c, k);

                bch2_btree_node_prefetch(c, NULL, tmp.k,
                                        b->c.btree_id, b->c.level - 1);

                bch2_btree_and_journal_iter_advance(&iter);
                i++;
        }

        bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
                                struct journal_keys *journal_keys,
                                enum btree_id btree_id,
                                btree_walk_node_fn node_fn,
                                btree_walk_key_fn key_fn)
{
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        struct btree *child;
        int ret = 0;

        bch2_bkey_buf_init(&tmp);
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                ret = key_fn(c, btree_id, b->c.level, k);
                if (ret)
                        break;

                if (b->c.level) {
                        bch2_bkey_buf_reassemble(&tmp, c, k);

                        bch2_btree_and_journal_iter_advance(&iter);

                        child = bch2_btree_node_get_noiter(c, tmp.k,
                                                b->c.btree_id, b->c.level - 1,
                                                false);

                        ret = PTR_ERR_OR_ZERO(child);
                        if (ret)
                                break;

                        btree_and_journal_iter_prefetch(c, b, iter);

                        ret   = (node_fn ? node_fn(c, b) : 0) ?:
                                bch2_btree_and_journal_walk_recurse(c, child,
                                        journal_keys, btree_id, node_fn, key_fn);
                        six_unlock_read(&child->c.lock);

                        if (ret)
                                break;
                } else {
                        bch2_btree_and_journal_iter_advance(&iter);
                }
        }

        bch2_btree_and_journal_iter_exit(&iter);
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
                                enum btree_id btree_id,
                                btree_walk_node_fn node_fn,
                                btree_walk_key_fn key_fn)
{
        struct btree *b = c->btree_roots[btree_id].b;
        int ret = 0;

        if (btree_node_fake(b))
                return 0;

        six_lock_read(&b->c.lock, NULL, NULL);
        ret   = (node_fn ? node_fn(c, b) : 0) ?:
                bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
                                                    node_fn, key_fn) ?:
                key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
        six_unlock_read(&b->c.lock);

        return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
        while (!list_empty(list)) {
                struct journal_replay *i =
                        list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kvpfree(i, offsetof(struct journal_replay, j) +
                        vstruct_bytes(&i->j));
        }
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bkey_cmp(l->k->k.p, r->k->k.p) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
        struct journal_key *i;

        for (i = keys->d; i < keys->d + keys->nr; i++)
                if (i->allocated)
                        kfree(i->k);

        kvfree(keys->d);
        keys->d = NULL;
        keys->nr = 0;
}

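/*
 * Two passes over the journal entries: the first counts the keys so the
 * array can be sized up front, the second fills it in. The keys are then
 * sorted by position with oldest first, and deduplicated keeping only the
 * newest version of each key:
 */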
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        struct bkey_i *k, *_n;
        struct journal_keys keys = { NULL };
        struct journal_key *src, *dst;
        size_t nr_keys = 0;

        if (list_empty(journal_entries))
                return keys;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                if (!keys.journal_seq_base)
                        keys.journal_seq_base = le64_to_cpu(i->j.seq);

                for_each_jset_key(k, _n, entry, &i->j)
                        nr_keys++;
        }

        keys.size = roundup_pow_of_two(nr_keys);

        keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
        if (!keys.d)
                goto err;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

                for_each_jset_key(k, _n, entry, &i->j)
                        keys.d[keys.nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .level          = entry->level,
                                .k              = k,
                                .journal_seq    = le64_to_cpu(i->j.seq) -
                                        keys.journal_seq_base,
                                .journal_offset = k->_data - i->j._data,
                        };
        }

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

        src = dst = keys.d;
        while (src < keys.d + keys.nr) {
                while (src + 1 < keys.d + keys.nr &&
                       src[0].btree_id  == src[1].btree_id &&
                       src[0].level     == src[1].level &&
                       !bkey_cmp(src[0].k->k.p, src[1].k->k.p))
                        src++;

                *dst++ = *src++;
        }

        keys.nr = dst - keys.d;
err:
        return keys;
}

/* journal replay: */

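/*
 * Advance the journal's replay cursor to @seq, dropping the pins on all
 * journal entries that are now fully replayed so they can be reclaimed:
 */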
static void replay_now_at(struct journal *j, u64 seq)
{
        BUG_ON(seq < j->replay_journal_seq);
        BUG_ON(seq > j->replay_journal_seq_end);

        while (j->replay_journal_seq < seq)
                bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
                                     enum btree_id id, unsigned level,
                                     struct bkey_i *k)
{
        struct btree_iter *iter;
        int ret;

        iter = bch2_trans_get_node_iter(trans, id, k->k.p,
                                        BTREE_MAX_DEPTH, level,
                                        BTREE_ITER_INTENT);

        /*
         * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
         * extent_handle_overwrites() and extent_update_to_keys() - but we don't
         * want that here, journal replay is supposed to treat extents like
         * regular keys:
         */
        BUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

        ret   = bch2_btree_iter_traverse(iter) ?:
                bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
        unsigned commit_flags = BTREE_INSERT_NOFAIL|
                BTREE_INSERT_LAZY_RW;

        if (!k->allocated)
                commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

        return bch2_trans_do(c, NULL, NULL, commit_flags,
                             __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
        struct btree_iter *iter;
        int ret;

        iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
                                   BTREE_ITER_CACHED|
                                   BTREE_ITER_CACHED_NOFILL|
                                   BTREE_ITER_INTENT);
        ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
        return bch2_trans_do(c, NULL, NULL,
                             BTREE_INSERT_NOFAIL|
                             BTREE_INSERT_USE_RESERVE|
                             BTREE_INSERT_LAZY_RW|
                             BTREE_INSERT_JOURNAL_REPLAY,
                        __bch2_alloc_replay_key(&trans, k));
}

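/*
 * Replay sort order: interior node updates first (sorted by level,
 * descending), then within each level in the order the updates were
 * journalled (ascending journal sequence number):
 */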
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(r->level,       l->level) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->btree_id,    r->btree_id) ?:
                bkey_cmp(l->k->k.p,     r->k->k.p);
}

static int bch2_journal_replay(struct bch_fs *c,
                               struct journal_keys keys)
{
        struct journal *j = &c->journal;
        struct journal_key *i;
        u64 seq;
        int ret;

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

        if (keys.nr)
                replay_now_at(j, keys.journal_seq_base);

        seq = j->replay_journal_seq;

        /*
         * First replay updates to the alloc btree - these will only update the
         * btree key cache:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (!i->level && i->btree_id == BTREE_ID_alloc) {
                        j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
                        ret = bch2_alloc_replay_key(c, i->k);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Next replay updates to interior btree nodes:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (i->level) {
                        j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
                        ret = bch2_journal_replay_key(c, i);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Now that the btree is in a consistent state, we can start journal
         * reclaim (which will be flushing entries from the btree key cache
         * back to the btree):
         */
        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
        journal_reclaim_kick(j);

        j->replay_journal_seq = seq;

        /*
         * Now replay leaf node updates:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (i->level || i->btree_id == BTREE_ID_alloc)
                        continue;

                replay_now_at(j, keys.journal_seq_base + i->journal_seq);

                ret = bch2_journal_replay_key(c, i);
                if (ret)
                        goto err;
        }

        replay_now_at(j, j->replay_journal_seq_end);
        j->replay_journal_seq = 0;

        bch2_journal_set_replay_done(j);
        bch2_journal_flush_all_pins(j);
        return bch2_journal_error(j);
err:
        bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
                ret, bch2_btree_ids[i->btree_id], i->level);
        return ret;
}

/* journal replay early: */

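/*
 * "Early" replay applies the non-key journal entries - btree roots, usage
 * counters, blacklists and IO clocks - which are needed before the btree can
 * be used at all:
 */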
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r;

                if (entry->btree_id >= BTREE_ID_NR) {
                        bch_err(c, "filesystem has unknown btree type %u",
                                entry->btree_id);
                        return -EINVAL;
                }

                r = &c->btree_roots[entry->btree_id];

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (entry->btree_id) {
                case FS_USAGE_RESERVED:
                        if (entry->level < BCH_REPLICAS_MAX)
                                c->usage_base->persistent_reserved[entry->level] =
                                        le64_to_cpu(u->v);
                        break;
                case FS_USAGE_INODES:
                        c->usage_base->nr_inodes = le64_to_cpu(u->v);
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->v));
                        break;
                }

                break;
        }
        case BCH_JSET_ENTRY_data_usage: {
                struct jset_entry_data_usage *u =
                        container_of(entry, struct jset_entry_data_usage, entry);

                ret = bch2_replicas_set_usage(c, &u->r,
                                              le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_dev_usage: {
                struct jset_entry_dev_usage *u =
                        container_of(entry, struct jset_entry_dev_usage, entry);
                struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
                unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
                unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
                        sizeof(struct jset_entry_dev_usage_type);
                unsigned i;

                ca->usage_base->buckets_ec              = le64_to_cpu(u->buckets_ec);
                ca->usage_base->buckets_unavailable     = le64_to_cpu(u->buckets_unavailable);

                for (i = 0; i < nr_types; i++) {
                        ca->usage_base->d[i].buckets    = le64_to_cpu(u->d[i].buckets);
                        ca->usage_base->d[i].sectors    = le64_to_cpu(u->d[i].sectors);
                        ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
                }

                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
                struct jset_entry_blacklist *bl_entry =
                        container_of(entry, struct jset_entry_blacklist, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->seq),
                                le64_to_cpu(bl_entry->seq) + 1);
                break;
        }
        case BCH_JSET_ENTRY_blacklist_v2: {
                struct jset_entry_blacklist_v2 *bl_entry =
                        container_of(entry, struct jset_entry_blacklist_v2, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->start),
                                le64_to_cpu(bl_entry->end) + 1);
                break;
        }
        case BCH_JSET_ENTRY_clock: {
                struct jset_entry_clock *clock =
                        container_of(entry, struct jset_entry_clock, entry);

                atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
        }
        }

        return ret;
}

static int journal_replay_early(struct bch_fs *c,
                                struct bch_sb_field_clean *clean,
                                struct list_head *journal)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        int ret;

        if (clean) {
                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                return ret;
                }
        } else {
                list_for_each_entry(i, journal, list) {
                        if (i->ignore)
                                continue;

                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        return ret;
                        }
                }
        }

        bch2_fs_usage_initialize(c);

        return 0;
}

/* sb clean section: */

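/*
 * Find the btree root entry for @id, in either the superblock clean section
 * or the given journal entry:
 */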
static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean **cleanp,
                                   struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        int ret = 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                char buf1[200], buf2[200];
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(k1)) ||
                                    l1 != l2, c,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
                        l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
                        l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
        }
fsck_err:
        return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

        if (fsck_err_on(!sb_clean, c,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-ENOMEM);
        }

        ret = bch2_sb_clean_validate(c, clean, READ);
        if (ret) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(ret);
        }

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_root *r = &c->btree_roots[i];

                if (!r->alive)
                        continue;

                if (i == BTREE_ID_alloc &&
                    c->opts.reconstruct_alloc) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                        continue;
                }

                if (r->error) {
                        __fsck_err(c, i == BTREE_ID_alloc
                                   ? FSCK_CAN_IGNORE : 0,
                                   "invalid btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                }

                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        __fsck_err(c, i == BTREE_ID_alloc
                                   ? FSCK_CAN_IGNORE : 0,
                                   "error reading btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                }
        }

        for (i = 0; i < BTREE_ID_NR; i++)
                if (!c->btree_roots[i].b)
                        bch2_btree_root_alloc(c, i);
fsck_err:
        return ret;
}

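/*
 * Recovery in outline: read the superblock clean section and/or the journal,
 * sort and dedup the journal keys, apply the non-key journal entries early,
 * read the btree roots and the alloc/stripes info, run mark and sweep (and
 * fsck) if needed, replay the journal keys into the btree, and finally
 * update the superblock:
 */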
int bch2_fs_recovery(struct bch_fs *c)
{
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL;
        struct jset *last_journal_entry = NULL;
        u64 blacklist_seq, journal_seq;
        bool write_sb = false;
        int ret;

        if (c->sb.clean)
                clean = read_superblock_clean(c);
        ret = PTR_ERR_OR_ZERO(clean);
        if (ret)
                goto err;

        if (c->sb.clean)
                bch_info(c, "recovering from clean shutdown, journal seq %llu",
                         le64_to_cpu(clean->journal_seq));

        if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
                bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
                ret = -EINVAL;
                goto err;
        }

        if (!c->sb.clean &&
            !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
                bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
                ret = -EINVAL;
                goto err;
        }

        if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
                bch_info(c, "alloc_v2 feature bit not set, fsck required");
                c->opts.fsck = true;
                c->opts.fix_errors = FSCK_OPT_YES;
        }

        if (!c->replicas.entries ||
            c->opts.rebuild_replicas) {
                bch_info(c, "building replicas info");
                set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        }

        ret = bch2_blacklist_table_initialize(c);
        if (ret) {
                bch_err(c, "error initializing blacklist table");
                goto err;
        }

        if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
                struct journal_replay *i;

                ret = bch2_journal_read(c, &c->journal_entries,
                                        &blacklist_seq, &journal_seq);
                if (ret)
                        goto err;

                list_for_each_entry_reverse(i, &c->journal_entries, list)
                        if (!i->ignore) {
                                last_journal_entry = &i->j;
                                break;
                        }

                if (mustfix_fsck_err_on(c->sb.clean &&
                                        last_journal_entry &&
                                        !journal_entry_empty(last_journal_entry), c,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                        c->sb.clean = false;
                }

                if (!last_journal_entry) {
                        fsck_err_on(!c->sb.clean, c, "no journal entries found");
                        goto use_clean;
                }

                c->journal_keys = journal_keys_sort(&c->journal_entries);
                if (!c->journal_keys.d) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (c->sb.clean && last_journal_entry) {
                        ret = verify_superblock_clean(c, &clean,
                                                      last_journal_entry);
                        if (ret)
                                goto err;
                }
        } else {
use_clean:
                if (!clean) {
                        bch_err(c, "no superblock clean section found");
                        ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
                        goto err;
                }
                blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
        }

        if (c->opts.reconstruct_alloc) {
                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                drop_alloc_keys(&c->journal_keys);
        }

        ret = journal_replay_early(c, clean, &c->journal_entries);
        if (ret)
                goto err;

        /*
         * After an unclean shutdown, skip and blacklist the next few journal
         * sequence numbers: they may have been referenced by btree writes
         * that happened before their corresponding journal writes, and those
         * btree writes need to be ignored:
         */
        if (!c->sb.clean)
                journal_seq += 8;

        if (blacklist_seq != journal_seq) {
                ret = bch2_journal_seq_blacklist_add(c,
                                        blacklist_seq, journal_seq);
                if (ret) {
                        bch_err(c, "error creating new journal seq blacklist entry");
                        goto err;
                }
        }

        ret = bch2_fs_journal_start(&c->journal, journal_seq,
                                    &c->journal_entries);
        if (ret)
                goto err;

        ret = read_btree_roots(c);
        if (ret)
                goto err;

        bch_verbose(c, "starting alloc read");
        err = "error reading allocation information";
        ret = bch2_alloc_read(c, &c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "alloc read done");

        bch_verbose(c, "starting stripes_read");
        err = "error reading stripes";
        ret = bch2_stripes_read(c, &c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "stripes_read done");

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

        if (c->opts.fsck ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
            test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
                bch_info(c, "starting mark and sweep");
                err = "error in mark and sweep";
                ret = bch2_gc(c, true);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");
        }

        bch2_stripes_heap_start(c);

        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        if (c->opts.norecovery)
                goto out;

        bch_verbose(c, "starting journal replay");
        err = "journal replay failed";
        ret = bch2_journal_replay(c, c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "journal replay done");

        if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
            !c->opts.nochanges) {
                /*
                 * note that even when filesystem was clean there might be work
                 * to do here, if we ran gc (because of fsck) which recalculated
                 * oldest_gen:
                 */
                bch_verbose(c, "writing allocation info");
                err = "error writing out alloc info";
                ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
                        bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
                if (ret) {
                        bch_err(c, "error writing alloc info");
                        goto err;
                }
                bch_verbose(c, "alloc write done");
        }

        if (!c->sb.clean) {
                if (!(c->sb.features & (1ULL << BCH_FEATURE_atomic_nlink))) {
1177                         bch_info(c, "checking inode link counts");
1178                         err = "error in recovery";
1179                         ret = bch2_fsck_inode_nlink(c);
1180                         if (ret)
1181                                 goto err;
1182                         bch_verbose(c, "check inodes done");
1183
1184                 } else {
1185                         bch_verbose(c, "checking for deleted inodes");
1186                         err = "error in recovery";
1187                         ret = bch2_fsck_walk_inodes_only(c);
1188                         if (ret)
1189                                 goto err;
1190                         bch_verbose(c, "check inodes done");
1191                 }
1192         }
1193
1194         if (c->opts.fsck) {
1195                 bch_info(c, "starting fsck");
1196                 err = "error in fsck";
1197                 ret = bch2_fsck_full(c);
1198                 if (ret)
1199                         goto err;
1200                 bch_verbose(c, "fsck done");
1201         }
1202
1203         if (enabled_qtypes(c)) {
1204                 bch_verbose(c, "reading quotas");
1205                 ret = bch2_fs_quota_read(c);
1206                 if (ret)
1207                         goto err;
1208                 bch_verbose(c, "quotas done");
1209         }
1210
1211         if (!(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE)) ||
1212             !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE))) {
1213                 struct bch_move_stats stats = { 0 };
1214
1215                 bch_verbose(c, "scanning for old btree nodes");
1216                 ret = bch2_fs_read_write(c);
1217                 if (ret)
1218                         goto err;
1219
1220                 ret = bch2_scan_old_btree_nodes(c, &stats);
1221                 if (ret)
1222                         goto err;
1223                 bch_verbose(c, "scanning for old btree nodes done");
1224         }
1225
1226         mutex_lock(&c->sb_lock);
1227         if (c->opts.version_upgrade) {
1228                 c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
1229                 c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
1230                 write_sb = true;
1231         }
1232
1233         if (!test_bit(BCH_FS_ERROR, &c->flags)) {
1234                 c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
1235                 write_sb = true;
1236         }
1237
1238         if (c->opts.fsck &&
1239             !test_bit(BCH_FS_ERROR, &c->flags)) {
1240                 c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
1241                 SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
1242                 write_sb = true;
1243         }
1244
1245         if (write_sb)
1246                 bch2_write_super(c);
1247         mutex_unlock(&c->sb_lock);
1248
1249         if (c->journal_seq_blacklist_table &&
1250             c->journal_seq_blacklist_table->nr > 128)
1251                 queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
1252 out:
1253         ret = 0;
1254 err:
1255 fsck_err:
1256         set_bit(BCH_FS_FSCK_DONE, &c->flags);
1257         bch2_flush_fsck_errs(c);
1258
1259         if (!c->opts.keep_journal) {
1260                 bch2_journal_keys_free(&c->journal_keys);
1261                 bch2_journal_entries_free(&c->journal_entries);
1262         }
1263         kfree(clean);
1264         if (ret)
1265                 bch_err(c, "Error in recovery: %s (%i)", err, ret);
1266         else
1267                 bch_verbose(c, "ret %i", ret);
1268         return ret;
1269 }
1270
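/*
 * Initialize a brand new filesystem: allocate empty btree roots and journal
 * buckets, then create the root directory and lost+found via normal btree
 * updates:
 */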
int bch2_fs_initialize(struct bch_fs *c)
{
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct qstr lostfound = QSTR("lost+found");
        const char *err = "cannot allocate memory";
        struct bch_dev *ca;
        LIST_HEAD(journal);
        unsigned i;
        int ret;

        bch_notice(c, "initializing new filesystem");

        mutex_lock(&c->sb_lock);
        c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE;
        c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE;

        if (c->opts.version_upgrade) {
1289                 c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
1290                 c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
1291                 bch2_write_super(c);
1292         }
1293
1294         for_each_online_member(ca, c, i)
1295                 bch2_mark_dev_superblock(c, ca, 0);
1296         mutex_unlock(&c->sb_lock);
1297
1298         set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
1299         set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
1300
1301         for (i = 0; i < BTREE_ID_NR; i++)
1302                 bch2_btree_root_alloc(c, i);
1303
1304         set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
1305         set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);
1306
1307         err = "unable to allocate journal buckets";
1308         for_each_online_member(ca, c, i) {
1309                 ret = bch2_dev_journal_alloc(ca);
1310                 if (ret) {
1311                         percpu_ref_put(&ca->io_ref);
1312                         goto err;
1313                 }
1314         }
1315
1316         /*
1317          * journal_res_get() will crash if called before this has
1318          * set up the journal.pin FIFO and journal.cur pointer:
1319          */
1320         bch2_fs_journal_start(&c->journal, 1, &journal);
1321         bch2_journal_set_replay_done(&c->journal);
1322
1323         err = "error going read-write";
1324         ret = bch2_fs_read_write_early(c);
1325         if (ret)
1326                 goto err;
1327
1328         /*
1329          * Write out the superblock and journal buckets, now that we can do
1330          * btree updates
1331          */
1332         err = "error writing alloc info";
1333         ret = bch2_alloc_write(c, 0);
1334         if (ret)
1335                 goto err;
1336
1337         bch2_inode_init(c, &root_inode, 0, 0,
1338                         S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
1339         root_inode.bi_inum = BCACHEFS_ROOT_INO;
1340         bch2_inode_pack(c, &packed_inode, &root_inode);
1341
1342         err = "error creating root directory";
1343         ret = bch2_btree_insert(c, BTREE_ID_inodes,
1344                                 &packed_inode.inode.k_i,
1345                                 NULL, NULL, 0);
1346         if (ret)
1347                 goto err;
1348
1349         bch2_inode_init_early(c, &lostfound_inode);
1350
1351         err = "error creating lost+found";
1352         ret = bch2_trans_do(c, NULL, NULL, 0,
1353                 bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
1354                                   &root_inode, &lostfound_inode,
1355                                   &lostfound,
1356                                   0, 0, S_IFDIR|0700, 0,
1357                                   NULL, NULL));
1358         if (ret) {
1359                 bch_err(c, "error creating lost+found");
1360                 goto err;
1361         }
1362
1363         if (enabled_qtypes(c)) {
1364                 ret = bch2_fs_quota_read(c);
1365                 if (ret)
1366                         goto err;
1367         }
1368
1369         err = "error writing first journal entry";
1370         ret = bch2_journal_meta(&c->journal);
1371         if (ret)
1372                 goto err;
1373
1374         mutex_lock(&c->sb_lock);
1375         SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
1376         SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
1377
1378         bch2_write_super(c);
1379         mutex_unlock(&c->sb_lock);
1380
1381         return 0;
1382 err:
1383         pr_err("Error initializing new filesystem: %s (%i)", err, ret);
1384         return ret;
1385 }