// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
        size_t src, dst;

        for (src = 0, dst = 0; src < keys->nr; src++)
                if (keys->d[src].btree_id != BTREE_ID_ALLOC)
                        keys->d[dst++] = keys->d[src];

        keys->nr = dst;
}

/* iterate over keys read from the journal: */

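/*
 * Journal keys are ordered by (btree_id, level, pos); these comparators
 * define that ordering both for position-only lookups and for comparing two
 * journal keys:
 */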
static int __journal_key_cmp(enum btree_id      l_btree_id,
                             unsigned           l_level,
                             struct bpos        l_pos,
                             struct journal_key *r)
{
        return (cmp_int(l_btree_id,     r->btree_id) ?:
                cmp_int(l_level,        r->level) ?:
                bkey_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
        return (cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bkey_cmp(l->k->k.p,     r->k->k.p));
}

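/*
 * Binary search for the index of the first key >= the search position - i.e.
 * a standard lower bound over the sorted journal keys array; the BUG_ON()s
 * assert the lower-bound invariant on the result:
 */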
static size_t journal_key_search(struct journal_keys *journal_keys,
                                 enum btree_id id, unsigned level,
                                 struct bpos pos)
{
        size_t l = 0, r = journal_keys->nr, m;

        while (l < r) {
                m = l + ((r - l) >> 1);
                if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
                        l = m + 1;
                else
                        r = m;
        }

        BUG_ON(l < journal_keys->nr &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

        BUG_ON(l &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

        return l;
}

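/*
 * When a key is inserted into the journal keys array at @idx, any live
 * iterator positioned at or past the insertion point has its index bumped so
 * it neither skips nor revisits a key:
 */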
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
        struct bkey_i *n = iter->keys->d[idx].k;
        struct btree_and_journal_iter *biter =
                container_of(iter, struct btree_and_journal_iter, journal);

        if (iter->idx > idx ||
            (iter->idx == idx &&
             biter->last &&
             bkey_cmp(n->k.p, biter->unpacked.p) <= 0))
                iter->idx++;
}

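/*
 * Insert a key into the in-memory journal keys array, replacing any existing
 * key at the same position; the array grows by doubling when full. Keys
 * inserted here are flagged 'allocated' so they're kfree()d when the journal
 * keys are freed:
 */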
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bkey_i *k)
{
        struct journal_key n = {
                .btree_id       = id,
                .level          = level,
                .k              = k,
                .allocated      = true
        };
        struct journal_keys *keys = &c->journal_keys;
        struct journal_iter *iter;
        unsigned idx = journal_key_search(keys, id, level, k->k.p);

        if (idx < keys->nr &&
            journal_key_cmp(&n, &keys->d[idx]) == 0) {
                if (keys->d[idx].allocated)
                        kfree(keys->d[idx].k);
                keys->d[idx] = n;
                return 0;
        }

        if (keys->nr == keys->size) {
                struct journal_keys new_keys = {
                        .nr                     = keys->nr,
                        .size                   = keys->size * 2,
                        .journal_seq_base       = keys->journal_seq_base,
                };

                new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
                if (!new_keys.d)
                        return -ENOMEM;

                memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
                kvfree(keys->d);
                *keys = new_keys;
        }

        array_insert_item(keys->d, keys->nr, idx, n);

        list_for_each_entry(iter, &c->journal_iters, list)
                journal_iter_fix(c, iter, idx);

        return 0;
}

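/*
 * Deletion is implemented by inserting a whiteout: bkey_init() leaves the key
 * as a deleted key with no value, so only the bkey header needs to be
 * allocated:
 */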
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bpos pos)
{
        struct bkey_i *whiteout =
                kmalloc(sizeof(struct bkey), GFP_KERNEL);
        int ret;

        if (!whiteout)
                return -ENOMEM;

        bkey_init(&whiteout->k);
        whiteout->k.p = pos;

        ret = bch2_journal_key_insert(c, id, level, whiteout);
        if (ret)
                kfree(whiteout);
        return ret;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
        struct journal_key *k = iter->idx < iter->keys->nr
                ? iter->keys->d + iter->idx : NULL;

        if (k &&
            k->btree_id == iter->btree_id &&
            k->level    == iter->level)
                return k->k;

        iter->idx = iter->keys->nr;
        return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
        if (iter->idx < iter->keys->nr)
                iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
        list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
                                   struct journal_iter *iter,
                                   enum btree_id id, unsigned level,
                                   struct bpos pos)
{
        iter->btree_id  = id;
        iter->level     = level;
        iter->keys      = &c->journal_keys;
        iter->idx       = journal_key_search(&c->journal_keys, id, level, pos);
        list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
        return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
                                                iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
        bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
        switch (iter->last) {
        case none:
                break;
        case btree:
                bch2_journal_iter_advance_btree(iter);
                break;
        case journal:
                bch2_journal_iter_advance(&iter->journal);
                break;
        }

        iter->last = none;
}

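/*
 * Merge iteration: peek both the btree node and the journal keys and return
 * whichever sorts first; on a tie the journal key wins (it's newer) and the
 * btree side is advanced past the overwritten key.
 *
 * Typical usage, as in bch2_btree_and_journal_walk_recurse() below (sketch):
 *
 *	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 *	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 *		... process k ...
 *		bch2_btree_and_journal_iter_advance(&iter);
 *	}
 *	bch2_btree_and_journal_iter_exit(&iter);
 */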
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
        struct bkey_s_c ret;

        while (1) {
                struct bkey_s_c btree_k         =
                        bch2_journal_iter_peek_btree(iter);
                struct bkey_s_c journal_k       =
                        bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

                if (btree_k.k && journal_k.k) {
                        int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);

                        if (!cmp)
                                bch2_journal_iter_advance_btree(iter);

                        iter->last = cmp < 0 ? btree : journal;
                } else if (btree_k.k) {
                        iter->last = btree;
                } else if (journal_k.k) {
                        iter->last = journal;
                } else {
                        iter->last = none;
                        return bkey_s_c_null;
                }

                ret = iter->last == journal ? journal_k : btree_k;

                if (iter->b &&
                    bkey_cmp(ret.k->p, iter->b->data->max_key) > 0) {
                        iter->journal.idx = iter->journal.keys->nr;
                        iter->last = none;
                        return bkey_s_c_null;
                }

                if (!bkey_deleted(ret.k))
                        break;

                bch2_btree_and_journal_iter_advance(iter);
        }

        return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
        bch2_btree_and_journal_iter_advance(iter);

        return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
        bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
                                                struct bch_fs *c,
                                                struct btree *b)
{
        memset(iter, 0, sizeof(*iter));

        iter->b = b;
        bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
        bch2_journal_iter_init(c, &iter->journal,
                               b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

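/*
 * Note: the iterator is passed by value, so prefetching doesn't disturb the
 * caller's position. Fewer nodes are prefetched above level 1, since each one
 * covers more of the keyspace:
 */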
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
                                           struct btree_and_journal_iter iter)
{
        unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
        struct bkey_s_c k;
        struct bkey_buf tmp;

        BUG_ON(!b->c.level);

        bch2_bkey_buf_init(&tmp);

        while (i < nr &&
               (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&tmp, c, k);

                bch2_btree_node_prefetch(c, NULL, tmp.k,
                                        b->c.btree_id, b->c.level - 1);

                bch2_btree_and_journal_iter_advance(&iter);
                i++;
        }

        bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
                                struct journal_keys *journal_keys,
                                enum btree_id btree_id,
                                btree_walk_node_fn node_fn,
                                btree_walk_key_fn key_fn)
{
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        struct btree *child;
        int ret = 0;

        bch2_bkey_buf_init(&tmp);
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                ret = key_fn(c, btree_id, b->c.level, k);
                if (ret)
                        break;

                if (b->c.level) {
                        bch2_bkey_buf_reassemble(&tmp, c, k);

                        bch2_btree_and_journal_iter_advance(&iter);

                        child = bch2_btree_node_get_noiter(c, tmp.k,
                                                b->c.btree_id, b->c.level - 1,
                                                false);

                        ret = PTR_ERR_OR_ZERO(child);
                        if (ret)
                                break;

                        btree_and_journal_iter_prefetch(c, b, iter);

                        ret   = (node_fn ? node_fn(c, b) : 0) ?:
                                bch2_btree_and_journal_walk_recurse(c, child,
                                        journal_keys, btree_id, node_fn, key_fn);
                        six_unlock_read(&child->c.lock);

                        if (ret)
                                break;
                } else {
                        bch2_btree_and_journal_iter_advance(&iter);
                }
        }

        bch2_btree_and_journal_iter_exit(&iter);
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
                                enum btree_id btree_id,
                                btree_walk_node_fn node_fn,
                                btree_walk_key_fn key_fn)
{
        struct btree *b = c->btree_roots[btree_id].b;
        int ret = 0;

        if (btree_node_fake(b))
                return 0;

        six_lock_read(&b->c.lock, NULL, NULL);
        ret   = (node_fn ? node_fn(c, b) : 0) ?:
                bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
                                                    node_fn, key_fn) ?:
                key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
        six_unlock_read(&b->c.lock);

        return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
        while (!list_empty(list)) {
                struct journal_replay *i =
                        list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kvpfree(i, offsetof(struct journal_replay, j) +
                        vstruct_bytes(&i->j));
        }
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bkey_cmp(l->k->k.p, r->k->k.p) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
        struct journal_key *i;

        for (i = keys->d; i < keys->d + keys->nr; i++)
                if (i->allocated)
                        kfree(i->k);

        kvfree(keys->d);
        keys->d = NULL;
        keys->nr = 0;
}

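/*
 * Two passes over the journal entries: first count the keys so the array can
 * be sized up front, then fill it in. After sorting, duplicate keys at the
 * same position are deduped keeping only the newest version - the sort puts
 * oldest first, and the dedup loop skips all but the last of each run:
 */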
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        struct bkey_i *k, *_n;
        struct journal_keys keys = { NULL };
        struct journal_key *src, *dst;
        size_t nr_keys = 0;

        if (list_empty(journal_entries))
                return keys;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                if (!keys.journal_seq_base)
                        keys.journal_seq_base = le64_to_cpu(i->j.seq);

                for_each_jset_key(k, _n, entry, &i->j)
                        nr_keys++;
        }

        keys.size = roundup_pow_of_two(nr_keys);

        keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
        if (!keys.d)
                goto err;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

                for_each_jset_key(k, _n, entry, &i->j)
                        keys.d[keys.nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .level          = entry->level,
                                .k              = k,
                                .journal_seq    = le64_to_cpu(i->j.seq) -
                                        keys.journal_seq_base,
                                .journal_offset = k->_data - i->j._data,
                        };
        }

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

        src = dst = keys.d;
        while (src < keys.d + keys.nr) {
                while (src + 1 < keys.d + keys.nr &&
                       src[0].btree_id  == src[1].btree_id &&
                       src[0].level     == src[1].level &&
                       !bkey_cmp(src[0].k->k.p, src[1].k->k.p))
                        src++;

                *dst++ = *src++;
        }

        keys.nr = dst - keys.d;
err:
        return keys;
}

/* journal replay: */

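/*
 * As replay advances through the journal, drop our pins on the sequence
 * numbers we're done with, so journal reclaim can make forward progress:
 */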
static void replay_now_at(struct journal *j, u64 seq)
{
        BUG_ON(seq < j->replay_journal_seq);
        BUG_ON(seq > j->replay_journal_seq_end);

        while (j->replay_journal_seq < seq)
                bch2_journal_pin_put(j, j->replay_journal_seq++);
}

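/*
 * Replaying an extent may require splitting it at btree node boundaries; each
 * fragment is inserted with BTREE_TRIGGER_NORUN, and for extents whose
 * triggers aren't split-invariant (compressed extents, reflink pointers) the
 * accounting is redone manually via bch2_trans_mark_key():
 */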
static int bch2_extent_replay_key(struct bch_fs *c, enum btree_id btree_id,
                                  struct bkey_i *k)
{
        struct btree_trans trans;
        struct btree_iter *iter, *split_iter;
        /*
         * We might cause compressed extents to be split, so we need to pass in
         * a disk_reservation:
         */
        struct disk_reservation disk_res =
                bch2_disk_reservation_init(c, 0);
        struct bkey_i *split;
        struct bpos atomic_end;
        /*
         * Some extents aren't equivalent - w.r.t. what the triggers do
         * - if they're split:
         */
        bool remark_if_split = bch2_bkey_sectors_compressed(bkey_i_to_s_c(k)) ||
                k->k.type == KEY_TYPE_reflink_p;
        bool remark = false;
        int ret;

        bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
retry:
        bch2_trans_begin(&trans);

        iter = bch2_trans_get_iter(&trans, btree_id,
                                   bkey_start_pos(&k->k),
                                   BTREE_ITER_INTENT);

        do {
                ret = bch2_btree_iter_traverse(iter);
                if (ret)
                        goto err;

                atomic_end = bpos_min(k->k.p, iter->l[0].b->key.k.p);

                split = bch2_trans_kmalloc(&trans, bkey_bytes(&k->k));
                ret = PTR_ERR_OR_ZERO(split);
                if (ret)
                        goto err;

                if (!remark &&
                    remark_if_split &&
                    bkey_cmp(atomic_end, k->k.p) < 0) {
                        ret = bch2_disk_reservation_add(c, &disk_res,
                                        k->k.size *
                                        bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(k)),
                                        BCH_DISK_RESERVATION_NOFAIL);
                        BUG_ON(ret);

                        remark = true;
                }

                bkey_copy(split, k);
                bch2_cut_front(iter->pos, split);
                bch2_cut_back(atomic_end, split);

                split_iter = bch2_trans_copy_iter(&trans, iter);

                /*
                 * It's important that we don't go through the
                 * extent_handle_overwrites() and extent_update_to_keys() path
                 * here: journal replay is supposed to treat extents like
                 * regular keys
                 */
                __bch2_btree_iter_set_pos(split_iter, split->k.p, false);
                bch2_trans_update(&trans, split_iter, split,
                                  BTREE_TRIGGER_NORUN);
                bch2_trans_iter_put(&trans, split_iter);

                bch2_btree_iter_set_pos(iter, split->k.p);

                if (remark) {
                        ret = bch2_trans_mark_key(&trans,
                                                  bkey_s_c_null,
                                                  bkey_i_to_s_c(split),
                                                  0, split->k.size,
                                                  BTREE_TRIGGER_INSERT);
                        if (ret)
                                goto err;
                }
        } while (bkey_cmp(iter->pos, k->k.p) < 0);

        if (remark) {
                ret = bch2_trans_mark_key(&trans,
                                          bkey_i_to_s_c(k),
                                          bkey_s_c_null,
                                          0, -((s64) k->k.size),
                                          BTREE_TRIGGER_OVERWRITE);
                if (ret)
                        goto err;
        }

        ret = bch2_trans_commit(&trans, &disk_res, NULL,
                                BTREE_INSERT_NOFAIL|
                                BTREE_INSERT_LAZY_RW|
                                BTREE_INSERT_JOURNAL_REPLAY);
err:
        bch2_trans_iter_put(&trans, iter);

        if (ret == -EINTR)
                goto retry;

        bch2_disk_reservation_put(c, &disk_res);

        return bch2_trans_exit(&trans) ?: ret;
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
                                     enum btree_id id, unsigned level,
                                     struct bkey_i *k)
{
        struct btree_iter *iter;
        int ret;

        iter = bch2_trans_get_node_iter(trans, id, k->k.p,
                                        BTREE_MAX_DEPTH, level,
                                        BTREE_ITER_INTENT);

        /*
         * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
         * extent_handle_overwrites() and extent_update_to_keys() - but we don't
         * want that here, journal replay is supposed to treat extents like
         * regular keys:
         */
        __bch2_btree_iter_set_pos(iter, k->k.p, false);

        ret   = bch2_btree_iter_traverse(iter) ?:
                bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
        unsigned commit_flags = BTREE_INSERT_NOFAIL|
                BTREE_INSERT_LAZY_RW;

        if (!k->allocated)
                commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

        return bch2_trans_do(c, NULL, NULL, commit_flags,
                             __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
        struct btree_iter *iter;
        int ret;

        iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, k->k.p,
                                   BTREE_ITER_CACHED|
                                   BTREE_ITER_CACHED_NOFILL|
                                   BTREE_ITER_INTENT);
        ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_put(trans, iter);
        return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
        return bch2_trans_do(c, NULL, NULL,
                             BTREE_INSERT_NOFAIL|
                             BTREE_INSERT_USE_RESERVE|
                             BTREE_INSERT_LAZY_RW|
                             BTREE_INSERT_JOURNAL_REPLAY,
                        __bch2_alloc_replay_key(&trans, k));
}

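/*
 * Replay order: interior node updates before leaf updates (note the reversed
 * level comparison), then by journal sequence, so updates within a level are
 * applied in the order they were originally journalled:
 */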
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(r->level,       l->level) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->btree_id,    r->btree_id) ?:
                bkey_cmp(l->k->k.p,     r->k->k.p);
}

static int bch2_journal_replay(struct bch_fs *c,
                               struct journal_keys keys)
{
        struct journal *j = &c->journal;
        struct journal_key *i;
        u64 seq;
        int ret;

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

        if (keys.nr)
                replay_now_at(j, keys.journal_seq_base);

        seq = j->replay_journal_seq;

        /*
         * First replay updates to the alloc btree - these will only update the
         * btree key cache:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (!i->level && i->btree_id == BTREE_ID_ALLOC) {
                        j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
                        ret = bch2_alloc_replay_key(c, i->k);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Next replay updates to interior btree nodes:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (i->level) {
                        j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
                        ret = bch2_journal_replay_key(c, i);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Now that the btree is in a consistent state, we can start journal
         * reclaim (which will be flushing entries from the btree key cache back
         * to the btree):
         */
        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
        journal_reclaim_kick(j);

        j->replay_journal_seq = seq;

        /*
         * Now replay leaf node updates:
         */
        for_each_journal_key(keys, i) {
                cond_resched();

                if (i->level || i->btree_id == BTREE_ID_ALLOC)
                        continue;

                replay_now_at(j, keys.journal_seq_base + i->journal_seq);

                ret = i->k->k.size
                        ? bch2_extent_replay_key(c, i->btree_id, i->k)
                        : bch2_journal_replay_key(c, i);
                if (ret)
                        goto err;
        }

        replay_now_at(j, j->replay_journal_seq_end);
        j->replay_journal_seq = 0;

        bch2_journal_set_replay_done(j);
        bch2_journal_flush_all_pins(j);
        return bch2_journal_error(j);
err:
        bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
                ret, bch2_btree_ids[i->btree_id], i->level);
        return ret;
}

/* journal replay early: */

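/*
 * Some journal entries don't describe btree updates, but filesystem state
 * that has to be reconstructed before replay proper: btree roots, usage
 * counters, replicas entries and blacklisted journal sequence numbers:
 */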
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r;

                if (entry->btree_id >= BTREE_ID_NR) {
                        bch_err(c, "filesystem has unknown btree type %u",
                                entry->btree_id);
                        return -EINVAL;
                }

                r = &c->btree_roots[entry->btree_id];

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (entry->btree_id) {
                case FS_USAGE_RESERVED:
                        if (entry->level < BCH_REPLICAS_MAX)
                                c->usage_base->persistent_reserved[entry->level] =
                                        le64_to_cpu(u->v);
                        break;
                case FS_USAGE_INODES:
                        c->usage_base->nr_inodes = le64_to_cpu(u->v);
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->v));
                        break;
                }

                break;
        }
        case BCH_JSET_ENTRY_data_usage: {
                struct jset_entry_data_usage *u =
                        container_of(entry, struct jset_entry_data_usage, entry);
                ret = bch2_replicas_set_usage(c, &u->r,
                                              le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
                struct jset_entry_blacklist *bl_entry =
                        container_of(entry, struct jset_entry_blacklist, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->seq),
                                le64_to_cpu(bl_entry->seq) + 1);
                break;
        }
        case BCH_JSET_ENTRY_blacklist_v2: {
                struct jset_entry_blacklist_v2 *bl_entry =
                        container_of(entry, struct jset_entry_blacklist_v2, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->start),
                                le64_to_cpu(bl_entry->end) + 1);
                break;
        }
        }

        return ret;
}

static int journal_replay_early(struct bch_fs *c,
                                struct bch_sb_field_clean *clean,
                                struct list_head *journal)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        int ret;

        if (clean) {
                c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
                c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);

                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                return ret;
                }
        } else {
                list_for_each_entry(i, journal, list) {
                        if (i->ignore)
                                continue;

                        c->bucket_clock[READ].hand = le16_to_cpu(i->j.read_clock);
                        c->bucket_clock[WRITE].hand = le16_to_cpu(i->j.write_clock);

                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        return ret;
                        }
                }
        }

        bch2_fs_usage_initialize(c);

        return 0;
}

/* sb clean section: */

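/*
 * Look up a btree root either in the superblock clean section (clean
 * shutdown) or in the last journal entry (unclean shutdown):
 */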
static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean **cleanp,
                                   struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        int ret = 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        mustfix_fsck_err_on(j->read_clock != clean->read_clock, c,
                        "superblock read clock %u doesn't match journal %u after clean shutdown",
                        clean->read_clock, j->read_clock);
        mustfix_fsck_err_on(j->write_clock != clean->write_clock, c,
                        "superblock write clock %u doesn't match journal %u after clean shutdown",
                        clean->write_clock, j->write_clock);

        for (i = 0; i < BTREE_ID_NR; i++) {
                char buf1[200], buf2[200];
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(k1)) ||
                                    l1 != l2, c,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
                        l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
                        l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
        }
fsck_err:
        return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

        if (fsck_err_on(!sb_clean, c,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-ENOMEM);
        }

        if (le16_to_cpu(c->disk_sb.sb->version) <
            bcachefs_metadata_version_bkey_renumber)
                bch2_sb_clean_renumber(clean, READ);

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_root *r = &c->btree_roots[i];

                if (!r->alive)
                        continue;

                if (i == BTREE_ID_ALLOC &&
                    c->opts.reconstruct_alloc) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                        continue;
                }

                if (r->error) {
                        __fsck_err(c, i == BTREE_ID_ALLOC
                                   ? FSCK_CAN_IGNORE : 0,
                                   "invalid btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_ALLOC)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                }

                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        __fsck_err(c, i == BTREE_ID_ALLOC
                                   ? FSCK_CAN_IGNORE : 0,
                                   "error reading btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_ALLOC)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                }
        }

        for (i = 0; i < BTREE_ID_NR; i++)
                if (!c->btree_roots[i].b)
                        bch2_btree_root_alloc(c, i);
fsck_err:
        return ret;
}

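/*
 * Recovery, in order: read the superblock clean section and/or the journal,
 * sort and dedup the journal keys, start the journal, read btree roots, read
 * alloc and stripe info, run mark and sweep gc if needed, replay the journal,
 * then fsck and superblock updates:
 */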
int bch2_fs_recovery(struct bch_fs *c)
{
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL;
        struct jset *last_journal_entry = NULL;
        u64 blacklist_seq, journal_seq;
        bool write_sb = false;
        int ret;

        if (c->sb.clean)
                clean = read_superblock_clean(c);
        ret = PTR_ERR_OR_ZERO(clean);
        if (ret)
                goto err;

        if (c->sb.clean)
                bch_info(c, "recovering from clean shutdown, journal seq %llu",
                         le64_to_cpu(clean->journal_seq));

        if (!c->replicas.entries ||
            c->opts.rebuild_replicas) {
                bch_info(c, "building replicas info");
                set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        }

        ret = bch2_blacklist_table_initialize(c);
        if (ret) {
                bch_err(c, "error initializing blacklist table");
                goto err;
        }

        if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
                struct journal_replay *i;

                ret = bch2_journal_read(c, &c->journal_entries,
                                        &blacklist_seq, &journal_seq);
                if (ret)
                        goto err;

                list_for_each_entry_reverse(i, &c->journal_entries, list)
                        if (!i->ignore) {
                                last_journal_entry = &i->j;
                                break;
                        }

                if (mustfix_fsck_err_on(c->sb.clean &&
                                        last_journal_entry &&
                                        !journal_entry_empty(last_journal_entry), c,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                        c->sb.clean = false;
                }

                if (!last_journal_entry) {
                        fsck_err_on(!c->sb.clean, c, "no journal entries found");
                        goto use_clean;
                }

                c->journal_keys = journal_keys_sort(&c->journal_entries);
                if (!c->journal_keys.d) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (c->sb.clean && last_journal_entry) {
                        ret = verify_superblock_clean(c, &clean,
                                                      last_journal_entry);
                        if (ret)
                                goto err;
                }
        } else {
use_clean:
                if (!clean) {
                        bch_err(c, "no superblock clean section found");
                        ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
                        goto err;
                }
                blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
        }

        if (!c->sb.clean &&
            !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
                bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
                ret = -EINVAL;
                goto err;
        }

        if (c->opts.reconstruct_alloc) {
                c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
                drop_alloc_keys(&c->journal_keys);
        }

        ret = journal_replay_early(c, clean, &c->journal_entries);
        if (ret)
                goto err;

        /*
         * After an unclean shutdown, skip the next few journal sequence
         * numbers as they may have been referenced by btree writes that
         * happened before their corresponding journal writes - those btree
         * writes need to be ignored, by skipping and blacklisting the next few
         * journal sequence numbers:
         */
        if (!c->sb.clean)
                journal_seq += 8;

        if (blacklist_seq != journal_seq) {
                ret = bch2_journal_seq_blacklist_add(c,
                                        blacklist_seq, journal_seq);
                if (ret) {
                        bch_err(c, "error creating new journal seq blacklist entry");
                        goto err;
                }
        }

        ret = bch2_fs_journal_start(&c->journal, journal_seq,
                                    &c->journal_entries);
        if (ret)
                goto err;

        ret = read_btree_roots(c);
        if (ret)
                goto err;

        bch_verbose(c, "starting alloc read");
        err = "error reading allocation information";
        ret = bch2_alloc_read(c, &c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "alloc read done");

        bch_verbose(c, "starting stripes_read");
        err = "error reading stripes";
        ret = bch2_stripes_read(c, &c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "stripes_read done");

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

        if (c->opts.fsck ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_INFO)) ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_ALLOC_METADATA)) ||
            test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
                bch_info(c, "starting mark and sweep");
                err = "error in mark and sweep";
                ret = bch2_gc(c, true);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");
        }

        bch2_stripes_heap_start(c);

        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        if (c->opts.norecovery)
                goto out;

        bch_verbose(c, "starting journal replay");
        err = "journal replay failed";
        ret = bch2_journal_replay(c, c->journal_keys);
        if (ret)
                goto err;
        bch_verbose(c, "journal replay done");

        if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
            !c->opts.nochanges) {
                /*
                 * note that even when filesystem was clean there might be work
                 * to do here, if we ran gc (because of fsck) which recalculated
                 * oldest_gen:
                 */
                bch_verbose(c, "writing allocation info");
                err = "error writing out alloc info";
                ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
                        bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
                if (ret) {
                        bch_err(c, "error writing alloc info");
                        goto err;
                }
                bch_verbose(c, "alloc write done");
        }

        if (!c->sb.clean) {
                if (!(c->sb.features & (1ULL << BCH_FEATURE_atomic_nlink))) {
                        bch_info(c, "checking inode link counts");
                        err = "error in recovery";
                        ret = bch2_fsck_inode_nlink(c);
                        if (ret)
                                goto err;
                        bch_verbose(c, "check inodes done");
                } else {
                        bch_verbose(c, "checking for deleted inodes");
                        err = "error in recovery";
                        ret = bch2_fsck_walk_inodes_only(c);
                        if (ret)
                                goto err;
                        bch_verbose(c, "check inodes done");
                }
        }

        if (c->opts.fsck) {
                bch_info(c, "starting fsck");
                err = "error in fsck";
                ret = bch2_fsck_full(c);
                if (ret)
                        goto err;
                bch_verbose(c, "fsck done");
        }

        if (enabled_qtypes(c)) {
                bch_verbose(c, "reading quotas");
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
                bch_verbose(c, "quotas done");
        }

        mutex_lock(&c->sb_lock);
        if (c->opts.version_upgrade) {
                if (c->sb.version < bcachefs_metadata_version_new_versioning)
                        c->disk_sb.sb->version_min =
                                cpu_to_le16(bcachefs_metadata_version_min);
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
                c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
                write_sb = true;
        }

        if (!test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
                write_sb = true;
        }

        if (c->opts.fsck &&
            !test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
                SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
                write_sb = true;
        }

        if (write_sb)
                bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (c->journal_seq_blacklist_table &&
            c->journal_seq_blacklist_table->nr > 128)
                queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);
out:
        ret = 0;
err:
fsck_err:
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
        bch2_flush_fsck_errs(c);

        if (!c->opts.keep_journal) {
                bch2_journal_keys_free(&c->journal_keys);
                bch2_journal_entries_free(&c->journal_entries);
        }
        kfree(clean);
        if (ret)
                bch_err(c, "Error in recovery: %s (%i)", err, ret);
        else
                bch_verbose(c, "ret %i", ret);
        return ret;
}

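/*
 * Format-time initialization: mark device superblocks, allocate btree roots
 * and journal buckets, then create the root directory and lost+found before
 * marking the superblock initialized:
 */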
int bch2_fs_initialize(struct bch_fs *c)
{
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct qstr lostfound = QSTR("lost+found");
        const char *err = "cannot allocate memory";
        struct bch_dev *ca;
        LIST_HEAD(journal);
        unsigned i;
        int ret;

        bch_notice(c, "initializing new filesystem");

        mutex_lock(&c->sb_lock);
        for_each_online_member(ca, c, i)
                bch2_mark_dev_superblock(c, ca, 0);
        mutex_unlock(&c->sb_lock);

        mutex_lock(&c->sb_lock);
        c->disk_sb.sb->version = c->disk_sb.sb->version_min =
                cpu_to_le16(bcachefs_metadata_version_current);
        c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
        c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);

        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

        err = "unable to allocate journal buckets";
        for_each_online_member(ca, c, i) {
                ret = bch2_dev_journal_alloc(ca);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        goto err;
                }
        }

        /*
         * journal_res_get() will crash if called before this has
         * set up the journal.pin FIFO and journal.cur pointer:
         */
        bch2_fs_journal_start(&c->journal, 1, &journal);
        bch2_journal_set_replay_done(&c->journal);

        err = "error going read-write";
        ret = bch2_fs_read_write_early(c);
        if (ret)
                goto err;

        /*
         * Write out the superblock and journal buckets, now that we can do
         * btree updates
         */
        err = "error writing alloc info";
        ret = bch2_alloc_write(c, 0);
        if (ret)
                goto err;

        bch2_inode_init(c, &root_inode, 0, 0,
                        S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
        root_inode.bi_inum = BCACHEFS_ROOT_INO;
        bch2_inode_pack(c, &packed_inode, &root_inode);

        err = "error creating root directory";
        ret = bch2_btree_insert(c, BTREE_ID_INODES,
                                &packed_inode.inode.k_i,
                                NULL, NULL, 0);
        if (ret)
                goto err;

        bch2_inode_init_early(c, &lostfound_inode);

        err = "error creating lost+found";
        ret = bch2_trans_do(c, NULL, NULL, 0,
                bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
                                  &root_inode, &lostfound_inode,
                                  &lostfound,
                                  0, 0, S_IFDIR|0700, 0,
                                  NULL, NULL));
        if (ret)
                goto err;

        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
        }

        err = "error writing first journal entry";
        ret = bch2_journal_meta(&c->journal);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return 0;
err:
        pr_err("Error initializing new filesystem: %s (%i)", err, ret);
        return ret;
}