// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "lru.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in-memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     const struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(const struct journal_key *l, const struct journal_key *r)
{
	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}

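/*
 * Journal keys are kept in a gap buffer: a sorted array with a hole of
 * (size - nr) unused slots starting at @gap, so that insertions near the
 * gap are cheap. A purely illustrative layout (values invented for the
 * example): with size == 8, nr == 6, gap == 2,
 *
 *	pos:	0    1    2     3     4    5    6    7
 *		d[0] d[1] <gap> <gap> d[2] d[3] d[4] d[5]
 *
 * so idx_to_pos(keys, 2) == 4: logical indices at or past the gap are
 * shifted up by gap_size.
 */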
static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
	size_t gap_size = keys->size - keys->nr;

	if (idx >= keys->gap)
		idx += gap_size;
	return idx;
}

static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
	return keys->d + idx_to_pos(keys, idx);
}

size_t bch2_journal_key_search(struct journal_keys *keys,
			       enum btree_id id, unsigned level,
			       struct bpos pos)
{
	size_t l = 0, r = keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < keys->nr &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);

	return idx_to_pos(keys, l);
}
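
/*
 * Note that bch2_journal_key_search() returns a position in the gapped
 * array, not a logical index, so it can be used to index keys->d directly.
 * A usage sketch (illustrative, not a real caller):
 *
 *	size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);
 *
 *	if (idx < keys->size)
 *		k = keys->d[idx].k;
 *
 * A result of keys->size means no key compares >= @pos; returned positions
 * never point into the gap itself.
 */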

struct bkey_i *bch2_journal_keys_peek_upto(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos,
					   struct bpos end_pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);

	while (idx < keys->size &&
	       keys->d[idx].btree_id == btree_id &&
	       keys->d[idx].level == level &&
	       bpos_cmp(keys->d[idx].k->k.p, end_pos) <= 0) {
		if (!keys->d[idx].overwritten)
			return keys->d[idx].k;

		idx++;
		if (idx == keys->gap)
			idx += keys->size - keys->nr;
	}

	return NULL;
}

struct bkey_i *bch2_journal_keys_peek_slot(struct bch_fs *c, enum btree_id btree_id,
					   unsigned level, struct bpos pos)
{
	return bch2_journal_keys_peek_upto(c, btree_id, level, pos, pos);
}

static void journal_iters_fix(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	/* The key we just inserted is immediately before the gap: */
	struct journal_key *n = &keys->d[keys->gap - 1];
	size_t gap_end = keys->gap + (keys->size - keys->nr);
	struct btree_and_journal_iter *iter;

	/*
	 * If an iterator points one after the key we just inserted,
	 * and the key we just inserted compares > the iterator's position,
	 * decrement the iterator so it points at the key we just inserted:
	 */
	list_for_each_entry(iter, &c->journal_iters, journal.list)
		if (iter->journal.idx == gap_end &&
		    iter->last &&
		    iter->b->c.btree_id == n->btree_id &&
		    iter->b->c.level    == n->level &&
		    bpos_cmp(n->k->k.p, iter->unpacked.p) > 0)
			iter->journal.idx = keys->gap - 1;
}

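/*
 * When the gap moves, array positions of the elements between the old and
 * new gap location shift by gap_size. Live iterators store positions, not
 * logical indices, so map each position back to a logical index (undoing
 * the old gap), then re-apply the new gap:
 */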
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	size_t gap_size = keys->size - keys->nr;

	list_for_each_entry(iter, &c->journal_iters, list) {
		if (iter->idx > old_gap)
			iter->idx -= gap_size;
		if (iter->idx >= new_gap)
			iter->idx += gap_size;
	}
}

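/*
 * Insert a key into the sorted gap buffer: an existing key at the same
 * position is replaced; otherwise the gap is moved to the insertion point
 * (growing the array first if it's full) and the new key becomes the last
 * element before the gap, keeping runs of nearby insertions cheap:
 */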
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
				 unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true,
		/*
		 * Ensure these keys are done last by journal replay, to unblock
		 * journal reclaim:
		 */
		.journal_seq	= U32_MAX,
	};
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);

	BUG_ON(test_bit(BCH_FS_RW, &c->flags));

	if (idx < keys->size &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (idx > keys->gap)
		idx -= keys->size - keys->nr;

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= max_t(size_t, keys->size, 8) * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		/* Since @keys was full, there was no gap: */
		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;

		/* And now the gap is at the end: */
		keys->gap = keys->nr;
	}

	journal_iters_move_gap(c, keys->gap, idx);

	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
	keys->gap = idx;

	keys->nr++;
	keys->d[keys->gap++] = n;

	journal_iters_fix(c);

	return 0;
}

/*
 * Can only be used from the recovery thread while we're still RO - can't be
 * used once we've gone RW, as journal_keys is at that point used by multiple
 * threads:
 */
int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct bkey_i *n;
	int ret;

	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	bkey_copy(n, k);
	ret = bch2_journal_key_insert_take(c, id, level, n);
	if (ret)
		kfree(n);
	return ret;
}
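
/*
 * Usage sketch (mirroring bch2_journal_key_delete() below, which inserts
 * a whiteout to shadow a key that's still present in the btree):
 *
 *	struct bkey_i whiteout;
 *
 *	bkey_init(&whiteout.k);
 *	whiteout.k.p = pos;
 *	ret = bch2_journal_key_insert(c, id, level, &whiteout);
 *
 * The key is copied, so a stack-allocated bkey is fine; the copy is freed
 * when journal_keys is torn down.
 */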

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i whiteout;

	bkey_init(&whiteout.k);
	whiteout.k.p = pos;

	return bch2_journal_key_insert(c, id, level, &whiteout);
}

void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
				  unsigned level, struct bpos pos)
{
	struct journal_keys *keys = &c->journal_keys;
	size_t idx = bch2_journal_key_search(keys, btree, level, pos);

	if (idx < keys->size &&
	    keys->d[idx].btree_id	== btree &&
	    keys->d[idx].level		== level &&
	    !bpos_cmp(keys->d[idx].k->k.p, pos))
		keys->d[idx].overwritten = true;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->keys->d + iter->idx;

	while (k < iter->keys->d + iter->keys->nr &&
	       k->btree_id	== iter->btree_id &&
	       k->level		== iter->level) {
		if (!k->overwritten)
			return k->k;

		iter->idx++;
		k = iter->keys->d + iter->idx;
	}

	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->size) {
		iter->idx++;
		if (iter->idx == iter->keys->gap)
			iter->idx += iter->keys->size - iter->keys->nr;
	}
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= bch2_journal_key_search(&c->journal_keys, id, level, pos);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}

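/*
 * Merged iteration over a btree node and the journal keys for that node:
 * return whichever of the two current keys compares lower; on a tie the
 * journal key wins (it's newer) and the btree iterator is advanced past
 * the overwritten key. Deleted keys are skipped, and iteration stops once
 * we're past the end of the node:
 */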
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k		=
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k	=
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void __bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						  struct bch_fs *c,
						  struct btree *b,
						  struct btree_node_iter node_iter,
						  struct bpos pos)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	iter->node_iter = node_iter;
	bch2_journal_iter_init(c, &iter->journal, b->c.btree_id, b->c.level, pos);
	INIT_LIST_HEAD(&iter->journal.list);
}

/*
 * this version is used by btree_gc before the filesystem has gone RW and
 * multithreaded, so it uses the journal_iters list:
 */
void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	struct btree_node_iter node_iter;

	bch2_btree_node_iter_init_from_start(&node_iter, b);
	__bch2_btree_and_journal_iter_init_node_iter(iter, c, b, node_iter, b->data->min_key);
	list_add(&iter->journal.list, &c->journal_iters);
}
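
/*
 * Usage sketch, mirroring how btree_gc walks a node with journal keys
 * overlaid (hedged - real callers may hold additional locks):
 *
 *	struct btree_and_journal_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
 *	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
 *		// process k
 *		bch2_btree_and_journal_iter_advance(&iter);
 *	}
 *	bch2_btree_and_journal_iter_exit(&iter);
 */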

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct bch_fs *c)
{
	struct journal_replay **i;
	struct genradix_iter iter;

	genradix_for_each(&c->journal_entries, iter, i)
		if (*i)
			kvpfree(*i, offsetof(struct journal_replay, j) +
				vstruct_bytes(&(*i)->j));
	genradix_free(&c->journal_entries);
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  journal_key_cmp(l, r) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = keys->gap = keys->size = 0;
}

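/*
 * Flatten the keys from all journal entries into a single sorted array:
 * keys sort by (btree_id, level, pos), with equal keys ordered oldest
 * first, and the dedup pass below keeps only the last - i.e. newest -
 * key of each run of duplicates:
 */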
static int journal_keys_sort(struct bch_fs *c)
{
	struct genradix_iter iter;
	struct journal_replay *i, **_i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		if (!keys->journal_seq_base)
			keys->journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	if (!nr_keys)
		return 0;

	keys->size = roundup_pow_of_two(nr_keys);

	keys->d = kvmalloc(sizeof(keys->d[0]) * keys->size, GFP_KERNEL);
	if (!keys->d)
		return -ENOMEM;

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys->journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys->d[keys->nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys->journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys->d;
	while (src < keys->d + keys->nr) {
		while (src + 1 < keys->d + keys->nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys->nr = dst - keys->d;
	keys->gap = keys->nr;
	return 0;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);

	seq = min(seq, j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

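/*
 * Replay a single key into the btree. Leaf level alloc keys go through the
 * btree key cache (BTREE_ITER_CACHED), and triggers don't run
 * (BTREE_TRIGGER_NORUN) - presumably because disk accounting was already
 * reconstructed earlier in recovery:
 */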
static int bch2_journal_replay_key(struct btree_trans *trans,
				   struct journal_key *k)
{
	struct btree_iter iter;
	unsigned iter_flags =
		BTREE_ITER_INTENT|
		BTREE_ITER_NOT_EXTENTS;
	int ret;

	if (!k->level && k->btree_id == BTREE_ID_alloc)
		iter_flags |= BTREE_ITER_CACHED;

	bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
				  BTREE_MAX_DEPTH, k->level,
				  iter_flags);
	ret = bch2_btree_iter_traverse(&iter);
	if (ret)
		goto out;

	/* Must be checked with btree locked: */
	if (k->overwritten)
		goto out;

	ret = bch2_trans_update(trans, &iter, k->k, BTREE_TRIGGER_NORUN);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = *((const struct journal_key **)_l);
	const struct journal_key *r = *((const struct journal_key **)_r);

	return cmp_int(l->journal_seq, r->journal_seq);
}

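/*
 * Replay keys in journal_seq order so that journal pins can be dropped
 * (via replay_now_at()) as replay progresses, rather than blocking journal
 * reclaim until the entire pass finishes. Keys inserted by recovery itself
 * were given a journal_seq of U32_MAX, so they sort last:
 */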
static int bch2_journal_replay(struct bch_fs *c)
{
	struct journal_keys *keys = &c->journal_keys;
	struct journal_key **keys_sorted, *k;
	struct journal *j = &c->journal;
	size_t i;
	int ret;

	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
	keys->gap = keys->nr;

	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
	if (!keys_sorted)
		return -ENOMEM;

	for (i = 0; i < keys->nr; i++)
		keys_sorted[i] = &keys->d[i];

	sort(keys_sorted, keys->nr,
	     sizeof(keys_sorted[0]),
	     journal_sort_seq_cmp, NULL);

	if (keys->nr)
		replay_now_at(j, keys->journal_seq_base);

	for (i = 0; i < keys->nr; i++) {
		k = keys_sorted[i];

		cond_resched();

		replay_now_at(j, keys->journal_seq_base + k->journal_seq);

		ret = bch2_trans_do(c, NULL, NULL,
				    BTREE_INSERT_LAZY_RW|
				    BTREE_INSERT_NOFAIL|
				    (!k->allocated
				     ? BTREE_INSERT_JOURNAL_REPLAY|JOURNAL_WATERMARK_reserved
				     : 0),
			     bch2_journal_replay_key(&trans, k));
		if (ret) {
			bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
				ret, bch2_btree_ids[k->btree_id], k->level);
			goto err;
		}
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);

	if (keys->nr && !ret)
		bch2_journal_log_msg(&c->journal, "journal replay finished");
err:
	kvfree(keys_sorted);
	return ret;
}

/* journal replay early: */

static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case BCH_FS_USAGE_reserved:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_inodes:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case BCH_FS_USAGE_key_version:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
		unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);

		for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

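/*
 * Apply the non-key journal entries - btree roots, fs and device usage,
 * blacklists, IO clocks - before any btree replay happens. On a clean
 * shutdown these come from the superblock clean section; otherwise they're
 * read from the journal entries themselves:
 */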
static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean)
{
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		struct genradix_iter iter;
		struct journal_replay *i, **_i;

		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		printbuf_reset(&buf1);
		printbuf_reset(&buf2);

		if (k1)
			bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(k1));
		else
			pr_buf(&buf1, "(none)");

		if (k2)
			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(k2));
		else
			pr_buf(&buf2, "(none)");

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, buf1.buf,
			l2, buf2.buf);
	}
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	ret = bch2_sb_clean_validate_late(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_alloc &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
{
	struct bkey_i_snapshot	root_snapshot;
	struct bkey_i_subvolume root_volume;
	int ret;

	bkey_snapshot_init(&root_snapshot.k_i);
	root_snapshot.k.p.offset = U32_MAX;
	root_snapshot.v.flags	= 0;
	root_snapshot.v.parent	= 0;
	root_snapshot.v.subvol	= BCACHEFS_ROOT_SUBVOL;
	root_snapshot.v.pad	= 0;
	SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

	ret = bch2_btree_insert(c, BTREE_ID_snapshots,
				&root_snapshot.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	bkey_subvolume_init(&root_volume.k_i);
	root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
	root_volume.v.flags	= 0;
	root_volume.v.snapshot	= cpu_to_le32(U32_MAX);
	root_volume.v.inode	= cpu_to_le64(BCACHEFS_ROOT_INO);

	ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
				&root_volume.k_i,
				NULL, NULL, 0);
	if (ret)
		return ret;

	return 0;
}

static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_inode_unpacked inode;
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (!bkey_is_inode(k.k)) {
		bch_err(trans->c, "root inode not found");
		ret = -ENOENT;
		goto err;
	}

	ret = bch2_inode_unpack(k, &inode);
	BUG_ON(ret);

	inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

	ret = bch2_inode_write(trans, &iter, &inode);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret = 0;

	if (c->sb.clean)
		clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));
	else
		bch_info(c, "recovering from unclean shutdown");

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->opts.nochanges) {
		if (c->sb.version < bcachefs_metadata_version_new_data_types) {
			bch_info(c, "version prior to new_data_types, upgrade and fsck required");
			c->opts.version_upgrade	= true;
			c->opts.fsck		= true;
			c->opts.fix_errors	= FSCK_OPT_YES;
		}
	}

	if (c->opts.fsck && c->opts.norecovery) {
		bch_err(c, "cannot select both norecovery and fsck");
		ret = -EINVAL;
		goto err;
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct genradix_iter iter;
		struct journal_replay **i;

		bch_verbose(c, "starting journal read");
		ret = bch2_journal_read(c, &blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		genradix_for_each_reverse(&c->journal_entries, iter, i)
			if (*i && !(*i)->ignore) {
				last_journal_entry = &(*i)->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		ret = journal_keys_sort(c);
		if (ret)
			goto err;

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;

		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (c->opts.read_journal_only)
		goto out;

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	zero_out_btree_mem_ptr(&c->journal_keys);

	ret = journal_replay_early(c, clean);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq);
	if (ret)
		goto err;

	/*
	 * Skip past versions that might have been used (as nonces) but hadn't
	 * had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";

	down_read(&c->gc_lock);
	ret = bch2_alloc_read(c);
	up_read(&c->gc_lock);

	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	bch2_stripes_heap_start(c);

	if (c->opts.fsck) {
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "checking allocations");
		err = "error checking allocations";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
		bch_verbose(c, "done checking allocations");

		set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

		bch_info(c, "checking need_discard and freespace btrees");
		err = "error checking need_discard and freespace btrees";
		ret = bch2_check_alloc_info(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking need_discard and freespace btrees");

		set_bit(BCH_FS_MAY_GO_RW, &c->flags);

		bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
		err = "journal replay failed";
		ret = bch2_journal_replay(c);
		if (ret)
			goto err;
		if (c->opts.verbose || !c->sb.clean)
			bch_info(c, "journal replay done");

		bch_info(c, "checking lrus");
		err = "error checking lrus";
		ret = bch2_check_lrus(c, true);
		if (ret)
			goto err;
		bch_verbose(c, "done checking lrus");

		set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);

		bch_info(c, "checking alloc to lru refs");
		err = "error checking alloc to lru refs";
		ret = bch2_check_alloc_to_lru_refs(c);
		if (ret)
			goto err;
		set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);

		ret = bch2_check_lrus(c, true);
		if (ret)
			goto err;
		bch_verbose(c, "done checking alloc to lru refs");
	} else {
		set_bit(BCH_FS_MAY_GO_RW, &c->flags);
		set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
		set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);
		set_bit(BCH_FS_CHECK_ALLOC_TO_LRU_REFS_DONE, &c->flags);
		set_bit(BCH_FS_FSCK_DONE, &c->flags);

		if (c->opts.norecovery)
			goto out;

		bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
		err = "journal replay failed";
		ret = bch2_journal_replay(c);
		if (ret)
			goto err;
		if (c->opts.verbose || !c->sb.clean)
			bch_info(c, "journal replay done");
	}

	err = "error initializing freespace";
	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		bch2_fs_lazy_rw(c);

		err = "error creating root snapshot node";
		ret = bch2_fs_initialize_subvolumes(c);
		if (ret)
			goto err;
	}

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		/* set bi_subvol on root inode */
		err = "error upgrading root inode for subvolumes";
		ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				    bch2_fs_upgrade_for_subvolumes(&trans));
		if (ret)
			goto err;
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
		bch_verbose(c, "check inodes done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags) &&
	    !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
	    le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
		struct bch_move_stats stats;

		bch_move_stats_init(&stats, "recovery");

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c);
		if (ret)
			goto err;

		ret = bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(c);
	}
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
	c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
		bch2_write_super(c);
	}
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);
	set_bit(BCH_FS_MAY_GO_RW, &c->flags);
	set_bit(BCH_FS_FSCK_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	for_each_online_member(ca, c, i)
		bch2_dev_usage_init(ca);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	err = "error marking superblock and journal";
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
			percpu_ref_put(&ca->ref);
			goto err;
		}

		ca->new_fs_bucket_idx = 0;
	}

	bch_verbose(c, "initializing freespace");
	err = "error initializing freespace";
	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	err = "error creating root snapshot node";
	ret = bch2_fs_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
	bch_verbose(c, "reading snapshots done");

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum	= BCACHEFS_ROOT_INO;
	root_inode.bi_subvol	= BCACHEFS_ROOT_SUBVOL;
	bch2_inode_pack(c, &packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans,
				  BCACHEFS_ROOT_SUBVOL_INUM,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err(c, "error creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_flush(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}