// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "subvolume.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
        size_t src, dst;

        for (src = 0, dst = 0; src < keys->nr; src++)
                if (keys->d[src].btree_id != BTREE_ID_alloc)
                        keys->d[dst++] = keys->d[src];

        keys->nr = dst;
}

/*
 * Btree node pointers have a field to stash a pointer to the in memory btree
 * node; we need to zero out this field when reading in btree nodes, or when
 * reading in keys from the journal:
 */
static void zero_out_btree_mem_ptr(struct journal_keys *keys)
{
        struct journal_key *i;

        for (i = keys->d; i < keys->d + keys->nr; i++)
                if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
                        bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id      l_btree_id,
                             unsigned           l_level,
                             struct bpos        l_pos,
                             struct journal_key *r)
{
        return (cmp_int(l_btree_id,     r->btree_id) ?:
                cmp_int(l_level,        r->level) ?:
                bpos_cmp(l_pos, r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
        return (cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bpos_cmp(l->k->k.p,     r->k->k.p));
}

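/*
 * Binary search over the sorted journal key array: returns the index of the
 * first entry >= (id, level, pos), i.e. lower-bound semantics - callers use
 * this both for lookups and to find the insert position for new keys. The
 * BUG_ONs below assert exactly that invariant.
 */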
static size_t journal_key_search(struct journal_keys *journal_keys,
                                 enum btree_id id, unsigned level,
                                 struct bpos pos)
{
        size_t l = 0, r = journal_keys->nr, m;

        while (l < r) {
                m = l + ((r - l) >> 1);
                if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
                        l = m + 1;
                else
                        r = m;
        }

        BUG_ON(l < journal_keys->nr &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

        BUG_ON(l &&
               __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

        return l;
}

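/*
 * Inserting into the key array shifts everything at @idx and above up by one;
 * any live iterator positioned at or past the insert point has to have its
 * index bumped so it neither skips nor revisits a key:
 */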
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
        struct bkey_i *n = iter->keys->d[idx].k;
        struct btree_and_journal_iter *biter =
                container_of(iter, struct btree_and_journal_iter, journal);

        if (iter->idx > idx ||
            (iter->idx == idx &&
             biter->last &&
             bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
                iter->idx++;
}

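/*
 * Takes ownership of @k: on success the key array owns it (and frees it in
 * bch2_journal_keys_free(), since .allocated is set). A caller that wants to
 * keep its copy should use bch2_journal_key_insert() below, which duplicates
 * the key first. The array grows by doubling when full.
 */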
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
                                 unsigned level, struct bkey_i *k)
{
        struct journal_key n = {
                .btree_id       = id,
                .level          = level,
                .k              = k,
                .allocated      = true
        };
        struct journal_keys *keys = &c->journal_keys;
        struct journal_iter *iter;
        unsigned idx = journal_key_search(keys, id, level, k->k.p);

        if (idx < keys->nr &&
            journal_key_cmp(&n, &keys->d[idx]) == 0) {
                if (keys->d[idx].allocated)
                        kfree(keys->d[idx].k);
                keys->d[idx] = n;
                return 0;
        }

        if (keys->nr == keys->size) {
                struct journal_keys new_keys = {
                        .nr                     = keys->nr,
                        .size                   = keys->size * 2,
                        .journal_seq_base       = keys->journal_seq_base,
                };

                new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
                if (!new_keys.d) {
                        bch_err(c, "%s: error allocating new key array (size %zu)",
                                __func__, new_keys.size);
                        return -ENOMEM;
                }

                memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
                kvfree(keys->d);
                *keys = new_keys;
        }

        array_insert_item(keys->d, keys->nr, idx, n);

        list_for_each_entry(iter, &c->journal_iters, list)
                journal_iter_fix(c, iter, idx);

        return 0;
}

int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bkey_i *k)
{
        struct bkey_i *n;
        int ret;

        n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
        if (!n)
                return -ENOMEM;

        bkey_copy(n, k);
        ret = bch2_journal_key_insert_take(c, id, level, n);
        if (ret)
                kfree(n);
        return ret;
}

int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
                            unsigned level, struct bpos pos)
{
        struct bkey_i whiteout;

        bkey_init(&whiteout.k);
        whiteout.k.p = pos;

        return bch2_journal_key_insert(c, id, level, &whiteout);
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
        struct journal_key *k = iter->idx < iter->keys->nr
                ? iter->keys->d + iter->idx : NULL;

        if (k &&
            k->btree_id == iter->btree_id &&
            k->level    == iter->level)
                return k->k;

        iter->idx = iter->keys->nr;
        return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
        if (iter->idx < iter->keys->nr)
                iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
        list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
                                   struct journal_iter *iter,
                                   enum btree_id id, unsigned level,
                                   struct bpos pos)
{
        iter->btree_id  = id;
        iter->level     = level;
        iter->keys      = &c->journal_keys;
        iter->idx       = journal_key_search(&c->journal_keys, id, level, pos);
        list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
        return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
                                                iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
        bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
        switch (iter->last) {
        case none:
                break;
        case btree:
                bch2_journal_iter_advance_btree(iter);
                break;
        case journal:
                bch2_journal_iter_advance(&iter->journal);
                break;
        }

        iter->last = none;
}

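/*
 * Merge the btree node iterator with the journal keys: both streams are
 * sorted by position, and on a tie the btree key is skipped so the (newer)
 * journal key takes precedence. Deleted keys (whiteouts) are filtered out by
 * looping until a live key is found, and iteration stops once we pass the
 * node's max_key.
 */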
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
        struct bkey_s_c ret;

        while (1) {
                struct bkey_s_c btree_k         =
                        bch2_journal_iter_peek_btree(iter);
                struct bkey_s_c journal_k       =
                        bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

                if (btree_k.k && journal_k.k) {
                        int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

                        if (!cmp)
                                bch2_journal_iter_advance_btree(iter);

                        iter->last = cmp < 0 ? btree : journal;
                } else if (btree_k.k) {
                        iter->last = btree;
                } else if (journal_k.k) {
                        iter->last = journal;
                } else {
                        iter->last = none;
                        return bkey_s_c_null;
                }

                ret = iter->last == journal ? journal_k : btree_k;

                if (iter->b &&
                    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
                        iter->journal.idx = iter->journal.keys->nr;
                        iter->last = none;
                        return bkey_s_c_null;
                }

                if (!bkey_deleted(ret.k))
                        break;

                bch2_btree_and_journal_iter_advance(iter);
        }

        return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
        bch2_btree_and_journal_iter_advance(iter);

        return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
        bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
                                                struct bch_fs *c,
                                                struct btree *b)
{
        memset(iter, 0, sizeof(*iter));

        iter->b = b;
        bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
        bch2_journal_iter_init(c, &iter->journal,
                               b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

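/*
 * Note that @iter is passed by value: prefetch advances a private copy, so
 * the caller's iterator position is unaffected. The prefetch window appears
 * to be deliberately small above level 1 (2 nodes) and wider at level 1 (16).
 */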
static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
                                           struct btree_and_journal_iter iter)
{
        unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
        struct bkey_s_c k;
        struct bkey_buf tmp;

        BUG_ON(!b->c.level);

        bch2_bkey_buf_init(&tmp);

        while (i < nr &&
               (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                bch2_bkey_buf_reassemble(&tmp, c, k);

                bch2_btree_node_prefetch(c, NULL, NULL, tmp.k,
                                        b->c.btree_id, b->c.level - 1);

                bch2_btree_and_journal_iter_advance(&iter);
                i++;
        }

        bch2_bkey_buf_exit(&tmp, c);
}

static int bch2_btree_and_journal_walk_recurse(struct btree_trans *trans, struct btree *b,
                                enum btree_id btree_id,
                                btree_walk_key_fn key_fn)
{
        struct bch_fs *c = trans->c;
        struct btree_and_journal_iter iter;
        struct bkey_s_c k;
        struct bkey_buf tmp;
        struct btree *child;
        int ret = 0;

        bch2_bkey_buf_init(&tmp);
        bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

        while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
                if (b->c.level) {
                        bch2_bkey_buf_reassemble(&tmp, c, k);

                        child = bch2_btree_node_get_noiter(c, tmp.k,
                                                b->c.btree_id, b->c.level - 1,
                                                false);

                        ret = PTR_ERR_OR_ZERO(child);
                        if (ret)
                                break;

                        btree_and_journal_iter_prefetch(c, b, iter);

                        ret = bch2_btree_and_journal_walk_recurse(trans, child,
                                        btree_id, key_fn);
                        six_unlock_read(&child->c.lock);
                } else {
                        ret = key_fn(trans, k);
                }

                if (ret)
                        break;

                bch2_btree_and_journal_iter_advance(&iter);
        }

        bch2_btree_and_journal_iter_exit(&iter);
        bch2_bkey_buf_exit(&tmp, c);
        return ret;
}

int bch2_btree_and_journal_walk(struct btree_trans *trans, enum btree_id btree_id,
                                btree_walk_key_fn key_fn)
{
        struct bch_fs *c = trans->c;
        struct btree *b = c->btree_roots[btree_id].b;
        int ret = 0;

        if (btree_node_fake(b))
                return 0;

        six_lock_read(&b->c.lock, NULL, NULL);
        ret = bch2_btree_and_journal_walk_recurse(trans, b, btree_id, key_fn);
        six_unlock_read(&b->c.lock);

        return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
        while (!list_empty(list)) {
                struct journal_replay *i =
                        list_first_entry(list, struct journal_replay, list);
                list_del(&i->list);
                kvpfree(i, offsetof(struct journal_replay, j) +
                        vstruct_bytes(&i->j));
        }
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = _l;
        const struct journal_key *r = _r;

        return  cmp_int(l->btree_id,    r->btree_id) ?:
                cmp_int(l->level,       r->level) ?:
                bpos_cmp(l->k->k.p, r->k->k.p) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->journal_offset, r->journal_offset);
}

void bch2_journal_keys_free(struct journal_keys *keys)
{
        struct journal_key *i;

        for (i = keys->d; i < keys->d + keys->nr; i++)
                if (i->allocated)
                        kfree(i->k);

        kvfree(keys->d);
        keys->d = NULL;
        keys->nr = 0;
}

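/*
 * Sort every key in the journal by (btree_id, level, pos), duplicates ordered
 * oldest first, then dedup: the inner loop below skips to the last entry of
 * each run of equal keys, so the newest version (highest journal_seq) is the
 * one that's kept.
 */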
static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        struct bkey_i *k, *_n;
        struct journal_keys keys = { NULL };
        struct journal_key *src, *dst;
        size_t nr_keys = 0;

        if (list_empty(journal_entries))
                return keys;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                if (!keys.journal_seq_base)
                        keys.journal_seq_base = le64_to_cpu(i->j.seq);

                for_each_jset_key(k, _n, entry, &i->j)
                        nr_keys++;
        }

        keys.size = roundup_pow_of_two(nr_keys);

        keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
        if (!keys.d)
                goto err;

        list_for_each_entry(i, journal_entries, list) {
                if (i->ignore)
                        continue;

                BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

                for_each_jset_key(k, _n, entry, &i->j)
                        keys.d[keys.nr++] = (struct journal_key) {
                                .btree_id       = entry->btree_id,
                                .level          = entry->level,
                                .k              = k,
                                .journal_seq    = le64_to_cpu(i->j.seq) -
                                        keys.journal_seq_base,
                                .journal_offset = k->_data - i->j._data,
                        };
        }

        sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

        src = dst = keys.d;
        while (src < keys.d + keys.nr) {
                while (src + 1 < keys.d + keys.nr &&
                       src[0].btree_id  == src[1].btree_id &&
                       src[0].level     == src[1].level &&
                       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
                        src++;

                *dst++ = *src++;
        }

        keys.nr = dst - keys.d;
err:
        return keys;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
        BUG_ON(seq < j->replay_journal_seq);
        BUG_ON(seq > j->replay_journal_seq_end);

        while (j->replay_journal_seq < seq)
                bch2_journal_pin_put(j, j->replay_journal_seq++);
}

static int __bch2_journal_replay_key(struct btree_trans *trans,
                                     struct journal_key *k)
{
        struct btree_iter iter;
        unsigned iter_flags =
                BTREE_ITER_INTENT|
                BTREE_ITER_NOT_EXTENTS;
        int ret;

        if (!k->level && k->btree_id == BTREE_ID_alloc)
                iter_flags |= BTREE_ITER_CACHED|BTREE_ITER_CACHED_NOFILL;

        bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
                                  BTREE_MAX_DEPTH, k->level,
                                  iter_flags);
        ret   = bch2_btree_iter_traverse(&iter) ?:
                bch2_trans_update(trans, &iter, k->k, BTREE_TRIGGER_NORUN);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
        unsigned commit_flags =
                BTREE_INSERT_LAZY_RW|
                BTREE_INSERT_NOFAIL|
                BTREE_INSERT_JOURNAL_RESERVED;

        if (!k->allocated)
                commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

        return bch2_trans_do(c, NULL, NULL, commit_flags,
                             __bch2_journal_replay_key(&trans, k));
}

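/*
 * Replay ordering: level descending first (note r->level vs. l->level), so
 * interior node updates sort before leaf updates, then by journal sequence
 * number so each level is replayed in the order the updates were originally
 * journalled:
 */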
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
        const struct journal_key *l = *((const struct journal_key **)_l);
        const struct journal_key *r = *((const struct journal_key **)_r);

        return  cmp_int(r->level,       l->level) ?:
                cmp_int(l->journal_seq, r->journal_seq) ?:
                cmp_int(l->btree_id,    r->btree_id) ?:
                bpos_cmp(l->k->k.p,     r->k->k.p);
}

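/*
 * Journal replay proper runs in three passes over the sorted keys: alloc
 * btree updates first (these only touch the btree key cache, and the
 * allocator threads can't start until they're done), then interior node
 * updates, then - once journal reclaim is allowed to run - leaf node updates.
 */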
static int bch2_journal_replay(struct bch_fs *c)
{
        struct journal_keys *keys = &c->journal_keys;
        struct journal_key **keys_sorted, *k;
        struct journal *j = &c->journal;
        struct bch_dev *ca;
        unsigned idx;
        size_t i;
        u64 seq;
        int ret;

        keys_sorted = kmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
        if (!keys_sorted)
                return -ENOMEM;

        for (i = 0; i < keys->nr; i++)
                keys_sorted[i] = &keys->d[i];

        sort(keys_sorted, keys->nr,
             sizeof(keys_sorted[0]),
             journal_sort_seq_cmp, NULL);

        if (keys->nr)
                replay_now_at(j, keys->journal_seq_base);

        seq = j->replay_journal_seq;

        /*
         * First replay updates to the alloc btree - these will only update the
         * btree key cache:
         */
        for (i = 0; i < keys->nr; i++) {
                k = keys_sorted[i];

                cond_resched();

                if (!k->level && k->btree_id == BTREE_ID_alloc) {
                        j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
                        ret = bch2_journal_replay_key(c, k);
                        if (ret)
                                goto err;
                }
        }

        /* Now we can start the allocator threads: */
        set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
        for_each_member_device(ca, c, idx)
                bch2_wake_allocator(ca);

        /*
         * Next replay updates to interior btree nodes:
         */
        for (i = 0; i < keys->nr; i++) {
                k = keys_sorted[i];

                cond_resched();

                if (k->level) {
                        j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
                        ret = bch2_journal_replay_key(c, k);
                        if (ret)
                                goto err;
                }
        }

        /*
         * Now that the btree is in a consistent state, we can start journal
         * reclaim (which will be flushing entries from the btree key cache back
         * to the btree):
         */
        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
        journal_reclaim_kick(j);

        j->replay_journal_seq = seq;

        /*
         * Now replay leaf node updates:
         */
        for (i = 0; i < keys->nr; i++) {
                k = keys_sorted[i];

                cond_resched();

                if (k->level || k->btree_id == BTREE_ID_alloc)
                        continue;

                replay_now_at(j, keys->journal_seq_base + k->journal_seq);

                ret = bch2_journal_replay_key(c, k);
                if (ret)
                        goto err;
        }

        replay_now_at(j, j->replay_journal_seq_end);
        j->replay_journal_seq = 0;

        bch2_journal_set_replay_done(j);
        bch2_journal_flush_all_pins(j);
        kfree(keys_sorted);

        return bch2_journal_error(j);
err:
        bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
                ret, bch2_btree_ids[k->btree_id], k->level);
        kfree(keys_sorted);

        return ret;
}

/* journal replay early: */

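/*
 * "Early" replay runs before the btree is usable: it pulls btree roots,
 * usage/accounting info, journal seq blacklists and io clocks straight out of
 * the superblock clean section or the journal entries themselves.
 */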
static int journal_replay_entry_early(struct bch_fs *c,
                                      struct jset_entry *entry)
{
        int ret = 0;

        switch (entry->type) {
        case BCH_JSET_ENTRY_btree_root: {
                struct btree_root *r;

                if (entry->btree_id >= BTREE_ID_NR) {
                        bch_err(c, "filesystem has unknown btree type %u",
                                entry->btree_id);
                        return -EINVAL;
                }

                r = &c->btree_roots[entry->btree_id];

                if (entry->u64s) {
                        r->level = entry->level;
                        bkey_copy(&r->key, &entry->start[0]);
                        r->error = 0;
                } else {
                        r->error = -EIO;
                }
                r->alive = true;
                break;
        }
        case BCH_JSET_ENTRY_usage: {
                struct jset_entry_usage *u =
                        container_of(entry, struct jset_entry_usage, entry);

                switch (entry->btree_id) {
                case FS_USAGE_RESERVED:
                        if (entry->level < BCH_REPLICAS_MAX)
                                c->usage_base->persistent_reserved[entry->level] =
                                        le64_to_cpu(u->v);
                        break;
                case FS_USAGE_INODES:
                        c->usage_base->nr_inodes = le64_to_cpu(u->v);
                        break;
                case FS_USAGE_KEY_VERSION:
                        atomic64_set(&c->key_version,
                                     le64_to_cpu(u->v));
                        break;
                }

                break;
        }
        case BCH_JSET_ENTRY_data_usage: {
                struct jset_entry_data_usage *u =
                        container_of(entry, struct jset_entry_data_usage, entry);

                ret = bch2_replicas_set_usage(c, &u->r,
                                              le64_to_cpu(u->v));
                break;
        }
        case BCH_JSET_ENTRY_dev_usage: {
                struct jset_entry_dev_usage *u =
                        container_of(entry, struct jset_entry_dev_usage, entry);
                struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
                unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
                unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
                        sizeof(struct jset_entry_dev_usage_type);
                unsigned i;

                ca->usage_base->buckets_ec              = le64_to_cpu(u->buckets_ec);
                ca->usage_base->buckets_unavailable     = le64_to_cpu(u->buckets_unavailable);

                for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
                        ca->usage_base->d[i].buckets    = le64_to_cpu(u->d[i].buckets);
                        ca->usage_base->d[i].sectors    = le64_to_cpu(u->d[i].sectors);
                        ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
                }

                break;
        }
        case BCH_JSET_ENTRY_blacklist: {
                struct jset_entry_blacklist *bl_entry =
                        container_of(entry, struct jset_entry_blacklist, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->seq),
                                le64_to_cpu(bl_entry->seq) + 1);
                break;
        }
        case BCH_JSET_ENTRY_blacklist_v2: {
                struct jset_entry_blacklist_v2 *bl_entry =
                        container_of(entry, struct jset_entry_blacklist_v2, entry);

                ret = bch2_journal_seq_blacklist_add(c,
                                le64_to_cpu(bl_entry->start),
                                le64_to_cpu(bl_entry->end) + 1);
                break;
        }
        case BCH_JSET_ENTRY_clock: {
                struct jset_entry_clock *clock =
                        container_of(entry, struct jset_entry_clock, entry);

                atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
        }
        }

        return ret;
}

static int journal_replay_early(struct bch_fs *c,
                                struct bch_sb_field_clean *clean,
                                struct list_head *journal)
{
        struct journal_replay *i;
        struct jset_entry *entry;
        int ret;

        if (clean) {
                for (entry = clean->start;
                     entry != vstruct_end(&clean->field);
                     entry = vstruct_next(entry)) {
                        ret = journal_replay_entry_early(c, entry);
                        if (ret)
                                return ret;
                }
        } else {
                list_for_each_entry(i, journal, list) {
                        if (i->ignore)
                                continue;

                        vstruct_for_each(&i->j, entry) {
                                ret = journal_replay_entry_early(c, entry);
                                if (ret)
                                        return ret;
                        }
                }
        }

        bch2_fs_usage_initialize(c);

        return 0;
}

/* sb clean section: */

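/*
 * Scan either the superblock clean section or a journal entry for the root of
 * the given btree; exactly one of @clean and @j should be non-NULL (see the
 * two callers in verify_superblock_clean() below).
 */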
static struct bkey_i *btree_root_find(struct bch_fs *c,
                                      struct bch_sb_field_clean *clean,
                                      struct jset *j,
                                      enum btree_id id, unsigned *level)
{
        struct bkey_i *k;
        struct jset_entry *entry, *start, *end;

        if (clean) {
                start = clean->start;
                end = vstruct_end(&clean->field);
        } else {
                start = j->start;
                end = vstruct_last(j);
        }

        for (entry = start; entry < end; entry = vstruct_next(entry))
                if (entry->type == BCH_JSET_ENTRY_btree_root &&
                    entry->btree_id == id)
                        goto found;

        return NULL;
found:
        if (!entry->u64s)
                return ERR_PTR(-EINVAL);

        k = entry->start;
        *level = entry->level;
        return k;
}

static int verify_superblock_clean(struct bch_fs *c,
                                   struct bch_sb_field_clean **cleanp,
                                   struct jset *j)
{
        unsigned i;
        struct bch_sb_field_clean *clean = *cleanp;
        int ret = 0;

        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
                kfree(clean);
                *cleanp = NULL;
                return 0;
        }

        for (i = 0; i < BTREE_ID_NR; i++) {
                char buf1[200], buf2[200];
                struct bkey_i *k1, *k2;
                unsigned l1 = 0, l2 = 0;

                k1 = btree_root_find(c, clean, NULL, i, &l1);
                k2 = btree_root_find(c, NULL, j, i, &l2);

                if (!k1 && !k2)
                        continue;

                mustfix_fsck_err_on(!k1 || !k2 ||
                                    IS_ERR(k1) ||
                                    IS_ERR(k2) ||
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(k1)) ||
                                    l1 != l2, c,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
                        l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
                        l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
        }
fsck_err:
        return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
        struct bch_sb_field_clean *clean, *sb_clean;
        int ret;

        mutex_lock(&c->sb_lock);
        sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

        if (fsck_err_on(!sb_clean, c,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
                mutex_unlock(&c->sb_lock);
                return NULL;
        }

        clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
                        GFP_KERNEL);
        if (!clean) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(-ENOMEM);
        }

        ret = bch2_sb_clean_validate(c, clean, READ);
        if (ret) {
                mutex_unlock(&c->sb_lock);
                return ERR_PTR(ret);
        }

        mutex_unlock(&c->sb_lock);

        return clean;
fsck_err:
        mutex_unlock(&c->sb_lock);
        return ERR_PTR(ret);
}

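/*
 * A missing or unreadable alloc btree root is recoverable - the alloc info
 * compat bit is cleared so the alloc info gets rebuilt by mark and sweep -
 * whereas a bad root for any other btree is a hard fsck error.
 */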
static int read_btree_roots(struct bch_fs *c)
{
        unsigned i;
        int ret = 0;

        for (i = 0; i < BTREE_ID_NR; i++) {
                struct btree_root *r = &c->btree_roots[i];

                if (!r->alive)
                        continue;

                if (i == BTREE_ID_alloc &&
                    c->opts.reconstruct_alloc) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        continue;
                }

                if (r->error) {
                        __fsck_err(c, i == BTREE_ID_alloc
                                   ? FSCK_CAN_IGNORE : 0,
                                   "invalid btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                }

                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        __fsck_err(c, i == BTREE_ID_alloc
                                   ? FSCK_CAN_IGNORE : 0,
                                   "error reading btree root %s",
                                   bch2_btree_ids[i]);
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                }
        }

        for (i = 0; i < BTREE_ID_NR; i++)
                if (!c->btree_roots[i].b)
                        bch2_btree_root_alloc(c, i);
fsck_err:
        return ret;
}

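/*
 * Filesystems created or upgraded to the subvolume-aware format need a root
 * snapshot node and a root subvolume; this creates both, with the root
 * subvolume pointing at the root inode:
 */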
static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
{
        struct bkey_i_snapshot  root_snapshot;
        struct bkey_i_subvolume root_volume;
        int ret;

        bkey_snapshot_init(&root_snapshot.k_i);
        root_snapshot.k.p.offset = U32_MAX;
        root_snapshot.v.flags   = 0;
        root_snapshot.v.parent  = 0;
        root_snapshot.v.subvol  = BCACHEFS_ROOT_SUBVOL;
        root_snapshot.v.pad     = 0;
        SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);

        ret = bch2_btree_insert(c, BTREE_ID_snapshots,
                                &root_snapshot.k_i,
                                NULL, NULL, 0);
        if (ret)
                return ret;

        bkey_subvolume_init(&root_volume.k_i);
        root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
        root_volume.v.flags     = 0;
        root_volume.v.snapshot  = cpu_to_le32(U32_MAX);
        root_volume.v.inode     = cpu_to_le64(BCACHEFS_ROOT_INO);

        ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
                                &root_volume.k_i,
                                NULL, NULL, 0);
        if (ret)
                return ret;

        return 0;
}

static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
{
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
        struct bch_inode_unpacked inode;
        int ret;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
                             SPOS(0, BCACHEFS_ROOT_INO, U32_MAX), 0);
        k = bch2_btree_iter_peek_slot(&iter);
        ret = bkey_err(k);
        if (ret)
                goto err;

        if (!bkey_is_inode(k.k)) {
                bch_err(c, "root inode not found");
                ret = -ENOENT;
                goto err;
        }

        ret = bch2_inode_unpack(k, &inode);
        BUG_ON(ret);

        inode.bi_subvol = BCACHEFS_ROOT_SUBVOL;

        ret = bch2_inode_write(trans, &iter, &inode);
err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
}

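/*
 * Recovery, roughly in order: read the superblock clean section and/or the
 * journal, sort and dedup journal keys, replay "early" entries (roots, usage,
 * blacklists), read btree roots and alloc/stripe info, run mark and sweep if
 * needed, replay the journal proper, then fsck and any on-disk format
 * upgrades.
 */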
int bch2_fs_recovery(struct bch_fs *c)
{
        const char *err = "cannot allocate memory";
        struct bch_sb_field_clean *clean = NULL;
        struct jset *last_journal_entry = NULL;
        u64 blacklist_seq, journal_seq;
        bool write_sb = false;
        int ret = 0;

        if (c->sb.clean)
                clean = read_superblock_clean(c);
        ret = PTR_ERR_OR_ZERO(clean);
        if (ret)
                goto err;

        if (c->sb.clean)
                bch_info(c, "recovering from clean shutdown, journal seq %llu",
                         le64_to_cpu(clean->journal_seq));
        else
                bch_info(c, "recovering from unclean shutdown");

        if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
                bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
                ret = -EINVAL;
                goto err;
        }

        if (!c->sb.clean &&
            !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
                bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
                ret = -EINVAL;
                goto err;
        }

        if (!(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
                bch_err(c, "filesystem may have incompatible bkey formats; run fsck from the compat branch to fix");
                ret = -EINVAL;
                goto err;
        }

        if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
                bch_info(c, "alloc_v2 feature bit not set, fsck required");
                c->opts.fsck = true;
                c->opts.fix_errors = FSCK_OPT_YES;
        }

        if (!c->replicas.entries ||
            c->opts.rebuild_replicas) {
                bch_info(c, "building replicas info");
                set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        }

        if (!c->opts.nochanges) {
                if (c->sb.version < bcachefs_metadata_version_inode_backpointers) {
                        bch_info(c, "version prior to inode backpointers, upgrade and fsck required");
                        c->opts.version_upgrade = true;
                        c->opts.fsck            = true;
                        c->opts.fix_errors      = FSCK_OPT_YES;
                } else if (c->sb.version < bcachefs_metadata_version_subvol_dirent) {
                        bch_info(c, "filesystem version is prior to subvol_dirent - upgrading");
                        c->opts.version_upgrade = true;
                        c->opts.fsck            = true;
                } else if (c->sb.version < bcachefs_metadata_version_inode_v2) {
                        bch_info(c, "filesystem version is prior to inode_v2 - upgrading");
                        c->opts.version_upgrade = true;
                }
        }

        ret = bch2_blacklist_table_initialize(c);
        if (ret) {
                bch_err(c, "error initializing blacklist table");
                goto err;
        }

        if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
                struct journal_replay *i;

                ret = bch2_journal_read(c, &c->journal_entries,
                                        &blacklist_seq, &journal_seq);
                if (ret)
                        goto err;

                list_for_each_entry_reverse(i, &c->journal_entries, list)
                        if (!i->ignore) {
                                last_journal_entry = &i->j;
                                break;
                        }

                if (mustfix_fsck_err_on(c->sb.clean &&
                                        last_journal_entry &&
                                        !journal_entry_empty(last_journal_entry), c,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                        c->sb.clean = false;
                }

                if (!last_journal_entry) {
                        fsck_err_on(!c->sb.clean, c, "no journal entries found");
                        goto use_clean;
                }

                c->journal_keys = journal_keys_sort(&c->journal_entries);
                if (!c->journal_keys.d) {
                        ret = -ENOMEM;
                        goto err;
                }

                if (c->sb.clean && last_journal_entry) {
                        ret = verify_superblock_clean(c, &clean,
                                                      last_journal_entry);
                        if (ret)
                                goto err;
                }
        } else {
use_clean:
                if (!clean) {
                        bch_err(c, "no superblock clean section found");
                        ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
                        goto err;
                }
                blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
        }

        if (c->opts.reconstruct_alloc) {
                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                drop_alloc_keys(&c->journal_keys);
        }

        zero_out_btree_mem_ptr(&c->journal_keys);

        ret = journal_replay_early(c, clean, &c->journal_entries);
        if (ret)
                goto err;

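        /*
         * If journal read left a gap between the last entry we'll replay and
         * the sequence number we'll start writing at, blacklist the
         * intervening range: entries found there on a future read were
         * presumably never fully written.
         */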
        if (blacklist_seq != journal_seq) {
                ret = bch2_journal_seq_blacklist_add(c,
                                        blacklist_seq, journal_seq);
                if (ret) {
                        bch_err(c, "error creating new journal seq blacklist entry");
                        goto err;
                }
        }

        ret = bch2_fs_journal_start(&c->journal, journal_seq,
                                    &c->journal_entries);
        if (ret)
                goto err;

        ret = read_btree_roots(c);
        if (ret)
                goto err;

        bch_verbose(c, "starting alloc read");
        err = "error reading allocation information";
        ret = bch2_alloc_read(c);
        if (ret)
                goto err;
        bch_verbose(c, "alloc read done");

        bch_verbose(c, "starting stripes_read");
        err = "error reading stripes";
        ret = bch2_stripes_read(c);
        if (ret)
                goto err;
        bch_verbose(c, "stripes_read done");

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

        if (c->opts.fsck ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
            test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
                bool metadata_only = c->opts.norecovery;

                bch_info(c, "starting mark and sweep");
                err = "error in mark and sweep";
                ret = bch2_gc(c, true, metadata_only);
                if (ret)
                        goto err;
                bch_verbose(c, "mark and sweep done");
        }

        bch2_stripes_heap_start(c);

        clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        /*
         * Skip past versions that might have possibly been used (as nonces),
         * but hadn't had their pointers written:
         */
        if (c->sb.encryption_type && !c->sb.clean)
                atomic64_add(1 << 16, &c->key_version);

        if (c->opts.norecovery)
                goto out;

        bch_verbose(c, "starting journal replay");
        err = "journal replay failed";
        ret = bch2_journal_replay(c);
        if (ret)
                goto err;
        bch_verbose(c, "journal replay done");

        if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
            !c->opts.nochanges) {
                /*
                 * note that even when filesystem was clean there might be work
                 * to do here, if we ran gc (because of fsck) which recalculated
                 * oldest_gen:
                 */
                bch_verbose(c, "writing allocation info");
                err = "error writing out alloc info";
                ret = bch2_alloc_write_all(c, BTREE_INSERT_LAZY_RW);
                if (ret) {
                        bch_err(c, "error writing alloc info");
                        goto err;
                }
                bch_verbose(c, "alloc write done");
        }

        if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
                bch2_fs_lazy_rw(c);

                err = "error creating root snapshot node";
                ret = bch2_fs_initialize_subvolumes(c);
                if (ret)
                        goto err;
        }

        bch_verbose(c, "reading snapshots table");
        err = "error reading snapshots table";
        ret = bch2_fs_snapshots_start(c);
        if (ret)
                goto err;
        bch_verbose(c, "reading snapshots done");

        if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
                /* set bi_subvol on root inode */
                err = "error upgrading root inode for subvolumes";
                ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
                                    bch2_fs_upgrade_for_subvolumes(&trans));
                if (ret)
                        goto err;
        }

        if (c->opts.fsck) {
                bch_info(c, "starting fsck");
                err = "error in fsck";
                ret = bch2_fsck_full(c);
                if (ret)
                        goto err;
                bch_verbose(c, "fsck done");
        } else if (!c->sb.clean) {
                bch_verbose(c, "checking for deleted inodes");
                err = "error in recovery";
                ret = bch2_fsck_walk_inodes_only(c);
                if (ret)
                        goto err;
                bch_verbose(c, "check inodes done");
        }

        if (enabled_qtypes(c)) {
                bch_verbose(c, "reading quotas");
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
                bch_verbose(c, "quotas done");
        }

        mutex_lock(&c->sb_lock);
        /*
         * With journal replay done, we can clear the journal seq blacklist
         * table:
         */
        BUG_ON(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags));
        if (le16_to_cpu(c->sb.version_min) >= bcachefs_metadata_version_btree_ptr_sectors_written)
                bch2_sb_resize_journal_seq_blacklist(&c->disk_sb, 0);

        if (c->opts.version_upgrade) {
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
                c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
                write_sb = true;
        }

        if (!test_bit(BCH_FS_ERROR, &c->flags)) {
                c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_alloc_info);
                write_sb = true;
        }

        if (c->opts.fsck &&
            !test_bit(BCH_FS_ERROR, &c->flags) &&
            !test_bit(BCH_FS_ERRORS_NOT_FIXED, &c->flags)) {
                SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
                SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, 0);
                write_sb = true;
        }

        if (write_sb)
                bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
            !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done)) ||
            le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
                struct bch_move_stats stats;

                bch_move_stats_init(&stats, "recovery");

                bch_info(c, "scanning for old btree nodes");
                ret = bch2_fs_read_write(c);
                if (ret)
                        goto err;

                ret = bch2_scan_old_btree_nodes(c, &stats);
                if (ret)
                        goto err;
                bch_info(c, "scanning for old btree nodes done");
        }

        ret = 0;
out:
        set_bit(BCH_FS_FSCK_DONE, &c->flags);
        bch2_flush_fsck_errs(c);

        if (!c->opts.keep_journal) {
                bch2_journal_keys_free(&c->journal_keys);
                bch2_journal_entries_free(&c->journal_entries);
        }
        kfree(clean);
        if (ret)
                bch_err(c, "Error in recovery: %s (%i)", err, ret);
        else
                bch_verbose(c, "ret %i", ret);
        return ret;
err:
fsck_err:
        bch2_fs_emergency_read_only(c);
        goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
        struct bch_inode_unpacked root_inode, lostfound_inode;
        struct bkey_inode_buf packed_inode;
        struct qstr lostfound = QSTR("lost+found");
        const char *err = "cannot allocate memory";
        struct bch_dev *ca;
        LIST_HEAD(journal);
        unsigned i;
        int ret;

        bch_notice(c, "initializing new filesystem");

        mutex_lock(&c->sb_lock);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_extents_above_btree_updates_done);
        c->disk_sb.sb->compat[0] |= cpu_to_le64(1ULL << BCH_COMPAT_bformat_overflow_done);

        if (c->opts.version_upgrade) {
                c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
                c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALL);
                bch2_write_super(c);
        }
        mutex_unlock(&c->sb_lock);

        set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
        set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);

        set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
        set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
        set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

        err = "unable to allocate journal buckets";
        for_each_online_member(ca, c, i) {
                ret = bch2_dev_journal_alloc(ca);
                if (ret) {
                        percpu_ref_put(&ca->io_ref);
                        goto err;
                }
        }

        /*
         * journal_res_get() will crash if called before this has
         * set up the journal.pin FIFO and journal.cur pointer:
         */
        bch2_fs_journal_start(&c->journal, 1, &journal);
        bch2_journal_set_replay_done(&c->journal);

        err = "error going read-write";
        ret = bch2_fs_read_write_early(c);
        if (ret)
                goto err;

        /*
         * Write out the superblock and journal buckets, now that we can do
         * btree updates
         */
        err = "error marking superblock and journal";
        for_each_member_device(ca, c, i) {
                ret = bch2_trans_mark_dev_sb(c, ca);
                if (ret) {
                        percpu_ref_put(&ca->ref);
                        goto err;
                }

                ca->new_fs_bucket_idx = 0;
        }

        err = "error creating root snapshot node";
        ret = bch2_fs_initialize_subvolumes(c);
        if (ret)
                goto err;

        bch_verbose(c, "reading snapshots table");
        err = "error reading snapshots table";
        ret = bch2_fs_snapshots_start(c);
        if (ret)
                goto err;
        bch_verbose(c, "reading snapshots done");

        bch2_inode_init(c, &root_inode, 0, 0,
                        S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
        root_inode.bi_inum      = BCACHEFS_ROOT_INO;
        root_inode.bi_subvol    = BCACHEFS_ROOT_SUBVOL;
        bch2_inode_pack(c, &packed_inode, &root_inode);
        packed_inode.inode.k.p.snapshot = U32_MAX;

        err = "error creating root directory";
        ret = bch2_btree_insert(c, BTREE_ID_inodes,
                                &packed_inode.inode.k_i,
                                NULL, NULL, 0);
        if (ret)
                goto err;

        bch2_inode_init_early(c, &lostfound_inode);

        err = "error creating lost+found";
        ret = bch2_trans_do(c, NULL, NULL, 0,
                bch2_create_trans(&trans,
                                  BCACHEFS_ROOT_SUBVOL_INUM,
                                  &root_inode, &lostfound_inode,
                                  &lostfound,
                                  0, 0, S_IFDIR|0700, 0,
                                  NULL, NULL, (subvol_inum) { 0 }, 0));
        if (ret) {
                bch_err(c, "error creating lost+found");
                goto err;
        }

        if (enabled_qtypes(c)) {
                ret = bch2_fs_quota_read(c);
                if (ret)
                        goto err;
        }

        err = "error writing first journal entry";
        ret = bch2_journal_flush(&c->journal);
        if (ret)
                goto err;

        mutex_lock(&c->sb_lock);
        SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);

        return 0;
err:
        pr_err("Error initializing new filesystem: %s (%i)", err, ret);
        return ret;
}