1 #include "bcachefs.h"
2 #include "alloc_background.h"
3 #include "alloc_foreground.h"
4 #include "btree_gc.h"
5 #include "btree_update.h"
6 #include "buckets.h"
7 #include "checksum.h"
8 #include "error.h"
9 #include "journal.h"
10 #include "journal_io.h"
11 #include "journal_reclaim.h"
12 #include "journal_seq_blacklist.h"
13 #include "replicas.h"
14
15 #include <trace/events/bcachefs.h>
16
17 struct journal_list {
18         struct closure          cl;
19         struct mutex            lock;
20         struct list_head        *head;
21         int                     ret;
22 };
23
24 #define JOURNAL_ENTRY_ADD_OK            0
25 #define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5
26
27 /*
28  * Given a journal entry we just read, add it to the list of journal entries to
29  * be replayed:
30  */
31 static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
32                              struct journal_list *jlist, struct jset *j)
33 {
34         struct journal_replay *i, *pos;
35         struct list_head *where;
36         size_t bytes = vstruct_bytes(j);
37         __le64 last_seq;
38         int ret;
39
40         last_seq = !list_empty(jlist->head)
41                 ? list_last_entry(jlist->head, struct journal_replay,
42                                   list)->j.last_seq
43                 : 0;
44
45         /* Is this entry older than the range we need? */
46         if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
47                 ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
48                 goto out;
49         }
50
51         /* Drop entries we don't need anymore */
52         list_for_each_entry_safe(i, pos, jlist->head, list) {
53                 if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
54                         break;
55                 list_del(&i->list);
56                 kvpfree(i, offsetof(struct journal_replay, j) +
57                         vstruct_bytes(&i->j));
58         }
59
60         list_for_each_entry_reverse(i, jlist->head, list) {
61                 /* Duplicate? */
62                 if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
63                         fsck_err_on(bytes != vstruct_bytes(&i->j) ||
64                                     memcmp(j, &i->j, bytes), c,
65                                     "found duplicate but non-identical journal entries (seq %llu)",
66                                     le64_to_cpu(j->seq));
67                         goto found;
68                 }
69
70                 if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
71                         where = &i->list;
72                         goto add;
73                 }
74         }
75
76         where = jlist->head;
77 add:
78         i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
79         if (!i) {
80                 ret = -ENOMEM;
81                 goto out;
82         }
83
84         list_add(&i->list, where);
85         i->devs.nr = 0;
86         memcpy(&i->j, j, bytes);
87 found:
88         if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
89                 bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
90         else
91                 fsck_err_on(1, c, "duplicate journal entries on same device");
92         ret = JOURNAL_ENTRY_ADD_OK;
93 out:
94 fsck_err:
95         return ret;
96 }
97
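/*
 * The nonce for a journal entry is derived from its sequence number plus
 * BCH_NONCE_JOURNAL, so that every jset gets a unique nonce for encryption
 * and checksumming:
 */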
98 static struct nonce journal_nonce(const struct jset *jset)
99 {
100         return (struct nonce) {{
101                 [0] = 0,
102                 [1] = ((__le32 *) &jset->seq)[0],
103                 [2] = ((__le32 *) &jset->seq)[1],
104                 [3] = BCH_NONCE_JOURNAL,
105         }};
106 }
107
108 /* this fills in a range with empty jset_entries: */
109 static void journal_entry_null_range(void *start, void *end)
110 {
111         struct jset_entry *entry;
112
113         for (entry = start; entry != end; entry = vstruct_next(entry))
114                 memset(entry, 0, sizeof(*entry));
115 }
116
117 #define JOURNAL_ENTRY_REREAD    5       /* buffer too small, reread with a bigger one */
118 #define JOURNAL_ENTRY_NONE      6       /* no journal entry here (bad magic) */
119 #define JOURNAL_ENTRY_BAD       7       /* entry failed validation (e.g. bad checksum) */
120
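/*
 * Validation errors are handled differently at read vs. write time: on READ
 * they're treated as fixable fsck errors; on WRITE we're about to write
 * corrupt metadata, so the filesystem is flagged inconsistent and we bail
 * out of the write:
 */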
121 #define journal_entry_err(c, msg, ...)                                  \
122 ({                                                                      \
123         switch (write) {                                                \
124         case READ:                                                      \
125                 mustfix_fsck_err(c, msg, ##__VA_ARGS__);                \
126                 break;                                                  \
127         case WRITE:                                                     \
128                 bch_err(c, "corrupt metadata before write:\n"           \
129                         msg, ##__VA_ARGS__);                            \
130                 if (bch2_fs_inconsistent(c)) {                          \
131                         ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
132                         goto fsck_err;                                  \
133                 }                                                       \
134                 break;                                                  \
135         }                                                               \
136         true;                                                           \
137 })
138
139 #define journal_entry_err_on(cond, c, msg, ...)                         \
140         ((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
141
142 static int journal_validate_key(struct bch_fs *c, struct jset *jset,
143                                 struct jset_entry *entry,
144                                 struct bkey_i *k, enum btree_node_type key_type,
145                                 const char *type, int write)
146 {
147         void *next = vstruct_next(entry);
148         const char *invalid;
149         unsigned version = le32_to_cpu(jset->version);
150         int ret = 0;
151
152         if (journal_entry_err_on(!k->k.u64s, c,
153                         "invalid %s in journal: k->u64s 0", type)) {
154                 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
155                 journal_entry_null_range(vstruct_next(entry), next);
156                 return 0;
157         }
158
159         if (journal_entry_err_on((void *) bkey_next(k) >
160                                 (void *) vstruct_next(entry), c,
161                         "invalid %s in journal: extends past end of journal entry",
162                         type)) {
163                 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
164                 journal_entry_null_range(vstruct_next(entry), next);
165                 return 0;
166         }
167
168         if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
169                         "invalid %s in journal: bad format %u",
170                         type, k->k.format)) {
171                 le16_add_cpu(&entry->u64s, -k->k.u64s);
172                 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
173                 journal_entry_null_range(vstruct_next(entry), next);
174                 return 0;
175         }
176
177         if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN)
178                 bch2_bkey_swab(NULL, bkey_to_packed(k));
179
180         if (!write &&
181             version < bcachefs_metadata_version_bkey_renumber)
182                 bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
183
184         invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), key_type);
185         if (invalid) {
186                 char buf[160];
187
188                 bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
189                 mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
190                                  type, invalid, buf);
191
192                 le16_add_cpu(&entry->u64s, -k->k.u64s);
193                 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
194                 journal_entry_null_range(vstruct_next(entry), next);
195                 return 0;
196         }
197
198         if (write &&
199             version < bcachefs_metadata_version_bkey_renumber)
200                 bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
201 fsck_err:
202         return ret;
203 }
204
205 static int journal_entry_validate_btree_keys(struct bch_fs *c,
206                                              struct jset *jset,
207                                              struct jset_entry *entry,
208                                              int write)
209 {
210         struct bkey_i *k;
211
212         vstruct_for_each(entry, k) {
213                 int ret = journal_validate_key(c, jset, entry, k,
214                                 __btree_node_type(entry->level,
215                                                   entry->btree_id),
216                                 "key", write);
217                 if (ret)
218                         return ret;
219         }
220
221         return 0;
222 }
223
224 static int journal_entry_validate_btree_root(struct bch_fs *c,
225                                              struct jset *jset,
226                                              struct jset_entry *entry,
227                                              int write)
228 {
229         struct bkey_i *k = entry->start;
230         int ret = 0;
231
232         if (journal_entry_err_on(!entry->u64s ||
233                                  le16_to_cpu(entry->u64s) != k->k.u64s, c,
234                                  "invalid btree root journal entry: wrong number of keys")) {
235                 void *next = vstruct_next(entry);
236                 /*
237                  * we don't want to null out this jset_entry,
238                  * just the contents, so that later we can tell
239                  * we were _supposed_ to have a btree root
240                  */
241                 entry->u64s = 0;
242                 journal_entry_null_range(vstruct_next(entry), next);
243                 return 0;
244         }
245
246         return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
247                                     "btree root", write);
248 fsck_err:
249         return ret;
250 }
251
252 static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
253                                             struct jset *jset,
254                                             struct jset_entry *entry,
255                                             int write)
256 {
257         /* obsolete, don't care: */
258         return 0;
259 }
260
261 static int journal_entry_validate_blacklist(struct bch_fs *c,
262                                             struct jset *jset,
263                                             struct jset_entry *entry,
264                                             int write)
265 {
266         int ret = 0;
267
268         if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
269                 "invalid journal seq blacklist entry: bad size")) {
270                 journal_entry_null_range(entry, vstruct_next(entry));
271         }
272 fsck_err:
273         return ret;
274 }
275
276 static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
277                                                struct jset *jset,
278                                                struct jset_entry *entry,
279                                                int write)
280 {
281         struct jset_entry_blacklist_v2 *bl_entry;
282         int ret = 0;
283
284         if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
285                 "invalid journal seq blacklist entry: bad size")) {
286                 journal_entry_null_range(entry, vstruct_next(entry));
287                 goto out;
288         }
289
290         bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
291
292         if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
293                                  le64_to_cpu(bl_entry->end), c,
294                 "invalid journal seq blacklist entry: start > end")) {
295                 journal_entry_null_range(entry, vstruct_next(entry));
296         }
297 out:
298 fsck_err:
299         return ret;
300 }
301
302 static int journal_entry_validate_usage(struct bch_fs *c,
303                                         struct jset *jset,
304                                         struct jset_entry *entry,
305                                         int write)
306 {
307         struct jset_entry_usage *u =
308                 container_of(entry, struct jset_entry_usage, entry);
309         unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
310         int ret = 0;
311
312         if (journal_entry_err_on(bytes < sizeof(*u),
313                                  c,
314                                  "invalid journal entry usage: bad size")) {
315                 journal_entry_null_range(entry, vstruct_next(entry));
316                 return ret;
317         }
318
319 fsck_err:
320         return ret;
321 }
322
323 static int journal_entry_validate_data_usage(struct bch_fs *c,
324                                         struct jset *jset,
325                                         struct jset_entry *entry,
326                                         int write)
327 {
328         struct jset_entry_data_usage *u =
329                 container_of(entry, struct jset_entry_data_usage, entry);
330         unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
331         int ret = 0;
332
333         if (journal_entry_err_on(bytes < sizeof(*u) ||
334                                  bytes < sizeof(*u) + u->r.nr_devs,
335                                  c,
336                                  "invalid journal entry data usage: bad size")) {
337                 journal_entry_null_range(entry, vstruct_next(entry));
338                 return ret;
339         }
340
341 fsck_err:
342         return ret;
343 }
344
345 struct jset_entry_ops {
346         int (*validate)(struct bch_fs *, struct jset *,
347                         struct jset_entry *, int);
348 };
349
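/*
 * Validate hooks for each jset entry type, generated from
 * BCH_JSET_ENTRY_TYPES() and indexed by entry->type:
 */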
350 static const struct jset_entry_ops bch2_jset_entry_ops[] = {
351 #define x(f, nr)                                                \
352         [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
353                 .validate       = journal_entry_validate_##f,   \
354         },
355         BCH_JSET_ENTRY_TYPES()
356 #undef x
357 };
358
359 static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
360                                   struct jset_entry *entry, int write)
361 {
362         return entry->type < BCH_JSET_ENTRY_NR
363                 ? bch2_jset_entry_ops[entry->type].validate(c, jset,
364                                                             entry, write)
365                 : 0;
366 }
367
368 static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
369                                  int write)
370 {
371         struct jset_entry *entry;
372         int ret = 0;
373
374         vstruct_for_each(jset, entry) {
375                 if (journal_entry_err_on(vstruct_next(entry) >
376                                          vstruct_last(jset), c,
377                                 "journal entry extends past end of jset")) {
378                         jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
379                         break;
380                 }
381
382                 ret = journal_entry_validate(c, jset, entry, write);
383                 if (ret)
384                         break;
385         }
386 fsck_err:
387         return ret;
388 }
389
390 static int jset_validate(struct bch_fs *c,
391                          struct jset *jset, u64 sector,
392                          unsigned bucket_sectors_left,
393                          unsigned sectors_read,
394                          int write)
395 {
396         size_t bytes = vstruct_bytes(jset);
397         struct bch_csum csum;
398         unsigned version;
399         int ret = 0;
400
401         if (le64_to_cpu(jset->magic) != jset_magic(c))
402                 return JOURNAL_ENTRY_NONE;
403
404         version = le32_to_cpu(jset->version);
405         if ((version != BCH_JSET_VERSION_OLD &&
406              version < bcachefs_metadata_version_min) ||
407             version >= bcachefs_metadata_version_max) {
408                 bch_err(c, "unknown journal entry version %u", version);
409                 return BCH_FSCK_UNKNOWN_VERSION;
410         }
411
412         if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
413                                  "journal entry too big (%zu bytes), sector %llu",
414                                  bytes, sector)) {
415                 /* XXX: note we might have missing journal entries */
416                 return JOURNAL_ENTRY_BAD;
417         }
418
419         if (bytes > sectors_read << 9)
420                 return JOURNAL_ENTRY_REREAD;
421
422         if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
423                         "journal entry with unknown csum type %llu sector %llu",
424                         JSET_CSUM_TYPE(jset), sector))
425                 return JOURNAL_ENTRY_BAD;
426
427         csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
428         if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
429                                  "journal checksum bad, sector %llu", sector)) {
430                 /* XXX: retry IO, when we start retrying checksum errors */
431                 /* XXX: note we might have missing journal entries */
432                 return JOURNAL_ENTRY_BAD;
433         }
434
435         bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
436                      jset->encrypted_start,
437                      vstruct_end(jset) - (void *) jset->encrypted_start);
438
439         if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
440                                  "invalid journal entry: last_seq > seq"))
441                 jset->last_seq = jset->seq;
442
443         return 0;
444 fsck_err:
445         return ret;
446 }
447
448 struct journal_read_buf {
449         void            *data;
450         size_t          size;
451 };
452
453 static int journal_read_buf_realloc(struct journal_read_buf *b,
454                                     size_t new_size)
455 {
456         void *n;
457
458         /* the bios are sized for this many pages, max: */
459         if (new_size > JOURNAL_ENTRY_SIZE_MAX)
460                 return -ENOMEM;
461
462         new_size = roundup_pow_of_two(new_size);
463         n = kvpmalloc(new_size, GFP_KERNEL);
464         if (!n)
465                 return -ENOMEM;
466
467         kvpfree(b->data, b->size);
468         b->data = n;
469         b->size = new_size;
470         return 0;
471 }
472
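/*
 * Read all the journal entries in a single journal bucket, validating them
 * and adding them to the list to be replayed; if an entry is bigger than
 * our read buffer, grow the buffer and reread:
 */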
473 static int journal_read_bucket(struct bch_dev *ca,
474                                struct journal_read_buf *buf,
475                                struct journal_list *jlist,
476                                unsigned bucket)
477 {
478         struct bch_fs *c = ca->fs;
479         struct journal_device *ja = &ca->journal;
480         struct jset *j = NULL;
481         unsigned sectors, sectors_read = 0;
482         u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
483             end = offset + ca->mi.bucket_size;
484         bool saw_bad = false;
485         int ret = 0;
486
487         pr_debug("reading %u", bucket);
488
489         while (offset < end) {
490                 if (!sectors_read) {
491                         struct bio *bio;
492 reread:
493                         sectors_read = min_t(unsigned,
494                                 end - offset, buf->size >> 9);
495
496                         bio = bio_kmalloc(GFP_KERNEL,
497                                           buf_pages(buf->data,
498                                                     sectors_read << 9));
499                         bio_set_dev(bio, ca->disk_sb.bdev);
500                         bio->bi_iter.bi_sector  = offset;
501                         bio->bi_iter.bi_size    = sectors_read << 9;
502                         bio_set_op_attrs(bio, REQ_OP_READ, 0);
503                         bch2_bio_map(bio, buf->data);
504
505                         ret = submit_bio_wait(bio);
506                         bio_put(bio);
507
508                         if (bch2_dev_io_err_on(ret, ca,
509                                                "journal read from sector %llu",
510                                                offset) ||
511                             bch2_meta_read_fault("journal"))
512                                 return -EIO;
513
514                         j = buf->data;
515                 }
516
517                 ret = jset_validate(c, j, offset,
518                                     end - offset, sectors_read,
519                                     READ);
520                 switch (ret) {
521                 case BCH_FSCK_OK:
522                         break;
523                 case JOURNAL_ENTRY_REREAD:
524                         if (vstruct_bytes(j) > buf->size) {
525                                 ret = journal_read_buf_realloc(buf,
526                                                         vstruct_bytes(j));
527                                 if (ret)
528                                         return ret;
529                         }
530                         goto reread;
531                 case JOURNAL_ENTRY_NONE:
532                         if (!saw_bad)
533                                 return 0;
534                         sectors = c->opts.block_size;
535                         goto next_block;
536                 case JOURNAL_ENTRY_BAD:
537                         saw_bad = true;
538                         sectors = c->opts.block_size;
539                         goto next_block;
540                 default:
541                         return ret;
542                 }
543
544                 /*
545                  * This happens sometimes if we don't have discards on -
546                  * when we've partially overwritten a bucket with new
547                  * journal entries. We don't need the rest of the
548                  * bucket:
549                  */
550                 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
551                         return 0;
552
553                 ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
554
555                 mutex_lock(&jlist->lock);
556                 ret = journal_entry_add(c, ca, jlist, j);
557                 mutex_unlock(&jlist->lock);
558
559                 switch (ret) {
560                 case JOURNAL_ENTRY_ADD_OK:
561                         break;
562                 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
563                         break;
564                 default:
565                         return ret;
566                 }
567
568                 sectors = vstruct_sectors(j, c->block_bits);
569 next_block:
570                 pr_debug("next");
571                 offset          += sectors;
572                 sectors_read    -= sectors;
573                 j = ((void *) j) + (sectors << 9);
574         }
575
576         return 0;
577 }
578
579 static void bch2_journal_read_device(struct closure *cl)
580 {
581         struct journal_device *ja =
582                 container_of(cl, struct journal_device, read);
583         struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
584         struct journal_list *jlist =
585                 container_of(cl->parent, struct journal_list, cl);
586         struct journal_read_buf buf = { NULL, 0 };
587         u64 min_seq = U64_MAX;
588         unsigned i;
589         int ret;
590
591         if (!ja->nr)
592                 goto out;
593
594         ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
595         if (ret)
596                 goto err;
597
598         pr_debug("%u journal buckets", ja->nr);
599
600         for (i = 0; i < ja->nr; i++) {
601                 ret = journal_read_bucket(ca, &buf, jlist, i);
602                 if (ret)
603                         goto err;
604         }
605
606         /* Find the journal bucket with the highest sequence number: */
607         for (i = 0; i < ja->nr; i++) {
608                 if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
609                         ja->cur_idx = i;
610
611                 min_seq = min(ja->bucket_seq[i], min_seq);
612         }
613
614         /*
615          * If there are duplicate journal entries in multiple buckets (which
616          * definitely isn't supposed to happen, but...) - make sure to start
617          * cur_idx at the last of those buckets, so we don't deadlock trying to
618          * allocate
619          */
620         while (ja->bucket_seq[ja->cur_idx] > min_seq &&
621                ja->bucket_seq[ja->cur_idx] >
622                ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
623                 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
624
625         ja->sectors_free = 0;
626
627         /*
628          * Set last_idx to indicate the entire journal is full and needs to be
629          * reclaimed - journal reclaim will immediately reclaim whatever isn't
630          * pinned when it first runs:
631          */
632         ja->last_idx = (ja->cur_idx + 1) % ja->nr;
633 out:
634         kvpfree(buf.data, buf.size);
635         percpu_ref_put(&ca->io_ref);
636         closure_return(cl);
637         return;
638 err:
639         mutex_lock(&jlist->lock);
640         jlist->ret = ret;
641         mutex_unlock(&jlist->lock);
642         goto out;
643 }
644
645 void bch2_journal_entries_free(struct list_head *list)
646 {
647
648         while (!list_empty(list)) {
649                 struct journal_replay *i =
650                         list_first_entry(list, struct journal_replay, list);
651                 list_del(&i->list);
652                 kvpfree(i, offsetof(struct journal_replay, j) +
653                         vstruct_bytes(&i->j));
654         }
655 }
656
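/*
 * Size and initialize the journal pin fifo so that it covers sequence
 * numbers last_seq through end_seq of the journal entries we just read:
 */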
657 int bch2_journal_set_seq(struct bch_fs *c, u64 last_seq, u64 end_seq)
658 {
659         struct journal *j = &c->journal;
660         struct journal_entry_pin_list *p;
661         u64 seq, nr = end_seq - last_seq + 1;
662
663         if (nr > j->pin.size) {
664                 free_fifo(&j->pin);
665                 init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
666                 if (!j->pin.data) {
667                         bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
668                         return -ENOMEM;
669                 }
670         }
671
672         atomic64_set(&j->seq, end_seq);
673         j->last_seq_ondisk = last_seq;
674
675         j->pin.front    = last_seq;
676         j->pin.back     = end_seq + 1;
677
678         fifo_for_each_entry_ptr(p, &j->pin, seq) {
679                 INIT_LIST_HEAD(&p->list);
680                 INIT_LIST_HEAD(&p->flushed);
681                 atomic_set(&p->count, 0);
682                 p->devs.nr = 0;
683         }
684
685         return 0;
686 }
687
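/*
 * Read the journal from every device in parallel, then validate the entries,
 * mark the journal replicas in the superblock if needed, set up the pin
 * fifo, and check for blacklisted or missing sequence numbers:
 */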
688 int bch2_journal_read(struct bch_fs *c, struct list_head *list)
689 {
690         struct journal *j = &c->journal;
691         struct journal_list jlist;
692         struct journal_replay *i;
693         struct journal_entry_pin_list *p;
694         struct bch_dev *ca;
695         u64 cur_seq, end_seq;
696         unsigned iter;
697         size_t keys = 0, entries = 0;
698         bool degraded = false;
699         int ret = 0;
700
701         closure_init_stack(&jlist.cl);
702         mutex_init(&jlist.lock);
703         jlist.head = list;
704         jlist.ret = 0;
705
706         for_each_member_device(ca, c, iter) {
707                 if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
708                     !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
709                         continue;
710
711                 if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
712                      ca->mi.state == BCH_MEMBER_STATE_RO) &&
713                     percpu_ref_tryget(&ca->io_ref))
714                         closure_call(&ca->journal.read,
715                                      bch2_journal_read_device,
716                                      system_unbound_wq,
717                                      &jlist.cl);
718                 else
719                         degraded = true;
720         }
721
722         closure_sync(&jlist.cl);
723
724         if (jlist.ret)
725                 return jlist.ret;
726
727         if (list_empty(list)) {
728                 bch_err(c, "no journal entries found");
729                 return BCH_FSCK_REPAIR_IMPOSSIBLE;
730         }
731
732         list_for_each_entry(i, list, list) {
733                 struct bch_replicas_padded replicas;
734                 char buf[80];
735
736                 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);
737
738                 ret = jset_validate_entries(c, &i->j, READ);
739                 if (ret)
740                         goto fsck_err;
741
742                 /*
743                  * If we're mounting in degraded mode - if we didn't read from all
744                  * the devices - checking/marking replicas here would be wrong:
745                  */
746
747                 if (!degraded &&
748                     (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
749                      fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
750                                  "superblock not marked as containing replicas %s",
751                                  (bch2_replicas_entry_to_text(&PBUF(buf),
752                                                               &replicas.e), buf)))) {
753                         ret = bch2_mark_replicas(c, &replicas.e);
754                         if (ret)
755                                 return ret;
756                 }
757         }
758
759         i = list_last_entry(list, struct journal_replay, list);
760
761         ret = bch2_journal_set_seq(c,
762                                    le64_to_cpu(i->j.last_seq),
763                                    le64_to_cpu(i->j.seq));
764         if (ret)
765                 return ret;
766
767         mutex_lock(&j->blacklist_lock);
768
769         list_for_each_entry(i, list, list) {
770                 p = journal_seq_pin(j, le64_to_cpu(i->j.seq));
771
772                 atomic_set(&p->count, 1);
773                 p->devs = i->devs;
774
775                 if (bch2_journal_seq_blacklist_read(j, i)) {
776                         mutex_unlock(&j->blacklist_lock);
777                         return -ENOMEM;
778                 }
779         }
780
781         mutex_unlock(&j->blacklist_lock);
782
783         cur_seq = journal_last_seq(j);
784         end_seq = le64_to_cpu(list_last_entry(list,
785                                 struct journal_replay, list)->j.seq);
786
787         list_for_each_entry(i, list, list) {
788                 struct jset_entry *entry;
789                 struct bkey_i *k, *_n;
790                 bool blacklisted;
791
792                 mutex_lock(&j->blacklist_lock);
793                 while (cur_seq < le64_to_cpu(i->j.seq) &&
794                        bch2_journal_seq_blacklist_find(j, cur_seq))
795                         cur_seq++;
796
797                 blacklisted = bch2_journal_seq_blacklist_find(j,
798                                                          le64_to_cpu(i->j.seq));
799                 mutex_unlock(&j->blacklist_lock);
800
801                 fsck_err_on(blacklisted, c,
802                             "found blacklisted journal entry %llu",
803                             le64_to_cpu(i->j.seq));
804
805                 fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
806                         "journal entries %llu-%llu missing! (replaying %llu-%llu)",
807                         cur_seq, le64_to_cpu(i->j.seq) - 1,
808                         journal_last_seq(j), end_seq);
809
810                 cur_seq = le64_to_cpu(i->j.seq) + 1;
811
812                 for_each_jset_key(k, _n, entry, &i->j)
813                         keys++;
814                 entries++;
815         }
816
817         bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
818                  keys, entries, journal_cur_seq(j));
819 fsck_err:
820         return ret;
821 }
822
823 /* journal replay: */
824
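/*
 * Replay keys from the journal into the btree: alloc keys go through the
 * allocation code, everything else is inserted directly, and each entry's
 * journal pin is dropped once its keys have been replayed:
 */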
825 int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
826 {
827         struct journal *j = &c->journal;
828         struct bkey_i *k, *_n;
829         struct jset_entry *entry;
830         struct journal_replay *i, *n;
831         int ret = 0;
832
833         list_for_each_entry_safe(i, n, list, list) {
834                 j->replay_journal_seq = le64_to_cpu(i->j.seq);
835
836                 for_each_jset_key(k, _n, entry, &i->j) {
837
838                         if (entry->btree_id == BTREE_ID_ALLOC) {
839                                 /*
840                                  * allocation code handles replay for
841                                  * BTREE_ID_ALLOC keys:
842                                  */
843                                 ret = bch2_alloc_replay_key(c, k);
844                         } else {
845                                 /*
846                                  * We might cause compressed extents to be
847                                  * split, so we need to pass in a
848                                  * disk_reservation:
849                                  */
850                                 struct disk_reservation disk_res =
851                                         bch2_disk_reservation_init(c, 0);
852
853                                 ret = bch2_btree_insert(c, entry->btree_id, k,
854                                                 &disk_res, NULL,
855                                                 BTREE_INSERT_NOFAIL|
856                                                 BTREE_INSERT_JOURNAL_REPLAY|
857                                                 BTREE_INSERT_NOMARK);
858                         }
859
860                         if (ret) {
861                                 bch_err(c, "journal replay: error %d while replaying key",
862                                         ret);
863                                 goto err;
864                         }
865
866                         cond_resched();
867                 }
868
869                 bch2_journal_pin_put(j, j->replay_journal_seq);
870         }
871
872         j->replay_journal_seq = 0;
873
874         bch2_journal_set_replay_done(j);
875         bch2_journal_flush_all_pins(j);
876         ret = bch2_journal_error(j);
877 err:
878         bch2_journal_entries_free(list);
879         return ret;
880 }
881
882 /* journal write: */
883
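/*
 * Add pointers to the journal write for devices that have space, stopping
 * once we've got enough replicas:
 */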
884 static void __journal_write_alloc(struct journal *j,
885                                   struct journal_buf *w,
886                                   struct dev_alloc_list *devs_sorted,
887                                   unsigned sectors,
888                                   unsigned *replicas,
889                                   unsigned replicas_want)
890 {
891         struct bch_fs *c = container_of(j, struct bch_fs, journal);
892         struct journal_device *ja;
893         struct bch_dev *ca;
894         unsigned i;
895
896         if (*replicas >= replicas_want)
897                 return;
898
899         for (i = 0; i < devs_sorted->nr; i++) {
900                 ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
901                 if (!ca)
902                         continue;
903
904                 ja = &ca->journal;
905
906                 /*
907                  * Check that we can use this device, and aren't already using
908                  * it:
909                  */
910                 if (!ca->mi.durability ||
911                     ca->mi.state != BCH_MEMBER_STATE_RW ||
912                     !ja->nr ||
913                     bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
914                                          ca->dev_idx) ||
915                     sectors > ja->sectors_free)
916                         continue;
917
918                 bch2_dev_stripe_increment(c, ca, &j->wp.stripe);
919
920                 bch2_bkey_append_ptr(&w->key,
921                         (struct bch_extent_ptr) {
922                                   .offset = bucket_to_sector(ca,
923                                         ja->buckets[ja->cur_idx]) +
924                                         ca->mi.bucket_size -
925                                         ja->sectors_free,
926                                   .dev = ca->dev_idx,
927                 });
928
929                 ja->sectors_free -= sectors;
930                 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
931
932                 *replicas += ca->mi.durability;
933
934                 if (*replicas >= replicas_want)
935                         break;
936         }
937 }
938
939 /**
940  * journal_write_alloc - decide where to write the next journal entry
941  */
942 static int journal_write_alloc(struct journal *j, struct journal_buf *w,
943                                unsigned sectors)
944 {
945         struct bch_fs *c = container_of(j, struct bch_fs, journal);
946         struct journal_device *ja;
947         struct bch_dev *ca;
948         struct dev_alloc_list devs_sorted;
949         unsigned i, replicas = 0, replicas_want =
950                 READ_ONCE(c->opts.metadata_replicas);
951
952         rcu_read_lock();
953
954         devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
955                                           &c->rw_devs[BCH_DATA_JOURNAL]);
956
957         __journal_write_alloc(j, w, &devs_sorted,
958                               sectors, &replicas, replicas_want);
959
960         if (replicas >= replicas_want)
961                 goto done;
962
963         for (i = 0; i < devs_sorted.nr; i++) {
964                 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
965                 if (!ca)
966                         continue;
967
968                 ja = &ca->journal;
969
970                 if (sectors > ja->sectors_free &&
971                     sectors <= ca->mi.bucket_size &&
972                     bch2_journal_dev_buckets_available(j, ja)) {
973                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
974                         ja->sectors_free = ca->mi.bucket_size;
975                 }
976         }
977
978         __journal_write_alloc(j, w, &devs_sorted,
979                               sectors, &replicas, replicas_want);
980 done:
981         rcu_read_unlock();
982
983         return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
984 }
985
986 static void journal_write_compact(struct jset *jset)
987 {
988         struct jset_entry *i, *next, *prev = NULL;
989
990         /*
991          * Simple compaction, dropping empty jset_entries (from journal
992          * reservations that weren't fully used) and merging jset_entries that
993          * can be.
994          *
995          * If we wanted to be really fancy here, we could sort all the keys in
996          * the jset and drop keys that were overwritten - probably not worth it:
997          */
998         vstruct_for_each_safe(jset, i, next) {
999                 unsigned u64s = le16_to_cpu(i->u64s);
1000
1001                 /* Empty entry: */
1002                 if (!u64s)
1003                         continue;
1004
1005                 /* Can we merge with previous entry? */
1006                 if (prev &&
1007                     i->btree_id == prev->btree_id &&
1008                     i->level    == prev->level &&
1009                     i->type     == prev->type &&
1010                     i->type     == BCH_JSET_ENTRY_btree_keys &&
1011                     le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
1012                         memmove_u64s_down(vstruct_next(prev),
1013                                           i->_data,
1014                                           u64s);
1015                         le16_add_cpu(&prev->u64s, u64s);
1016                         continue;
1017                 }
1018
1019                 /* Couldn't merge, move i into new position (after prev): */
1020                 prev = prev ? vstruct_next(prev) : jset->start;
1021                 if (i != prev)
1022                         memmove_u64s_down(prev, i, jset_u64s(u64s));
1023         }
1024
1025         prev = prev ? vstruct_next(prev) : jset->start;
1026         jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
1027 }
1028
1029 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
1030 {
1031         /* we aren't holding j->lock: */
1032         unsigned new_size = READ_ONCE(j->buf_size_want);
1033         void *new_buf;
1034
1035         if (buf->buf_size >= new_size)
1036                 return;
1037
1038         new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
1039         if (!new_buf)
1040                 return;
1041
1042         memcpy(new_buf, buf->data, buf->buf_size);
1043         kvpfree(buf->data, buf->buf_size);
1044         buf->data       = new_buf;
1045         buf->buf_size   = new_size;
1046 }
1047
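/*
 * Called when all the bios for a journal write have completed: mark the
 * replicas the write landed on, update the on-disk sequence numbers, kick
 * journal reclaim and wake up anyone waiting on this write:
 */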
1048 static void journal_write_done(struct closure *cl)
1049 {
1050         struct journal *j = container_of(cl, struct journal, io);
1051         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1052         struct journal_buf *w = journal_prev_buf(j);
1053         struct bch_devs_list devs =
1054                 bch2_bkey_devs(bkey_i_to_s_c(&w->key));
1055         struct bch_replicas_padded replicas;
1056         u64 seq = le64_to_cpu(w->data->seq);
1057         u64 last_seq = le64_to_cpu(w->data->last_seq);
1058
1059         bch2_time_stats_update(j->write_time, j->write_start_time);
1060
1061         if (!devs.nr) {
1062                 bch_err(c, "unable to write journal to sufficient devices");
1063                 goto err;
1064         }
1065
1066         bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);
1067
1068         if (bch2_mark_replicas(c, &replicas.e))
1069                 goto err;
1070
1071         spin_lock(&j->lock);
1072         j->seq_ondisk           = seq;
1073         j->last_seq_ondisk      = last_seq;
1074
1075         if (seq >= j->pin.front)
1076                 journal_seq_pin(j, seq)->devs = devs;
1077
1078         /*
1079          * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
1080          * more buckets:
1081          *
1082          * Must come before signaling write completion, for
1083          * bch2_fs_journal_stop():
1084          */
1085         mod_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
1086 out:
1087         /* also must come before signalling write completion: */
1088         closure_debug_destroy(cl);
1089
1090         BUG_ON(!j->reservations.prev_buf_unwritten);
1091         atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
1092                      &j->reservations.counter);
1093
1094         closure_wake_up(&w->wait);
1095         journal_wake(j);
1096
1097         if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
1098                 mod_delayed_work(system_freezable_wq, &j->write_work, 0);
1099         spin_unlock(&j->lock);
1100         return;
1101 err:
1102         bch2_fatal_error(c);
1103         bch2_journal_halt(j);
1104         spin_lock(&j->lock);
1105         goto out;
1106 }
1107
1108 static void journal_write_endio(struct bio *bio)
1109 {
1110         struct bch_dev *ca = bio->bi_private;
1111         struct journal *j = &ca->fs->journal;
1112
1113         if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
1114             bch2_meta_write_fault("journal")) {
1115                 struct journal_buf *w = journal_prev_buf(j);
1116                 unsigned long flags;
1117
1118                 spin_lock_irqsave(&j->err_lock, flags);
1119                 bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
1120                 spin_unlock_irqrestore(&j->err_lock, flags);
1121         }
1122
1123         closure_put(&j->io);
1124         percpu_ref_put(&ca->io_ref);
1125 }
1126
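/*
 * Main journal write path: append the entries from
 * bch2_journal_super_entries_add_common(), compact the jset, encrypt (if
 * enabled) and checksum it, allocate devices for the write, then submit the
 * bios - plus a flush to any other device that needs one:
 */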
1127 void bch2_journal_write(struct closure *cl)
1128 {
1129         struct journal *j = container_of(cl, struct journal, io);
1130         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1131         struct bch_dev *ca;
1132         struct journal_buf *w = journal_prev_buf(j);
1133         struct jset_entry *start, *end;
1134         struct jset *jset;
1135         struct bio *bio;
1136         struct bch_extent_ptr *ptr;
1137         bool validate_before_checksum = false;
1138         unsigned i, sectors, bytes, u64s;
1139         int ret;
1140
1141         bch2_journal_pin_put(j, le64_to_cpu(w->data->seq));
1142
1143         journal_buf_realloc(j, w);
1144         jset = w->data;
1145
1146         j->write_start_time = local_clock();
1147
1148         start   = vstruct_last(jset);
1149         end     = bch2_journal_super_entries_add_common(c, start);
1150         u64s    = (u64 *) end - (u64 *) start;
1151         BUG_ON(u64s > j->entry_u64s_reserved);
1152
1153         le32_add_cpu(&jset->u64s, u64s);
1154         BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
1155
1156         journal_write_compact(jset);
1157
1158         jset->read_clock        = cpu_to_le16(c->bucket_clock[READ].hand);
1159         jset->write_clock       = cpu_to_le16(c->bucket_clock[WRITE].hand);
1160         jset->magic             = cpu_to_le64(jset_magic(c));
1161
1162         jset->version           = c->sb.version < bcachefs_metadata_version_new_versioning
1163                 ? cpu_to_le32(BCH_JSET_VERSION_OLD)
1164                 : cpu_to_le32(c->sb.version);
1165
1166         SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
1167         SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
1168
1169         if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
1170                 validate_before_checksum = true;
1171
1172         if (le32_to_cpu(jset->version) <
1173             bcachefs_metadata_version_bkey_renumber)
1174                 validate_before_checksum = true;
1175
1176         if (validate_before_checksum &&
1177             jset_validate_entries(c, jset, WRITE))
1178                 goto err;
1179
1180         bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
1181                     jset->encrypted_start,
1182                     vstruct_end(jset) - (void *) jset->encrypted_start);
1183
1184         jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
1185                                   journal_nonce(jset), jset);
1186
1187         if (!validate_before_checksum &&
1188             jset_validate_entries(c, jset, WRITE))
1189                 goto err;
1190
1191         sectors = vstruct_sectors(jset, c->block_bits);
1192         BUG_ON(sectors > w->sectors);
1193
1194         bytes = vstruct_bytes(jset);
1195         memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
1196
1197         spin_lock(&j->lock);
1198         ret = journal_write_alloc(j, w, sectors);
1199
1200         /*
1201          * write is allocated, no longer need to account for it in
1202          * bch2_journal_space_available():
1203          */
1204         w->sectors = 0;
1205
1206         /*
1207          * journal entry has been compacted and allocated, recalculate space
1208          * available:
1209          */
1210         bch2_journal_space_available(j);
1211         spin_unlock(&j->lock);
1212
1213         if (ret) {
1214                 bch2_journal_halt(j);
1215                 bch_err(c, "Unable to allocate journal write");
1216                 bch2_fatal_error(c);
1217                 continue_at(cl, journal_write_done, system_highpri_wq);
1218                 return;
1219         }
1220
1221         /*
1222          * XXX: we really should just disable the entire journal in nochanges
1223          * mode
1224          */
1225         if (c->opts.nochanges)
1226                 goto no_io;
1227
1228         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
1229                 ca = bch_dev_bkey_exists(c, ptr->dev);
1230                 if (!percpu_ref_tryget(&ca->io_ref)) {
1231                         /* XXX: fix this */
1232                         bch_err(c, "missing device for journal write");
1233                         continue;
1234                 }
1235
1236                 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
1237                              sectors);
1238
1239                 bio = ca->journal.bio;
1240                 bio_reset(bio);
1241                 bio_set_dev(bio, ca->disk_sb.bdev);
1242                 bio->bi_iter.bi_sector  = ptr->offset;
1243                 bio->bi_iter.bi_size    = sectors << 9;
1244                 bio->bi_end_io          = journal_write_endio;
1245                 bio->bi_private         = ca;
1246                 bio_set_op_attrs(bio, REQ_OP_WRITE,
1247                                  REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
1248                 bch2_bio_map(bio, jset);
1249
1250                 trace_journal_write(bio);
1251                 closure_bio_submit(bio, cl);
1252
1253                 ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(jset->seq);
1254         }
1255
1256         for_each_rw_member(ca, c, i)
1257                 if (journal_flushes_device(ca) &&
1258                     !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
1259                         percpu_ref_get(&ca->io_ref);
1260
1261                         bio = ca->journal.bio;
1262                         bio_reset(bio);
1263                         bio_set_dev(bio, ca->disk_sb.bdev);
1264                         bio->bi_opf             = REQ_OP_FLUSH;
1265                         bio->bi_end_io          = journal_write_endio;
1266                         bio->bi_private         = ca;
1267                         closure_bio_submit(bio, cl);
1268                 }
1269
1270 no_io:
1271         bch2_bucket_seq_cleanup(c);
1272
1273         continue_at(cl, journal_write_done, system_highpri_wq);
1274         return;
1275 err:
1276         bch2_inconsistent_error(c);
1277         continue_at(cl, journal_write_done, system_highpri_wq);
1278 }