// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

struct journal_list {
        struct closure          cl;
        struct mutex            lock;
        struct list_head        *head;
        int                     ret;
};

#define JOURNAL_ENTRY_ADD_OK            0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                             struct journal_list *jlist, struct jset *j)
{
        struct journal_replay *i, *pos;
        struct list_head *where;
        size_t bytes = vstruct_bytes(j);
        __le64 last_seq;
        int ret;

        last_seq = !list_empty(jlist->head)
                ? list_last_entry(jlist->head, struct journal_replay,
                                  list)->j.last_seq
                : 0;

        /* Is this entry older than the range we need? */
        if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
                ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
                goto out;
        }

        /* Drop entries we don't need anymore */
        list_for_each_entry_safe(i, pos, jlist->head, list) {
                if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
                        break;
                list_del(&i->list);
                kvpfree(i, offsetof(struct journal_replay, j) +
                        vstruct_bytes(&i->j));
        }

        list_for_each_entry_reverse(i, jlist->head, list) {
                /* Duplicate? */
                if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
                        fsck_err_on(bytes != vstruct_bytes(&i->j) ||
                                    memcmp(j, &i->j, bytes), c,
                                    "found duplicate but non-identical journal entries (seq %llu)",
                                    le64_to_cpu(j->seq));
                        goto found;
                }

                if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
                        where = &i->list;
                        goto add;
                }
        }

        where = jlist->head;
add:
        i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
        if (!i) {
                ret = -ENOMEM;
                goto out;
        }

        list_add(&i->list, where);
        i->devs.nr = 0;
        memcpy(&i->j, j, bytes);
found:
        if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
                bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
        else
                fsck_err_on(1, c, "duplicate journal entries on same device");
        ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
        return ret;
}

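/*
 * The nonce used to checksum/encrypt a journal entry is derived from that
 * entry's sequence number, so every jset gets a distinct nonce:
 */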
static struct nonce journal_nonce(const struct jset *jset)
{
        return (struct nonce) {{
                [0] = 0,
                [1] = ((__le32 *) &jset->seq)[0],
                [2] = ((__le32 *) &jset->seq)[1],
                [3] = BCH_NONCE_JOURNAL,
        }};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
        struct jset_entry *entry;

        for (entry = start; entry != end; entry = vstruct_next(entry))
                memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD    5
#define JOURNAL_ENTRY_NONE      6
#define JOURNAL_ENTRY_BAD       7

#define journal_entry_err(c, msg, ...)                                  \
({                                                                      \
        switch (write) {                                                \
        case READ:                                                      \
                mustfix_fsck_err(c, msg, ##__VA_ARGS__);                \
                break;                                                  \
        case WRITE:                                                     \
                bch_err(c, "corrupt metadata before write:\n"           \
                        msg, ##__VA_ARGS__);                            \
                if (bch2_fs_inconsistent(c)) {                          \
                        ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
                        goto fsck_err;                                  \
                }                                                       \
                break;                                                  \
        }                                                               \
        true;                                                           \
})

#define journal_entry_err_on(cond, c, msg, ...)                         \
        ((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

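/*
 * Validate a single bkey in a journal entry; invalid keys are dropped (or the
 * remainder of the entry nulled out) so that journal replay can carry on with
 * whatever remains:
 */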
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
                                struct jset_entry *entry,
                                struct bkey_i *k, enum btree_node_type key_type,
                                const char *type, int write)
{
        void *next = vstruct_next(entry);
        const char *invalid;
        unsigned version = le32_to_cpu(jset->version);
        int ret = 0;

        if (journal_entry_err_on(!k->k.u64s, c,
                        "invalid %s in journal: k->u64s 0", type)) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        if (journal_entry_err_on((void *) bkey_next(k) >
                                (void *) vstruct_next(entry), c,
                        "invalid %s in journal: extends past end of journal entry",
                        type)) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
                        "invalid %s in journal: bad format %u",
                        type, k->k.format)) {
                le16_add_cpu(&entry->u64s, -k->k.u64s);
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN) {
                bch2_bkey_swab_key(NULL, bkey_to_packed(k));
                bch2_bkey_swab_val(bkey_i_to_s(k));
        }

        if (!write &&
            version < bcachefs_metadata_version_bkey_renumber)
                bch2_bkey_renumber(key_type, bkey_to_packed(k), write);

        invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), key_type);
        if (invalid) {
                char buf[160];

                bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
                mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
                                 type, invalid, buf);

                le16_add_cpu(&entry->u64s, -k->k.u64s);
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        if (write &&
            version < bcachefs_metadata_version_bkey_renumber)
                bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
fsck_err:
        return ret;
}

static int journal_entry_validate_btree_keys(struct bch_fs *c,
                                             struct jset *jset,
                                             struct jset_entry *entry,
                                             int write)
{
        struct bkey_i *k;

        vstruct_for_each(entry, k) {
                int ret = journal_validate_key(c, jset, entry, k,
                                __btree_node_type(entry->level,
                                                  entry->btree_id),
                                "key", write);
                if (ret)
                        return ret;
        }

        return 0;
}

static int journal_entry_validate_btree_root(struct bch_fs *c,
                                             struct jset *jset,
                                             struct jset_entry *entry,
                                             int write)
{
        struct bkey_i *k = entry->start;
        int ret = 0;

        if (journal_entry_err_on(!entry->u64s ||
                                 le16_to_cpu(entry->u64s) != k->k.u64s, c,
                                 "invalid btree root journal entry: wrong number of keys")) {
                void *next = vstruct_next(entry);
                /*
                 * we don't want to null out this jset_entry,
                 * just the contents, so that later we can tell
                 * we were _supposed_ to have a btree root
                 */
                entry->u64s = 0;
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
                                    "btree root", write);
fsck_err:
        return ret;
}

static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
                                            struct jset *jset,
                                            struct jset_entry *entry,
                                            int write)
{
        /* obsolete, don't care: */
        return 0;
}

static int journal_entry_validate_blacklist(struct bch_fs *c,
                                            struct jset *jset,
                                            struct jset_entry *entry,
                                            int write)
{
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
fsck_err:
        return ret;
}

static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
                                               struct jset *jset,
                                               struct jset_entry *entry,
                                               int write)
{
        struct jset_entry_blacklist_v2 *bl_entry;
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }

        bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

        if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
                                 le64_to_cpu(bl_entry->end), c,
                "invalid journal seq blacklist entry: start > end")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
out:
fsck_err:
        return ret;
}

static int journal_entry_validate_usage(struct bch_fs *c,
                                        struct jset *jset,
                                        struct jset_entry *entry,
                                        int write)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u),
                                 c,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static int journal_entry_validate_data_usage(struct bch_fs *c,
                                        struct jset *jset,
                                        struct jset_entry *entry,
                                        int write)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u) ||
                                 bytes < sizeof(*u) + u->r.nr_devs,
                                 c,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

struct jset_entry_ops {
        int (*validate)(struct bch_fs *, struct jset *,
                        struct jset_entry *, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)                                                \
        [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
                .validate       = journal_entry_validate_##f,   \
        },
        BCH_JSET_ENTRY_TYPES()
#undef x
};

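/*
 * Validate one jset_entry by dispatching on its type; unknown entry types are
 * passed through unvalidated:
 */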
static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
                                  struct jset_entry *entry, int write)
{
        return entry->type < BCH_JSET_ENTRY_NR
                ? bch2_jset_entry_ops[entry->type].validate(c, jset,
                                                            entry, write)
                : 0;
}

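/*
 * Validate every entry in a jset, truncating the jset at the first entry that
 * extends past its end:
 */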
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
                                 int write)
{
        struct jset_entry *entry;
        int ret = 0;

        vstruct_for_each(jset, entry) {
                if (journal_entry_err_on(vstruct_next(entry) >
                                         vstruct_last(jset), c,
                                "journal entry extends past end of jset")) {
                        jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
                        break;
                }

                ret = journal_entry_validate(c, jset, entry, write);
                if (ret)
                        break;
        }
fsck_err:
        return ret;
}

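/*
 * Validate a jset read from disk: check the magic, version, size and checksum,
 * then decrypt the payload in place so the entries themselves can later be
 * validated and replayed:
 */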
static int jset_validate(struct bch_fs *c,
                         struct jset *jset, u64 sector,
                         unsigned bucket_sectors_left,
                         unsigned sectors_read,
                         int write)
{
        size_t bytes = vstruct_bytes(jset);
        struct bch_csum csum;
        unsigned version;
        int ret = 0;

        if (le64_to_cpu(jset->magic) != jset_magic(c))
                return JOURNAL_ENTRY_NONE;

        version = le32_to_cpu(jset->version);
        if ((version != BCH_JSET_VERSION_OLD &&
             version < bcachefs_metadata_version_min) ||
            version >= bcachefs_metadata_version_max) {
                bch_err(c, "unknown journal entry version %u", version);
                return BCH_FSCK_UNKNOWN_VERSION;
        }

        if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
                                 "journal entry too big (%zu bytes), sector %llu",
                                 bytes, sector)) {
                /* XXX: note we might have missing journal entries */
                return JOURNAL_ENTRY_BAD;
        }

        if (bytes > sectors_read << 9)
                return JOURNAL_ENTRY_REREAD;

        if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
                        "journal entry with unknown csum type %llu sector %llu",
                        JSET_CSUM_TYPE(jset), sector))
                return JOURNAL_ENTRY_BAD;

        csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
        if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
                                 "journal checksum bad, sector %llu", sector)) {
                /* XXX: retry IO, when we start retrying checksum errors */
                /* XXX: note we might have missing journal entries */
                return JOURNAL_ENTRY_BAD;
        }

        bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
                     jset->encrypted_start,
                     vstruct_end(jset) - (void *) jset->encrypted_start);

        if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
                                 "invalid journal entry: last_seq > seq"))
                jset->last_seq = jset->seq;

        return 0;
fsck_err:
        return ret;
}

struct journal_read_buf {
        void            *data;
        size_t          size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
                                    size_t new_size)
{
        void *n;

        /* the bios are sized for this many pages, max: */
        if (new_size > JOURNAL_ENTRY_SIZE_MAX)
                return -ENOMEM;

        new_size = roundup_pow_of_two(new_size);
        n = kvpmalloc(new_size, GFP_KERNEL);
        if (!n)
                return -ENOMEM;

        kvpfree(b->data, b->size);
        b->data = n;
        b->size = new_size;
        return 0;
}

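/*
 * Read all the journal entries in a single journal bucket, growing the read
 * buffer and rereading whenever an entry turns out to be larger than what has
 * been read so far:
 */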
static int journal_read_bucket(struct bch_dev *ca,
                               struct journal_read_buf *buf,
                               struct journal_list *jlist,
                               unsigned bucket)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        struct jset *j = NULL;
        unsigned sectors, sectors_read = 0;
        u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
            end = offset + ca->mi.bucket_size;
        bool saw_bad = false;
        int ret = 0;

        pr_debug("reading %u", bucket);

        while (offset < end) {
                if (!sectors_read) {
                        struct bio *bio;
reread:
                        sectors_read = min_t(unsigned,
                                end - offset, buf->size >> 9);

                        bio = bio_kmalloc(GFP_KERNEL,
                                          buf_pages(buf->data,
                                                    sectors_read << 9));
                        bio_set_dev(bio, ca->disk_sb.bdev);
                        bio->bi_iter.bi_sector  = offset;
                        bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        bch2_bio_map(bio, buf->data, sectors_read << 9);

                        ret = submit_bio_wait(bio);
                        bio_put(bio);

                        if (bch2_dev_io_err_on(ret, ca,
                                               "journal read from sector %llu",
                                               offset) ||
                            bch2_meta_read_fault("journal"))
                                return -EIO;

                        j = buf->data;
                }

                ret = jset_validate(c, j, offset,
                                    end - offset, sectors_read,
                                    READ);
                switch (ret) {
                case BCH_FSCK_OK:
                        break;
                case JOURNAL_ENTRY_REREAD:
                        if (vstruct_bytes(j) > buf->size) {
                                ret = journal_read_buf_realloc(buf,
                                                        vstruct_bytes(j));
                                if (ret)
                                        return ret;
                        }
                        goto reread;
                case JOURNAL_ENTRY_NONE:
                        if (!saw_bad)
                                return 0;
                        sectors = c->opts.block_size;
                        goto next_block;
                case JOURNAL_ENTRY_BAD:
                        saw_bad = true;
                        sectors = c->opts.block_size;
                        goto next_block;
                default:
                        return ret;
                }

                /*
                 * This happens sometimes if we don't have discards on -
                 * when we've partially overwritten a bucket with new
                 * journal entries. We don't need the rest of the
                 * bucket:
                 */
                if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
                        return 0;

                ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

                mutex_lock(&jlist->lock);
                ret = journal_entry_add(c, ca, jlist, j);
                mutex_unlock(&jlist->lock);

                switch (ret) {
                case JOURNAL_ENTRY_ADD_OK:
                        break;
                case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
                        break;
                default:
                        return ret;
                }

                sectors = vstruct_sectors(j, c->block_bits);
next_block:
                pr_debug("next");
                offset          += sectors;
                sectors_read    -= sectors;
                j = ((void *) j) + (sectors << 9);
        }

        return 0;
}

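/*
 * Closure that reads the journal on a single device: scan every journal
 * bucket, then pick cur_idx (the bucket holding the newest entries) and mark
 * the rest of the journal as needing reclaim:
 */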
static void bch2_journal_read_device(struct closure *cl)
{
        struct journal_device *ja =
                container_of(cl, struct journal_device, read);
        struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
        struct journal_list *jlist =
                container_of(cl->parent, struct journal_list, cl);
        struct journal_read_buf buf = { NULL, 0 };
        u64 min_seq = U64_MAX;
        unsigned i;
        int ret;

        if (!ja->nr)
                goto out;

        ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
        if (ret)
                goto err;

        pr_debug("%u journal buckets", ja->nr);

        for (i = 0; i < ja->nr; i++) {
                ret = journal_read_bucket(ca, &buf, jlist, i);
                if (ret)
                        goto err;
        }

        /* Find the journal bucket with the highest sequence number: */
        for (i = 0; i < ja->nr; i++) {
                if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
                        ja->cur_idx = i;

                min_seq = min(ja->bucket_seq[i], min_seq);
        }

        /*
         * If there are duplicate journal entries in multiple buckets (which
         * definitely isn't supposed to happen, but...) - make sure to start
         * cur_idx at the last of those buckets, so we don't deadlock trying to
         * allocate
         */
        while (ja->bucket_seq[ja->cur_idx] > min_seq &&
               ja->bucket_seq[ja->cur_idx] >
               ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
                ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

        ja->sectors_free = 0;

        /*
         * Set dirty_idx to indicate the entire journal is full and needs to be
         * reclaimed - journal reclaim will immediately reclaim whatever isn't
         * pinned when it first runs:
         */
        ja->discard_idx = ja->dirty_idx_ondisk =
                ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
        kvpfree(buf.data, buf.size);
        percpu_ref_put(&ca->io_ref);
        closure_return(cl);
        return;
err:
        mutex_lock(&jlist->lock);
        jlist->ret = ret;
        mutex_unlock(&jlist->lock);
        goto out;
}

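/*
 * Read the journal from all member devices, collecting the entries that need
 * to be replayed on @list and making sure each entry's replicas are marked in
 * the superblock:
 */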
int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
        struct journal_list jlist;
        struct journal_replay *i;
        struct bch_dev *ca;
        unsigned iter;
        size_t keys = 0, entries = 0;
        bool degraded = false;
        int ret = 0;

        closure_init_stack(&jlist.cl);
        mutex_init(&jlist.lock);
        jlist.head = list;
        jlist.ret = 0;

        for_each_member_device(ca, c, iter) {
                if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
                    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
                        continue;

                if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
                     ca->mi.state == BCH_MEMBER_STATE_RO) &&
                    percpu_ref_tryget(&ca->io_ref))
                        closure_call(&ca->journal.read,
                                     bch2_journal_read_device,
                                     system_unbound_wq,
                                     &jlist.cl);
                else
                        degraded = true;
        }

        closure_sync(&jlist.cl);

        if (jlist.ret)
                return jlist.ret;

        list_for_each_entry(i, list, list) {
                struct jset_entry *entry;
                struct bkey_i *k, *_n;
                struct bch_replicas_padded replicas;
                char buf[80];

                ret = jset_validate_entries(c, &i->j, READ);
                if (ret)
                        goto fsck_err;

                /*
                 * If we're mounting in degraded mode - if we didn't read all
                 * the devices - this is wrong:
                 */

                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);

                if (!degraded &&
                    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
                     fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
                                 "superblock not marked as containing replicas %s",
                                 (bch2_replicas_entry_to_text(&PBUF(buf),
                                                              &replicas.e), buf)))) {
                        ret = bch2_mark_replicas(c, &replicas.e);
                        if (ret)
                                return ret;
                }

                for_each_jset_key(k, _n, entry, &i->j)
                        keys++;
                entries++;
        }

        if (!list_empty(list)) {
                i = list_last_entry(list, struct journal_replay, list);

                bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
                         keys, entries, le64_to_cpu(i->j.seq));
        }
fsck_err:
        return ret;
}

/* journal write: */

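/*
 * Append pointers to w->key for devices from @devs_sorted that have journal
 * space for this write, stopping once *replicas reaches @replicas_want:
 */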
static void __journal_write_alloc(struct journal *j,
                                  struct journal_buf *w,
                                  struct dev_alloc_list *devs_sorted,
                                  unsigned sectors,
                                  unsigned *replicas,
                                  unsigned replicas_want)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_device *ja;
        struct bch_dev *ca;
        unsigned i;

        if (*replicas >= replicas_want)
                return;

        for (i = 0; i < devs_sorted->nr; i++) {
                ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
                if (!ca)
                        continue;

                ja = &ca->journal;

                /*
                 * Check that we can use this device, and aren't already using
                 * it:
                 */
                if (!ca->mi.durability ||
                    ca->mi.state != BCH_MEMBER_STATE_RW ||
                    !ja->nr ||
                    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
                                         ca->dev_idx) ||
                    sectors > ja->sectors_free)
                        continue;

                bch2_dev_stripe_increment(c, ca, &j->wp.stripe);

                bch2_bkey_append_ptr(&w->key,
                        (struct bch_extent_ptr) {
                                  .offset = bucket_to_sector(ca,
                                        ja->buckets[ja->cur_idx]) +
                                        ca->mi.bucket_size -
                                        ja->sectors_free,
                                  .dev = ca->dev_idx,
                });

                ja->sectors_free -= sectors;
                ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

                *replicas += ca->mi.durability;

                if (*replicas >= replicas_want)
                        break;
        }
}

/**
 * journal_write_alloc - decide where to write the next journal entry, moving
 * on to the next journal bucket on a device if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
                               unsigned sectors)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_device *ja;
        struct bch_dev *ca;
        struct dev_alloc_list devs_sorted;
        unsigned i, replicas = 0, replicas_want =
                READ_ONCE(c->opts.metadata_replicas);

        rcu_read_lock();

        devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
                                          &c->rw_devs[BCH_DATA_JOURNAL]);

        __journal_write_alloc(j, w, &devs_sorted,
                              sectors, &replicas, replicas_want);

        if (replicas >= replicas_want)
                goto done;

        for (i = 0; i < devs_sorted.nr; i++) {
                ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
                if (!ca)
                        continue;

                ja = &ca->journal;

                if (sectors > ja->sectors_free &&
                    sectors <= ca->mi.bucket_size &&
                    bch2_journal_dev_buckets_available(j, ja,
                                        journal_space_discarded)) {
                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
                        ja->sectors_free = ca->mi.bucket_size;

                        /*
                         * ja->bucket_seq[ja->cur_idx] must always have
                         * something sensible:
                         */
                        ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
                }
        }

        __journal_write_alloc(j, w, &devs_sorted,
                              sectors, &replicas, replicas_want);
done:
        rcu_read_unlock();

        return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

static void journal_write_compact(struct jset *jset)
{
        struct jset_entry *i, *next, *prev = NULL;

        /*
         * Simple compaction, dropping empty jset_entries (from journal
         * reservations that weren't fully used) and merging jset_entries that
         * can be.
         *
         * If we wanted to be really fancy here, we could sort all the keys in
         * the jset and drop keys that were overwritten - probably not worth it:
         */
        vstruct_for_each_safe(jset, i, next) {
                unsigned u64s = le16_to_cpu(i->u64s);

                /* Empty entry: */
                if (!u64s)
                        continue;

                /* Can we merge with previous entry? */
                if (prev &&
                    i->btree_id == prev->btree_id &&
                    i->level    == prev->level &&
                    i->type     == prev->type &&
                    i->type     == BCH_JSET_ENTRY_btree_keys &&
                    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
                        memmove_u64s_down(vstruct_next(prev),
                                          i->_data,
                                          u64s);
                        le16_add_cpu(&prev->u64s, u64s);
                        continue;
                }

                /* Couldn't merge, move i into new position (after prev): */
                prev = prev ? vstruct_next(prev) : jset->start;
                if (i != prev)
                        memmove_u64s_down(prev, i, jset_u64s(u64s));
        }

        prev = prev ? vstruct_next(prev) : jset->start;
        jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}

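/*
 * Try to grow the journal buffer towards buf_size_want; failure is fine, we
 * just keep using the current buffer:
 */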
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
        /* we aren't holding j->lock: */
        unsigned new_size = READ_ONCE(j->buf_size_want);
        void *new_buf;

        if (buf->buf_size >= new_size)
                return;

        new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
        if (!new_buf)
                return;

        memcpy(new_buf, buf->data, buf->buf_size);
        kvpfree(buf->data, buf->buf_size);
        buf->data       = new_buf;
        buf->buf_size   = new_size;
}

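/*
 * Journal write completion: mark the replicas the entry was written to, update
 * the on-disk sequence numbers, kick journal reclaim and wake up waiters:
 */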
static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *w = journal_prev_buf(j);
        struct bch_devs_list devs =
                bch2_bkey_devs(bkey_i_to_s_c(&w->key));
        struct bch_replicas_padded replicas;
        u64 seq = le64_to_cpu(w->data->seq);
        u64 last_seq = le64_to_cpu(w->data->last_seq);

        bch2_time_stats_update(j->write_time, j->write_start_time);

        if (!devs.nr) {
                bch_err(c, "unable to write journal to sufficient devices");
                goto err;
        }

        bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);

        if (bch2_mark_replicas(c, &replicas.e))
                goto err;

        spin_lock(&j->lock);
        if (seq >= j->pin.front)
                journal_seq_pin(j, seq)->devs = devs;

        j->seq_ondisk           = seq;
        j->last_seq_ondisk      = last_seq;
        bch2_journal_space_available(j);

        /*
         * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
         * more buckets:
         *
         * Must come before signaling write completion, for
         * bch2_fs_journal_stop():
         */
        mod_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
out:
        /* also must come before signalling write completion: */
        closure_debug_destroy(cl);

        BUG_ON(!j->reservations.prev_buf_unwritten);
        atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
                     &j->reservations.counter);

        closure_wake_up(&w->wait);
        journal_wake(j);

        if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
                mod_delayed_work(system_freezable_wq, &j->write_work, 0);
        spin_unlock(&j->lock);
        return;
err:
        bch2_fatal_error(c);
        spin_lock(&j->lock);
        goto out;
}

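/*
 * Per-device write completion: on error, drop the device from the journal
 * entry's key so it isn't counted as a valid replica:
 */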
static void journal_write_endio(struct bio *bio)
{
        struct bch_dev *ca = bio->bi_private;
        struct journal *j = &ca->fs->journal;

        if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
            bch2_meta_write_fault("journal")) {
                struct journal_buf *w = journal_prev_buf(j);
                unsigned long flags;

                spin_lock_irqsave(&j->err_lock, flags);
                bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
                spin_unlock_irqrestore(&j->err_lock, flags);
        }

        closure_put(&j->io);
        percpu_ref_put(&ca->io_ref);
}

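/*
 * Write out a journal entry: append the superblock entries, compact,
 * checksum/encrypt, allocate space on enough devices, then submit the writes
 * (plus flushes to rw devices that weren't written to):
 */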
void bch2_journal_write(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        struct journal_buf *w = journal_prev_buf(j);
        struct jset_entry *start, *end;
        struct jset *jset;
        struct bio *bio;
        struct bch_extent_ptr *ptr;
        bool validate_before_checksum = false;
        unsigned i, sectors, bytes, u64s;
        int ret;

        bch2_journal_pin_put(j, le64_to_cpu(w->data->seq));

        journal_buf_realloc(j, w);
        jset = w->data;

        j->write_start_time = local_clock();

        start   = vstruct_last(jset);
        end     = bch2_journal_super_entries_add_common(c, start,
                                                le64_to_cpu(jset->seq));
        u64s    = (u64 *) end - (u64 *) start;
        BUG_ON(u64s > j->entry_u64s_reserved);

        le32_add_cpu(&jset->u64s, u64s);
        BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

        journal_write_compact(jset);

        jset->read_clock        = cpu_to_le16(c->bucket_clock[READ].hand);
        jset->write_clock       = cpu_to_le16(c->bucket_clock[WRITE].hand);
        jset->magic             = cpu_to_le64(jset_magic(c));

        jset->version           = c->sb.version < bcachefs_metadata_version_new_versioning
                ? cpu_to_le32(BCH_JSET_VERSION_OLD)
                : cpu_to_le32(c->sb.version);

        SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
        SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

        if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
                validate_before_checksum = true;

        if (le32_to_cpu(jset->version) <
            bcachefs_metadata_version_bkey_renumber)
                validate_before_checksum = true;

        if (validate_before_checksum &&
            jset_validate_entries(c, jset, WRITE))
                goto err;

        bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
                    jset->encrypted_start,
                    vstruct_end(jset) - (void *) jset->encrypted_start);

        jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
                                  journal_nonce(jset), jset);

        if (!validate_before_checksum &&
            jset_validate_entries(c, jset, WRITE))
                goto err;

        sectors = vstruct_sectors(jset, c->block_bits);
        BUG_ON(sectors > w->sectors);

        bytes = vstruct_bytes(jset);
        memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

        spin_lock(&j->lock);
        ret = journal_write_alloc(j, w, sectors);

        /*
         * write is allocated, no longer need to account for it in
         * bch2_journal_space_available():
         */
        w->sectors = 0;

        /*
         * journal entry has been compacted and allocated, recalculate space
         * available:
         */
        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        if (ret) {
                bch_err(c, "Unable to allocate journal write");
                bch2_fatal_error(c);
                continue_at(cl, journal_write_done, system_highpri_wq);
                return;
        }

        /*
         * XXX: we really should just disable the entire journal in nochanges
         * mode
         */
        if (c->opts.nochanges)
                goto no_io;

        extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
                ca = bch_dev_bkey_exists(c, ptr->dev);
                if (!percpu_ref_tryget(&ca->io_ref)) {
                        /* XXX: fix this */
                        bch_err(c, "missing device for journal write\n");
                        continue;
                }

                this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
                             sectors);

                bio = ca->journal.bio;
                bio_reset(bio);
                bio_set_dev(bio, ca->disk_sb.bdev);
                bio->bi_iter.bi_sector  = ptr->offset;
                bio->bi_end_io          = journal_write_endio;
                bio->bi_private         = ca;
                bio_set_op_attrs(bio, REQ_OP_WRITE,
                                 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
                bch2_bio_map(bio, jset, sectors << 9);

                trace_journal_write(bio);
                closure_bio_submit(bio, cl);

                ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(jset->seq);
        }

        for_each_rw_member(ca, c, i)
                if (journal_flushes_device(ca) &&
                    !bch2_bkey_has_device(bkey_i_to_s_c(&w->key), i)) {
                        percpu_ref_get(&ca->io_ref);

                        bio = ca->journal.bio;
                        bio_reset(bio);
                        bio_set_dev(bio, ca->disk_sb.bdev);
                        bio->bi_opf             = REQ_OP_FLUSH;
                        bio->bi_end_io          = journal_write_endio;
                        bio->bi_private         = ca;
                        closure_bio_submit(bio, cl);
                }

no_io:
        bch2_bucket_seq_cleanup(c);

        continue_at(cl, journal_write_done, system_highpri_wq);
        return;
err:
        bch2_inconsistent_error(c);
        continue_at(cl, journal_write_done, system_highpri_wq);
}