// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

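/*
 * journal_replay_free() only marks an entry as ignored; the memory is freed
 * right away unless the read_entire_journal option is set, in which case
 * dropped entries are kept around so the whole journal can still be inspected:
 */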
static void __journal_replay_free(struct journal_replay *i)
{
        list_del(&i->list);
        kvpfree(i, offsetof(struct journal_replay, j) +
                vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
        i->ignore = true;

        if (!c->opts.read_entire_journal)
                __journal_replay_free(i);
}

struct journal_list {
        struct closure          cl;
        struct mutex            lock;
        struct list_head        *head;
        int                     ret;
};

#define JOURNAL_ENTRY_ADD_OK            0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                             struct journal_ptr entry_ptr,
                             struct journal_list *jlist, struct jset *j,
                             bool bad)
{
        struct journal_replay *i, *pos, *dup = NULL;
        struct journal_ptr *ptr;
        struct list_head *where;
        size_t bytes = vstruct_bytes(j);
        u64 last_seq = 0;
        int ret = JOURNAL_ENTRY_ADD_OK;

        list_for_each_entry_reverse(i, jlist->head, list) {
                if (!JSET_NO_FLUSH(&i->j)) {
                        last_seq = le64_to_cpu(i->j.last_seq);
                        break;
                }
        }

        /* Is this entry older than the range we need? */
        if (!c->opts.read_entire_journal &&
            le64_to_cpu(j->seq) < last_seq) {
                ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
                goto out;
        }

        /* Drop entries we don't need anymore */
        if (!JSET_NO_FLUSH(j)) {
                list_for_each_entry_safe(i, pos, jlist->head, list) {
                        if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
                                break;
                        journal_replay_free(c, i);
                }
        }

        list_for_each_entry_reverse(i, jlist->head, list) {
                if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
                        where = &i->list;
                        goto add;
                }
        }

        where = jlist->head;
add:
        dup = where->next != jlist->head
                ? container_of(where->next, struct journal_replay, list)
                : NULL;

        if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
                dup = NULL;

        /*
         * Duplicate journal entries? If so we want the one that didn't have a
         * checksum error:
         */
        if (dup) {
                if (dup->bad) {
                        /* we'll replace @dup: */
                } else if (bad) {
                        i = dup;
                        goto found;
                } else {
                        fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
                                    memcmp(j, &dup->j, bytes), c,
                                    "found duplicate but non-identical journal entries (seq %llu)",
                                    le64_to_cpu(j->seq));
                        i = dup;
                        goto found;
                }
        }

        i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
        if (!i) {
                ret = -ENOMEM;
                goto out;
        }

        i->nr_ptrs      = 0;
        i->bad          = bad;
        i->ignore       = false;
        memcpy(&i->j, j, bytes);

        if (dup) {
                i->nr_ptrs = dup->nr_ptrs;
                memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
                __journal_replay_free(dup);
        }

        list_add(&i->list, where);
found:
        for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
                if (ptr->dev == ca->dev_idx) {
                        bch_err(c, "duplicate journal entry %llu on same device",
                                le64_to_cpu(i->j.seq));
                        goto out;
                }
        }

        if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
                bch_err(c, "found too many copies of journal entry %llu",
                        le64_to_cpu(i->j.seq));
                goto out;
        }

        i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
        return ret;
}

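/*
 * The nonce for journal checksums/encryption is built from the entry's 64 bit
 * sequence number plus the BCH_NONCE_JOURNAL type tag, so every journal entry
 * gets a distinct nonce:
 */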
static struct nonce journal_nonce(const struct jset *jset)
{
        return (struct nonce) {{
                [0] = 0,
                [1] = ((__le32 *) &jset->seq)[0],
                [2] = ((__le32 *) &jset->seq)[1],
                [3] = BCH_NONCE_JOURNAL,
        }};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
        struct jset_entry *entry;

        for (entry = start; entry != end; entry = vstruct_next(entry))
                memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD    5
#define JOURNAL_ENTRY_NONE      6
#define JOURNAL_ENTRY_BAD       7

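/*
 * Validation failures are handled differently by direction: at read time the
 * error goes to fsck, which can repair it; at write time it means our
 * in-memory metadata is corrupt, so we log it and mark the filesystem
 * inconsistent:
 */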
#define journal_entry_err(c, msg, ...)                                  \
({                                                                      \
        switch (write) {                                                \
        case READ:                                                      \
                mustfix_fsck_err(c, msg, ##__VA_ARGS__);                \
                break;                                                  \
        case WRITE:                                                     \
                bch_err(c, "corrupt metadata before write:\n"           \
                        msg, ##__VA_ARGS__);                            \
                if (bch2_fs_inconsistent(c)) {                          \
                        ret = BCH_FSCK_ERRORS_NOT_FIXED;                \
                        goto fsck_err;                                  \
                }                                                       \
                break;                                                  \
        }                                                               \
        true;                                                           \
})

#define journal_entry_err_on(cond, c, msg, ...)                         \
        ((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY        5

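/*
 * Validate a single key in a journal entry. Bad keys aren't fatal: they're
 * deleted in place by shrinking entry->u64s and nulling out the freed range,
 * and FSCK_DELETED_KEY tells the caller not to advance to the next key:
 */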
static int journal_validate_key(struct bch_fs *c, const char *where,
                                struct jset_entry *entry,
                                unsigned level, enum btree_id btree_id,
                                struct bkey_i *k, const char *type,
                                unsigned version, int big_endian, int write)
{
        void *next = vstruct_next(entry);
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (journal_entry_err_on(!k->k.u64s, c,
                        "invalid %s in %s entry offset %zi/%u: k->u64s 0",
                        type, where,
                        (u64 *) k - entry->_data,
                        le16_to_cpu(entry->u64s))) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (journal_entry_err_on((void *) bkey_next(k) >
                                (void *) vstruct_next(entry), c,
                        "invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
                        type, where,
                        (u64 *) k - entry->_data,
                        le16_to_cpu(entry->u64s))) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
                        "invalid %s in %s entry offset %zi/%u: bad format %u",
                        type, where,
                        (u64 *) k - entry->_data,
                        le16_to_cpu(entry->u64s),
                        k->k.format)) {
                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (!write)
                bch2_bkey_compat(level, btree_id, version, big_endian,
                                 write, NULL, bkey_to_packed(k));

        if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                              __btree_node_type(level, btree_id), write, &buf)) {
                printbuf_reset(&buf);
                pr_buf(&buf, "invalid %s in %s entry offset %zi/%u:",
                       type, where,
                       (u64 *) k - entry->_data,
                       le16_to_cpu(entry->u64s));
                pr_newline(&buf);
                pr_indent_push(&buf, 2);

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
                pr_newline(&buf);
                bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                                  __btree_node_type(level, btree_id), write, &buf);

                mustfix_fsck_err(c, "%s", buf.buf);

                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);

                printbuf_exit(&buf);
                return FSCK_DELETED_KEY;
        }

        if (write)
                bch2_bkey_compat(level, btree_id, version, big_endian,
                                 write, NULL, bkey_to_packed(k));
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int journal_entry_btree_keys_validate(struct bch_fs *c,
                                             const char *where,
                                             struct jset_entry *entry,
                                             unsigned version, int big_endian, int write)
{
        struct bkey_i *k = entry->start;

        while (k != vstruct_last(entry)) {
                int ret = journal_validate_key(c, where, entry,
                                               entry->level,
                                               entry->btree_id,
                                               k, "key", version, big_endian, write);
                if (ret == FSCK_DELETED_KEY)
                        continue;

                k = bkey_next(k);
        }

        return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        struct bkey_i *k;
        bool first = true;

        vstruct_for_each(entry, k) {
                if (!first) {
                        pr_newline(out);
                        pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                }
                pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
                bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
                first = false;
        }
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
                                             const char *where,
                                             struct jset_entry *entry,
                                             unsigned version, int big_endian, int write)
{
        struct bkey_i *k = entry->start;
        int ret = 0;

        if (journal_entry_err_on(!entry->u64s ||
                                 le16_to_cpu(entry->u64s) != k->k.u64s, c,
                                 "invalid btree root journal entry: wrong number of keys")) {
                void *next = vstruct_next(entry);
                /*
                 * we don't want to null out this jset_entry,
                 * just the contents, so that later we can tell
                 * we were _supposed_ to have a btree root
                 */
                entry->u64s = 0;
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
                                    "btree root", version, big_endian, write);
fsck_err:
        return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
                                            const char *where,
                                            struct jset_entry *entry,
                                            unsigned version, int big_endian, int write)
{
        /* obsolete, don't care: */
        return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
                                            const char *where,
                                            struct jset_entry *entry,
                                            unsigned version, int big_endian, int write)
{
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
fsck_err:
        return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_blacklist *bl =
                container_of(entry, struct jset_entry_blacklist, entry);

        pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
                                               const char *where,
                                               struct jset_entry *entry,
                                               unsigned version, int big_endian, int write)
{
        struct jset_entry_blacklist_v2 *bl_entry;
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }

        bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

        if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
                                 le64_to_cpu(bl_entry->end), c,
                "invalid journal seq blacklist entry: start > end")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
out:
fsck_err:
        return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
                                               struct jset_entry *entry)
{
        struct jset_entry_blacklist_v2 *bl =
                container_of(entry, struct jset_entry_blacklist_v2, entry);

        pr_buf(out, "start=%llu end=%llu",
               le64_to_cpu(bl->start),
               le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
                                        const char *where,
                                        struct jset_entry *entry,
                                        unsigned version, int big_endian, int write)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u),
                                 c,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct jset_entry *entry)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);

        pr_buf(out, "type=%s v=%llu",
               bch2_fs_usage_types[u->entry.btree_id],
               le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
                                        const char *where,
                                        struct jset_entry *entry,
                                        unsigned version, int big_endian, int write)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u) ||
                                 bytes < sizeof(*u) + u->r.nr_devs,
                                 c,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);

        bch2_replicas_entry_to_text(out, &u->r);
        pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
                                        const char *where,
                                        struct jset_entry *entry,
                                        unsigned version, int big_endian, int write)
{
        struct jset_entry_clock *clock =
                container_of(entry, struct jset_entry_clock, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes != sizeof(*clock),
                                 c, "invalid journal entry clock: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        if (journal_entry_err_on(clock->rw > 1,
                                 c, "invalid journal entry clock: bad rw")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct jset_entry *entry)
{
        struct jset_entry_clock *clock =
                container_of(entry, struct jset_entry_clock, entry);

        pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
                                            const char *where,
                                            struct jset_entry *entry,
                                            unsigned version, int big_endian, int write)
{
        struct jset_entry_dev_usage *u =
                container_of(entry, struct jset_entry_dev_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        unsigned expected = sizeof(*u);
        unsigned dev;
        int ret = 0;

        if (journal_entry_err_on(bytes < expected,
                                 c, "invalid journal entry dev usage: bad size (%u < %u)",
                                 bytes, expected)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        dev = le32_to_cpu(u->dev);

        if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
                                 c, "invalid journal entry dev usage: bad dev")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        if (journal_entry_err_on(u->pad,
                                 c, "invalid journal entry dev usage: bad pad")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_dev_usage *u =
                container_of(entry, struct jset_entry_dev_usage, entry);
        unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

        pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

        for (i = 0; i < nr_types; i++) {
                if (i < BCH_DATA_NR)
                        pr_buf(out, " %s", bch2_data_types[i]);
                else
                        pr_buf(out, " (unknown data type %u)", i);
                pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
                       le64_to_cpu(u->d[i].buckets),
                       le64_to_cpu(u->d[i].sectors),
                       le64_to_cpu(u->d[i].fragmented));
        }

        pr_buf(out, " buckets_ec: %llu buckets_unavailable: %llu",
               le64_to_cpu(u->buckets_ec),
               le64_to_cpu(u->buckets_unavailable));
}

static int journal_entry_log_validate(struct bch_fs *c,
                                      const char *where,
                                      struct jset_entry *entry,
                                      unsigned version, int big_endian, int write)
{
        return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
                                      struct jset_entry *entry)
{
        struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
        unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

        pr_buf(out, "%.*s", bytes, l->d);
}

struct jset_entry_ops {
        int (*validate)(struct bch_fs *, const char *,
                        struct jset_entry *, unsigned, int, int);
        void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

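/*
 * Per-entry-type validate/to_text ops, generated from the
 * BCH_JSET_ENTRY_TYPES() x-macro so the table stays in sync with the list of
 * entry types:
 */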
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)                                                \
        [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
                .validate       = journal_entry_##f##_validate, \
                .to_text        = journal_entry_##f##_to_text,  \
        },
        BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
                                struct jset_entry *entry,
                                unsigned version, int big_endian, int write)
{
        return entry->type < BCH_JSET_ENTRY_NR
                ? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
                                version, big_endian, write)
                : 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
                                struct jset_entry *entry)
{
        if (entry->type < BCH_JSET_ENTRY_NR) {
                pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
        } else {
                pr_buf(out, "(unknown type %u)", entry->type);
        }
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
                                 int write)
{
        char buf[100];
        struct jset_entry *entry;
        int ret = 0;

        vstruct_for_each(jset, entry) {
                scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
                          le64_to_cpu(jset->seq),
                          (u64 *) entry - jset->_data,
                          le32_to_cpu(jset->u64s));

                if (journal_entry_err_on(vstruct_next(entry) >
                                         vstruct_last(jset), c,
                                "journal entry extends past end of jset")) {
                        jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
                        break;
                }

                ret = bch2_journal_entry_validate(c, buf, entry,
                                        le32_to_cpu(jset->version),
                                        JSET_BIG_ENDIAN(jset), write);
                if (ret)
                        break;
        }
fsck_err:
        return ret;
}

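/*
 * Validate a jset header: magic, version, size, checksum type and checksum,
 * then decrypt and sanity check last_seq against seq. Returns
 * JOURNAL_ENTRY_NONE if this doesn't look like a journal entry at all, and
 * JOURNAL_ENTRY_REREAD if the entry is bigger than what we've read so far:
 */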
static int jset_validate(struct bch_fs *c,
                         struct bch_dev *ca,
                         struct jset *jset, u64 sector,
                         unsigned bucket_sectors_left,
                         unsigned sectors_read,
                         int write)
{
        size_t bytes = vstruct_bytes(jset);
        struct bch_csum csum;
        unsigned version;
        int ret = 0, err;

        if (le64_to_cpu(jset->magic) != jset_magic(c))
                return JOURNAL_ENTRY_NONE;

        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
                                  version < bcachefs_metadata_version_min) ||
                                 version >= bcachefs_metadata_version_max, c,
                        "%s sector %llu seq %llu: unknown journal entry version %u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        version)) {
                /* don't try to continue: */
                return -EINVAL;
        }

        if (bytes > (sectors_read << 9) &&
            sectors_read < bucket_sectors_left)
                return JOURNAL_ENTRY_REREAD;

        if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
                        "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq), bytes)) {
                ret = JOURNAL_ENTRY_BAD;
                le32_add_cpu(&jset->u64s,
                             -((bytes - (bucket_sectors_left << 9)) / 8));
        }

        if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
                        "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        JSET_CSUM_TYPE(jset))) {
                ret = JOURNAL_ENTRY_BAD;
                goto csum_done;
        }

        if (write)
                goto csum_done;

        csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
        if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
                                 "%s sector %llu seq %llu: journal checksum bad",
                                 ca ? ca->name : c->name,
                                 sector, le64_to_cpu(jset->seq)))
                ret = JOURNAL_ENTRY_BAD;

        /* use a separate variable so a checksum error in ret isn't clobbered: */
        err = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
                     jset->encrypted_start,
                     vstruct_end(jset) - (void *) jset->encrypted_start);
        bch2_fs_fatal_err_on(err, c,
                        "error decrypting journal entry: %i", err);
        if (err)
                return err;
csum_done:
        /* last_seq is ignored when JSET_NO_FLUSH is true */
        if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
                                 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
                                 "invalid journal entry: last_seq > seq (%llu > %llu)",
                                 le64_to_cpu(jset->last_seq),
                                 le64_to_cpu(jset->seq))) {
                jset->last_seq = jset->seq;
                return JOURNAL_ENTRY_BAD;
        }
fsck_err:
        return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
        unsigned sectors = vstruct_sectors(jset, c->block_bits);

        return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
                jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
        void            *data;
        size_t          size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
                                    size_t new_size)
{
        void *n;

        /* the bios are sized for this many pages, max: */
        if (new_size > JOURNAL_ENTRY_SIZE_MAX)
                return -ENOMEM;

        new_size = roundup_pow_of_two(new_size);
        n = kvpmalloc(new_size, GFP_KERNEL);
        if (!n)
                return -ENOMEM;

        kvpfree(b->data, b->size);
        b->data = n;
        b->size = new_size;
        return 0;
}

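/*
 * Scan one journal bucket for entries: read into the buffer, validate each
 * jset in turn, growing the buffer and rereading when an entry doesn't fit.
 * Read errors aren't fatal here, since the same entry may also exist on
 * another device:
 */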
static int journal_read_bucket(struct bch_dev *ca,
                               struct journal_read_buf *buf,
                               struct journal_list *jlist,
                               unsigned bucket)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        struct jset *j = NULL;
        unsigned sectors, sectors_read = 0;
        u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
            end = offset + ca->mi.bucket_size;
        bool saw_bad = false;
        int ret = 0;

        pr_debug("reading %u", bucket);

        while (offset < end) {
                if (!sectors_read) {
                        struct bio *bio;
reread:
                        sectors_read = min_t(unsigned,
                                end - offset, buf->size >> 9);

                        bio = bio_kmalloc(GFP_KERNEL,
                                          buf_pages(buf->data,
                                                    sectors_read << 9));
                        bio_set_dev(bio, ca->disk_sb.bdev);
                        bio->bi_iter.bi_sector  = offset;
                        bio_set_op_attrs(bio, REQ_OP_READ, 0);
                        bch2_bio_map(bio, buf->data, sectors_read << 9);

                        ret = submit_bio_wait(bio);
                        bio_put(bio);

                        if (bch2_dev_io_err_on(ret, ca,
                                               "journal read error: sector %llu",
                                               offset) ||
                            bch2_meta_read_fault("journal")) {
                                /*
                                 * We don't error out of the recovery process
                                 * here, since the relevant journal entry may be
                                 * found on a different device, and missing or
                                 * no journal entries will be handled later
                                 */
                                return 0;
                        }

                        j = buf->data;
                }

                ret = jset_validate(c, ca, j, offset,
                                    end - offset, sectors_read,
                                    READ);
                switch (ret) {
                case BCH_FSCK_OK:
                        sectors = vstruct_sectors(j, c->block_bits);
                        break;
                case JOURNAL_ENTRY_REREAD:
                        if (vstruct_bytes(j) > buf->size) {
                                ret = journal_read_buf_realloc(buf,
                                                        vstruct_bytes(j));
                                if (ret)
                                        return ret;
                        }
                        goto reread;
                case JOURNAL_ENTRY_NONE:
                        if (!saw_bad)
                                return 0;
                        sectors = block_sectors(c);
                        goto next_block;
                case JOURNAL_ENTRY_BAD:
                        saw_bad = true;
                        /*
                         * On checksum error we don't really trust the size
                         * field of the journal entry we read, so try reading
                         * again at next block boundary:
                         */
                        sectors = block_sectors(c);
                        break;
                default:
                        return ret;
                }

                /*
                 * This happens sometimes if we don't have discards on -
                 * when we've partially overwritten a bucket with new
                 * journal entries. We don't need the rest of the
                 * bucket:
                 */
                if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
                        return 0;

                ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

                mutex_lock(&jlist->lock);
                ret = journal_entry_add(c, ca, (struct journal_ptr) {
                                        .dev            = ca->dev_idx,
                                        .bucket         = bucket,
                                        .bucket_offset  = offset -
                                                bucket_to_sector(ca, ja->buckets[bucket]),
                                        .sector         = offset,
                                        }, jlist, j, ret != 0);
                mutex_unlock(&jlist->lock);

                switch (ret) {
                case JOURNAL_ENTRY_ADD_OK:
                        break;
                case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
                        break;
                default:
                        return ret;
                }
next_block:
                pr_debug("next");
                offset          += sectors;
                sectors_read    -= sectors;
                j = ((void *) j) + (sectors << 9);
        }

        return 0;
}

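/*
 * Per-device read path, run as a closure: read every journal bucket on this
 * device, then work out where writing left off - cur_idx points at the bucket
 * with the highest sequence number, and sectors_free is the space remaining in
 * that bucket:
 */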
static void bch2_journal_read_device(struct closure *cl)
{
        struct journal_device *ja =
                container_of(cl, struct journal_device, read);
        struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
        struct bch_fs *c = ca->fs;
        struct journal_list *jlist =
                container_of(cl->parent, struct journal_list, cl);
        struct journal_replay *r;
        struct journal_read_buf buf = { NULL, 0 };
        u64 min_seq = U64_MAX;
        unsigned i;
        int ret = 0;

        if (!ja->nr)
                goto out;

        ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
        if (ret)
                goto err;

        pr_debug("%u journal buckets", ja->nr);

        for (i = 0; i < ja->nr; i++) {
                ret = journal_read_bucket(ca, &buf, jlist, i);
                if (ret)
                        goto err;
        }

        /* Find the journal bucket with the highest sequence number: */
        for (i = 0; i < ja->nr; i++) {
                if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
                        ja->cur_idx = i;

                min_seq = min(ja->bucket_seq[i], min_seq);
        }

        /*
         * If there are duplicate journal entries in multiple buckets (which
         * definitely isn't supposed to happen, but...) - make sure to start
         * cur_idx at the last of those buckets, so we don't deadlock trying to
         * allocate
         */
        while (ja->bucket_seq[ja->cur_idx] > min_seq &&
               ja->bucket_seq[ja->cur_idx] ==
               ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
                ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

        ja->sectors_free = ca->mi.bucket_size;

        mutex_lock(&jlist->lock);
        list_for_each_entry(r, jlist->head, list) {
                for (i = 0; i < r->nr_ptrs; i++) {
                        if (r->ptrs[i].dev == ca->dev_idx &&
                            sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
                                unsigned wrote = (r->ptrs[i].sector % ca->mi.bucket_size) +
                                        vstruct_sectors(&r->j, c->block_bits);

                                ja->sectors_free = min(ja->sectors_free,
                                                       ca->mi.bucket_size - wrote);
                        }
                }
        }
        mutex_unlock(&jlist->lock);

        if (ja->bucket_seq[ja->cur_idx] &&
            ja->sectors_free == ca->mi.bucket_size) {
                bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
                bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
                for (i = 0; i < 3; i++) {
                        /* wrap instead of underflowing when cur_idx == 0: */
                        unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

                        bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
                }
                ja->sectors_free = 0;
        }

        /*
         * Set dirty_idx to indicate the entire journal is full and needs to be
         * reclaimed - journal reclaim will immediately reclaim whatever isn't
         * pinned when it first runs:
         */
        ja->discard_idx = ja->dirty_idx_ondisk =
                ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
        bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
        kvpfree(buf.data, buf.size);
        percpu_ref_put(&ca->io_ref);
        closure_return(cl);
        return;
err:
        mutex_lock(&jlist->lock);
        jlist->ret = ret;
        mutex_unlock(&jlist->lock);
        goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                               struct journal_replay *j)
{
        unsigned i;

        for (i = 0; i < j->nr_ptrs; i++) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
                u64 offset;

                div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

                if (i)
                        pr_buf(out, " ");
                pr_buf(out, "%u:%u:%u (sector %llu)",
                       j->ptrs[i].dev,
                       j->ptrs[i].bucket,
                       j->ptrs[i].bucket_offset,
                       j->ptrs[i].sector);
        }
}

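/*
 * Read the journal from every device in parallel, then filter the result:
 * find the most recent flush entry, drop blacklisted and too-old entries,
 * check for gaps in the sequence numbers, and make sure each remaining
 * entry's replicas are marked in the superblock:
 */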
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
                      u64 *blacklist_seq, u64 *start_seq)
{
        struct journal_list jlist;
        struct journal_replay *i, *t;
        struct bch_dev *ca;
        unsigned iter;
        struct printbuf buf = PRINTBUF;
        size_t keys = 0, entries = 0;
        bool degraded = false;
        u64 seq, last_seq = 0;
        int ret = 0;

        closure_init_stack(&jlist.cl);
        mutex_init(&jlist.lock);
        jlist.head = list;
        jlist.ret = 0;

        for_each_member_device(ca, c, iter) {
                if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
                    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
                        continue;

                if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
                     ca->mi.state == BCH_MEMBER_STATE_ro) &&
                    percpu_ref_tryget(&ca->io_ref))
                        closure_call(&ca->journal.read,
                                     bch2_journal_read_device,
                                     system_unbound_wq,
                                     &jlist.cl);
                else
                        degraded = true;
        }

        closure_sync(&jlist.cl);

        if (jlist.ret)
                return jlist.ret;

        if (list_empty(list)) {
                bch_info(c, "journal read done, but no entries found");
                return 0;
        }

        i = list_last_entry(list, struct journal_replay, list);
        *start_seq = le64_to_cpu(i->j.seq) + 1;

        /*
         * Find most recent flush entry, and ignore newer non flush entries -
         * those entries will be blacklisted:
         */
        list_for_each_entry_safe_reverse(i, t, list, list) {
                if (i->ignore)
                        continue;

                if (!JSET_NO_FLUSH(&i->j)) {
                        last_seq        = le64_to_cpu(i->j.last_seq);
                        *blacklist_seq  = le64_to_cpu(i->j.seq) + 1;
                        break;
                }

                journal_replay_free(c, i);
        }

        if (!last_seq) {
                fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
                ret = -1;
                goto err;
        }

        /* Drop blacklisted entries and entries older than last_seq: */
        list_for_each_entry_safe(i, t, list, list) {
                if (i->ignore)
                        continue;

                seq = le64_to_cpu(i->j.seq);
                if (seq < last_seq) {
                        journal_replay_free(c, i);
                        continue;
                }

                if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
                        fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
                                    "found blacklisted journal entry %llu", seq);

                        journal_replay_free(c, i);
                }
        }

        /* Check for missing entries: */
        seq = last_seq;
        list_for_each_entry(i, list, list) {
                if (i->ignore)
                        continue;

                BUG_ON(seq > le64_to_cpu(i->j.seq));

                while (seq < le64_to_cpu(i->j.seq)) {
                        u64 missing_start, missing_end;
                        struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

                        while (seq < le64_to_cpu(i->j.seq) &&
                               bch2_journal_seq_is_blacklisted(c, seq, false))
                                seq++;

                        if (seq == le64_to_cpu(i->j.seq))
                                break;

                        missing_start = seq;

                        while (seq < le64_to_cpu(i->j.seq) &&
                               !bch2_journal_seq_is_blacklisted(c, seq, false))
                                seq++;

                        if (i->list.prev != list) {
                                struct journal_replay *p = list_prev_entry(i, list);

                                bch2_journal_ptrs_to_text(&buf1, c, p);
                                pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
                        } else
                                pr_buf(&buf1, "(none)");
                        bch2_journal_ptrs_to_text(&buf2, c, i);

                        missing_end = seq - 1;
                        fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
                                 "  prev at %s\n"
                                 "  next at %s",
                                 missing_start, missing_end,
                                 last_seq, *blacklist_seq - 1,
                                 buf1.buf, buf2.buf);

                        printbuf_exit(&buf1);
                        printbuf_exit(&buf2);
                }

                seq++;
        }

        list_for_each_entry(i, list, list) {
                struct jset_entry *entry;
                struct bkey_i *k, *_n;
                struct bch_replicas_padded replicas = {
                        .e.data_type = BCH_DATA_journal,
                        .e.nr_required = 1,
                };
                unsigned ptr;

                if (i->ignore)
                        continue;

                ret = jset_validate_entries(c, &i->j, READ);
                if (ret)
                        goto err;

                for (ptr = 0; ptr < i->nr_ptrs; ptr++)
                        replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

                bch2_replicas_entry_sort(&replicas.e);

                /*
                 * If we're mounting in degraded mode - if we didn't read all
                 * the devices - this is wrong:
                 */

                printbuf_reset(&buf);
                bch2_replicas_entry_to_text(&buf, &replicas.e);

                if (!degraded &&
                    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
                     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
                                 "superblock not marked as containing replicas %s",
                                 buf.buf))) {
                        ret = bch2_mark_replicas(c, &replicas.e);
                        if (ret)
                                goto err;
                }

                for_each_jset_key(k, _n, entry, &i->j)
                        keys++;
                entries++;
        }

        bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
                 keys, entries, *start_seq);

        if (*start_seq != *blacklist_seq)
                bch_info(c, "dropped unflushed entries %llu-%llu",
                         *blacklist_seq, *start_seq - 1);
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

/* journal write: */

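/*
 * Append pointers to the journal write's key until we have replicas_want
 * replicas, picking from the pre-sorted allocation list and skipping devices
 * that have no durability, aren't read-write, have no journal, are already in
 * the key, or don't have room for this write:
 */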
static void __journal_write_alloc(struct journal *j,
                                  struct journal_buf *w,
                                  struct dev_alloc_list *devs_sorted,
                                  unsigned sectors,
                                  unsigned *replicas,
                                  unsigned replicas_want)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_device *ja;
        struct bch_dev *ca;
        unsigned i;

        if (*replicas >= replicas_want)
                return;

        for (i = 0; i < devs_sorted->nr; i++) {
                ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
                if (!ca)
                        continue;

                ja = &ca->journal;

                /*
                 * Check that we can use this device, and aren't already using
                 * it:
                 */
                if (!ca->mi.durability ||
                    ca->mi.state != BCH_MEMBER_STATE_rw ||
                    !ja->nr ||
                    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
                                         ca->dev_idx) ||
                    sectors > ja->sectors_free)
                        continue;

                bch2_dev_stripe_increment(ca, &j->wp.stripe);

                bch2_bkey_append_ptr(&w->key,
                        (struct bch_extent_ptr) {
                                  .offset = bucket_to_sector(ca,
                                        ja->buckets[ja->cur_idx]) +
                                        ca->mi.bucket_size -
                                        ja->sectors_free,
                                  .dev = ca->dev_idx,
                });

                ja->sectors_free -= sectors;
                ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

                *replicas += ca->mi.durability;

                if (*replicas >= replicas_want)
                        break;
        }
}

/**
 * journal_write_alloc - allocate space for the journal write, moving on to the
 * next journal bucket if possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
                               unsigned sectors)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_devs_mask devs;
        struct journal_device *ja;
        struct bch_dev *ca;
        struct dev_alloc_list devs_sorted;
        unsigned target = c->opts.metadata_target ?:
                c->opts.foreground_target;
        unsigned i, replicas = 0, replicas_want =
                READ_ONCE(c->opts.metadata_replicas);

        rcu_read_lock();
retry:
        devs = target_rw_devs(c, BCH_DATA_journal, target);

        devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

        __journal_write_alloc(j, w, &devs_sorted,
                              sectors, &replicas, replicas_want);

        if (replicas >= replicas_want)
                goto done;

        for (i = 0; i < devs_sorted.nr; i++) {
                ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
                if (!ca)
                        continue;

                ja = &ca->journal;

                if (sectors > ja->sectors_free &&
                    sectors <= ca->mi.bucket_size &&
                    bch2_journal_dev_buckets_available(j, ja,
                                        journal_space_discarded)) {
                        ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
                        ja->sectors_free = ca->mi.bucket_size;

                        /*
                         * ja->bucket_seq[ja->cur_idx] must always have
                         * something sensible:
                         */
                        ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
                }
        }

        __journal_write_alloc(j, w, &devs_sorted,
                              sectors, &replicas, replicas_want);

        if (replicas < replicas_want && target) {
                /* Retry from all devices: */
                target = 0;
                goto retry;
        }
done:
        rcu_read_unlock();

        BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

        return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

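/*
 * Grow the journal buffer to buf_size_want if it's gotten bigger: allocate
 * and copy without the lock, then swap the buffers under j->lock:
 */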
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
        /* we aren't holding j->lock: */
        unsigned new_size = READ_ONCE(j->buf_size_want);
        void *new_buf;

        if (buf->buf_size >= new_size)
                return;

        new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
        if (!new_buf)
                return;

        memcpy(new_buf, buf->data, buf->buf_size);

        spin_lock(&j->lock);
        swap(buf->data,         new_buf);
        swap(buf->buf_size,     new_size);
        spin_unlock(&j->lock);

        kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
        return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

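/*
 * Journal write completion: record which devices the write reached and mark
 * the replicas, advance seq_ondisk (plus flushed_seq_ondisk/last_seq_ondisk
 * for flush writes), then kick off the next unwritten journal entry if one is
 * ready:
 */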
static void journal_write_done(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *w = journal_last_unwritten_buf(j);
        struct bch_replicas_padded replicas;
        union journal_res_state old, new;
        u64 v, seq;
        int err = 0;

        bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
                               ? j->flush_write_time
                               : j->noflush_write_time, j->write_start_time);

        if (!w->devs_written.nr) {
                bch_err(c, "unable to write journal to sufficient devices");
                err = -EIO;
        } else {
                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         w->devs_written);
                if (bch2_mark_replicas(c, &replicas.e))
                        err = -EIO;
        }

        if (err)
                bch2_fatal_error(c);

        spin_lock(&j->lock);
        seq = le64_to_cpu(w->data->seq);

        if (seq >= j->pin.front)
                journal_seq_pin(j, seq)->devs = w->devs_written;

        if (!err) {
                if (!JSET_NO_FLUSH(w->data)) {
                        j->flushed_seq_ondisk = seq;
                        j->last_seq_ondisk = w->last_seq;

                        bch2_do_discards(c);
                        closure_wake_up(&c->freelist_wait);
                }
        } else if (!j->err_seq || seq < j->err_seq)
                j->err_seq      = seq;

        j->seq_ondisk           = seq;

        /*
         * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
         * more buckets:
         *
         * Must come before signaling write completion, for
         * bch2_fs_journal_stop():
         */
        journal_reclaim_kick(&c->journal);

        /* also must come before signaling write completion: */
        closure_debug_destroy(cl);

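        /*
         * Lockless advance of unwritten_idx past the buffer we just wrote,
         * making its slot available for new journal entries:
         */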
        v = atomic64_read(&j->reservations.counter);
        do {
                old.v = new.v = v;
                BUG_ON(journal_state_count(new, new.unwritten_idx));

                new.unwritten_idx++;
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);

        bch2_journal_space_available(j);

        closure_wake_up(&w->wait);
        journal_wake(j);

        if (!journal_state_count(new, new.unwritten_idx) &&
            journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
                closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
        } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
                   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
                struct journal_buf *buf = journal_cur_buf(j);
                long delta = buf->expires - jiffies;

                /*
                 * We don't close a journal entry to write it while there are
                 * previous entries still in flight - now that the previous
                 * write has completed, the current entry might be ready to
                 * be written:
                 */

                mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
        }

        spin_unlock(&j->lock);
}

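/*
 * Per-device write completion: on error (or injected write fault), drop this
 * device from the set the entry was successfully written to;
 * journal_write_done() then checks whether enough replicas made it:
 */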
static void journal_write_endio(struct bio *bio)
{
        struct bch_dev *ca = bio->bi_private;
        struct journal *j = &ca->fs->journal;
        struct journal_buf *w = journal_last_unwritten_buf(j);
        unsigned long flags;

        if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
                               le64_to_cpu(w->data->seq),
                               bch2_blk_status_to_str(bio->bi_status)) ||
            bch2_meta_write_fault("journal")) {
                spin_lock_irqsave(&j->err_lock, flags);
                bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
                spin_unlock_irqrestore(&j->err_lock, flags);
        }

        closure_put(&j->io);
        percpu_ref_put(&ca->io_ref);
}

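/*
 * Submit the actual journal write: one bio per device pointer in the key the
 * entry was allocated to, all submitted under the same closure:
 */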
static void do_journal_write(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        struct journal_buf *w = journal_last_unwritten_buf(j);
        struct bch_extent_ptr *ptr;
        struct bio *bio;
        unsigned sectors = vstruct_sectors(w->data, c->block_bits);

        extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
                ca = bch_dev_bkey_exists(c, ptr->dev);
                if (!percpu_ref_tryget(&ca->io_ref)) {
                        /* XXX: fix this */
                        bch_err(c, "missing device for journal write");
                        continue;
                }

                this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
                             sectors);

                bio = ca->journal.bio;
                bio_reset(bio);
                bio_set_dev(bio, ca->disk_sb.bdev);
                bio->bi_iter.bi_sector  = ptr->offset;
                bio->bi_end_io          = journal_write_endio;
                bio->bi_private         = ca;
                bio->bi_opf             = REQ_OP_WRITE|REQ_SYNC|REQ_META;

                BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
                ca->prev_journal_sector = bio->bi_iter.bi_sector;

                if (!JSET_NO_FLUSH(w->data))
                        bio->bi_opf    |= REQ_FUA;
                if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
                        bio->bi_opf    |= REQ_PREFLUSH;

                bch2_bio_map(bio, w->data, sectors << 9);

                trace_journal_write(bio);
                closure_bio_submit(bio, cl);

                ca->journal.bucket_seq[ca->journal.cur_idx] =
                        le64_to_cpu(w->data->seq);
        }

        continue_at(cl, journal_write_done, c->io_complete_wq);
        return;
}

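/*
 * Main entry point for writing out a journal entry: decide whether it can be
 * a noflush write, fold in btree roots and superblock fields, checksum and
 * (if enabled) encrypt, allocate space on each device, then submit - issuing
 * separate cache flushes first when needed:
 */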
void bch2_journal_write(struct closure *cl)
{
        struct journal *j = container_of(cl, struct journal, io);
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        struct journal_buf *w = journal_last_unwritten_buf(j);
        struct jset_entry *start, *end;
        struct jset *jset;
        struct bio *bio;
        struct printbuf journal_debug_buf = PRINTBUF;
        bool validate_before_checksum = false;
        unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
        int ret;

        BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

        journal_buf_realloc(j, w);
        jset = w->data;

        j->write_start_time = local_clock();

        spin_lock(&j->lock);
        if (bch2_journal_error(j) ||
            w->noflush ||
            (!w->must_flush &&
             (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
             test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
                w->noflush = true;
                SET_JSET_NO_FLUSH(jset, true);
                jset->last_seq  = 0;
                w->last_seq     = 0;

                j->nr_noflush_writes++;
        } else {
                j->last_flush_write = jiffies;
                j->nr_flush_writes++;
        }
        spin_unlock(&j->lock);

        /*
         * New btree roots are set by journalling them; when the journal entry
         * gets written we have to propagate them to c->btree_roots.
         *
         * But every journal entry we write has to contain all the btree roots
         * (at least for now); so after we copy btree roots to c->btree_roots
         * we have to get any missing btree roots and add them to this journal
         * entry:
         */

        bch2_journal_entries_to_btree_roots(c, jset);

        start = end = vstruct_last(jset);

        end     = bch2_btree_roots_to_journal_entries(c, jset->start, end);

        bch2_journal_super_entries_add_common(c, &end,
                                le64_to_cpu(jset->seq));
        u64s    = (u64 *) end - (u64 *) start;
        BUG_ON(u64s > j->entry_u64s_reserved);

        le32_add_cpu(&jset->u64s, u64s);
        BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

        jset->magic             = cpu_to_le64(jset_magic(c));
        jset->version           = c->sb.version < bcachefs_metadata_version_bkey_renumber
                ? cpu_to_le32(BCH_JSET_VERSION_OLD)
                : cpu_to_le32(c->sb.version);

        SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
        SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

        if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
                j->last_empty_seq = le64_to_cpu(jset->seq);

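        /*
         * If the checksum type encrypts, the entry must be validated while
         * it's still plaintext; entries in an older on-disk format may also
         * need fixups during validation, before the checksum is taken.
         * Otherwise, validate after checksumming as a final sanity check on
         * what we're about to write:
         */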
        if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
                validate_before_checksum = true;

        if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
                validate_before_checksum = true;

        if (validate_before_checksum &&
            jset_validate_for_write(c, jset))
                goto err;

        ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
                    jset->encrypted_start,
                    vstruct_end(jset) - (void *) jset->encrypted_start);
        if (bch2_fs_fatal_err_on(ret, c,
                        "error encrypting journal entry: %i", ret))
                goto err;

        jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
                                  journal_nonce(jset), jset);

        if (!validate_before_checksum &&
            jset_validate_for_write(c, jset))
                goto err;

        sectors = vstruct_sectors(jset, c->block_bits);
        BUG_ON(sectors > w->sectors);

        bytes = vstruct_bytes(jset);
        memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

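        /*
         * Allocate space for the write; if that fails and there are buckets
         * awaiting discard, issue the discards and retry - newly discarded
         * buckets may allow the allocation to succeed:
         */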
retry_alloc:
        spin_lock(&j->lock);
        ret = journal_write_alloc(j, w, sectors);

        if (ret && j->can_discard) {
                spin_unlock(&j->lock);
                bch2_journal_do_discards(j);
                goto retry_alloc;
        }

        if (ret)
                __bch2_journal_debug_to_text(&journal_debug_buf, j);

        /*
         * write is allocated, no longer need to account for it in
         * bch2_journal_space_available():
         */
        w->sectors = 0;

        /*
         * journal entry has been compacted and allocated, recalculate space
         * available:
         */
        bch2_journal_space_available(j);
        spin_unlock(&j->lock);

        if (ret) {
                bch_err(c, "Unable to allocate journal write:\n%s",
                        journal_debug_buf.buf);
                printbuf_exit(&journal_debug_buf);
                bch2_fatal_error(c);
                continue_at(cl, journal_write_done, c->io_complete_wq);
                return;
        }

        w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

        if (c->opts.nochanges)
                goto no_io;

        for_each_rw_member(ca, c, i)
                nr_rw_members++;

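        /*
         * With more than one device, issue standalone cache flushes to every
         * rw member instead of REQ_PREFLUSH on the journal write itself, so
         * the flushes can proceed in parallel before the data write goes out:
         */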
        if (nr_rw_members > 1)
                w->separate_flush = true;

        if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
                for_each_rw_member(ca, c, i) {
                        percpu_ref_get(&ca->io_ref);

                        bio = ca->journal.bio;
                        bio_reset(bio);
                        bio_set_dev(bio, ca->disk_sb.bdev);
                        bio->bi_opf             = REQ_OP_FLUSH;
                        bio->bi_end_io          = journal_write_endio;
                        bio->bi_private         = ca;
                        closure_bio_submit(bio, cl);
                }
        }

        continue_at(cl, do_journal_write, c->io_complete_wq);
        return;
no_io:
        continue_at(cl, journal_write_done, c->io_complete_wq);
        return;
err:
        bch2_fatal_error(c);
        continue_at(cl, journal_write_done, c->io_complete_wq);
}