// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "sb-clean.h"
#include "trace.h"

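/*
 * The nonce for journal checksums/encryption is derived from the entry's
 * sequence number (words 1 and 2 are the two halves of the little-endian
 * seq), with BCH_NONCE_JOURNAL in word 3 so journal nonces can't collide
 * with nonces used for other metadata types:
 */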
static struct nonce journal_nonce(const struct jset *jset)
{
        return (struct nonce) {{
                [0] = 0,
                [1] = ((__le32 *) &jset->seq)[0],
                [2] = ((__le32 *) &jset->seq)[1],
                [3] = BCH_NONCE_JOURNAL,
        }};
}

static bool jset_csum_good(struct bch_fs *c, struct jset *j)
{
        return bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)) &&
                !bch2_crc_cmp(j->csum,
                              csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j));
}

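/*
 * Map a journal sequence number to its index in the journal_entries
 * genradix: an offset from journal_entries_base_seq masked down to 31 bits,
 * see the comment in journal_entry_add():
 */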
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
        return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}

static void __journal_replay_free(struct bch_fs *c,
                                  struct journal_replay *i)
{
        struct journal_replay **p =
                genradix_ptr(&c->journal_entries,
                             journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

        BUG_ON(*p != i);
        *p = NULL;
        kvpfree(i, offsetof(struct journal_replay, j) +
                vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
        i->ignore = true;

        if (!c->opts.read_entire_journal)
                __journal_replay_free(c, i);
}

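/*
 * State shared by the per-device journal read closures: last_seq is the
 * oldest sequence number we still need, lock protects the journal_entries
 * radix tree during journal_entry_add(), and ret reports errors back to
 * bch2_journal_read():
 */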
struct journal_list {
        struct closure          cl;
        u64                     last_seq;
        struct mutex            lock;
        int                     ret;
};

#define JOURNAL_ENTRY_ADD_OK            0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                             struct journal_ptr entry_ptr,
                             struct journal_list *jlist, struct jset *j)
{
        struct genradix_iter iter;
        struct journal_replay **_i, *i, *dup;
        struct journal_ptr *ptr;
        size_t bytes = vstruct_bytes(j);
        u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
        int ret = JOURNAL_ENTRY_ADD_OK;

        /* Is this entry older than the range we need? */
        if (!c->opts.read_entire_journal &&
            le64_to_cpu(j->seq) < jlist->last_seq)
                return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

        /*
         * genradixes are indexed by a ulong, not a u64, so we can't index them
         * by sequence number directly: assume instead that they will all fall
         * within the range of +/- 2 billion of the first one we find.
         */
        if (!c->journal_entries_base_seq)
                c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);

        /* Drop entries we don't need anymore */
        if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
                genradix_for_each_from(&c->journal_entries, iter, _i,
                                       journal_entry_radix_idx(c, jlist->last_seq)) {
                        i = *_i;

                        if (!i || i->ignore)
                                continue;

                        if (le64_to_cpu(i->j.seq) >= last_seq)
                                break;
                        journal_replay_free(c, i);
                }
        }

        jlist->last_seq = max(jlist->last_seq, last_seq);

        _i = genradix_ptr_alloc(&c->journal_entries,
                                journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
                                GFP_KERNEL);
        if (!_i)
                return -BCH_ERR_ENOMEM_journal_entry_add;

        /*
         * Duplicate journal entries? If so we want the one that didn't have a
         * checksum error:
         */
        dup = *_i;
        if (dup) {
                if (bytes == vstruct_bytes(&dup->j) &&
                    !memcmp(j, &dup->j, bytes)) {
                        i = dup;
                        goto found;
                }

                if (!entry_ptr.csum_good) {
                        i = dup;
                        goto found;
                }

                if (!dup->csum_good)
                        goto replace;

                fsck_err(c, journal_entry_replicas_data_mismatch,
                         "found duplicate but non identical journal entries (seq %llu)",
                         le64_to_cpu(j->seq));
                i = dup;
                goto found;
        }
replace:
        i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
        if (!i)
                return -BCH_ERR_ENOMEM_journal_entry_add;

        i->nr_ptrs      = 0;
        i->csum_good    = entry_ptr.csum_good;
        i->ignore       = false;
        unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
        i->ptrs[i->nr_ptrs++] = entry_ptr;

        if (dup) {
                if (dup->nr_ptrs >= ARRAY_SIZE(dup->ptrs)) {
                        bch_err(c, "found too many copies of journal entry %llu",
                                le64_to_cpu(i->j.seq));
                        dup->nr_ptrs = ARRAY_SIZE(dup->ptrs) - 1;
                }

                /* The first ptr should represent the jset we kept: */
                memcpy(i->ptrs + i->nr_ptrs,
                       dup->ptrs,
                       sizeof(dup->ptrs[0]) * dup->nr_ptrs);
                i->nr_ptrs += dup->nr_ptrs;
                __journal_replay_free(c, dup);
        }

        *_i = i;
        return 0;
found:
        for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
                if (ptr->dev == ca->dev_idx) {
                        bch_err(c, "duplicate journal entry %llu on same device",
                                le64_to_cpu(i->j.seq));
                        goto out;
                }
        }

        if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
                bch_err(c, "found too many copies of journal entry %llu",
                        le64_to_cpu(i->j.seq));
                goto out;
        }

        i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
        return ret;
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
        struct jset_entry *entry;

        for (entry = start; entry != end; entry = vstruct_next(entry))
                memset(entry, 0, sizeof(*entry));
}

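/*
 * Additional return codes for jset_validate()/jset_validate_early(), beyond
 * 0/-errno: REREAD means the entry extends past what we've read so far and
 * the caller should read more and retry, NONE means the magic didn't match
 * (not a journal entry), BAD means the entry failed validation:
 */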
#define JOURNAL_ENTRY_REREAD    5
#define JOURNAL_ENTRY_NONE      6
#define JOURNAL_ENTRY_BAD       7

static void journal_entry_err_msg(struct printbuf *out,
                                  u32 version,
                                  struct jset *jset,
                                  struct jset_entry *entry)
{
        prt_str(out, "invalid journal entry, version=");
        bch2_version_to_text(out, version);

        if (entry) {
                prt_str(out, " type=");
                prt_str(out, bch2_jset_entry_types[entry->type]);
        }

        if (!jset) {
                prt_printf(out, " in superblock");
        } else {
                prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));

                if (entry)
                        prt_printf(out, " offset=%zi/%u",
                                   (u64 *) entry - jset->_data,
                                   le32_to_cpu(jset->u64s));
        }

        prt_str(out, ": ");
}

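/*
 * Report a journal entry validation error. At read time this is a mustfix
 * fsck error: we repair in place and keep going. At write time we're about to
 * write out corrupt metadata, so we count the error in the superblock, log
 * it, and bail out if bch2_fs_inconsistent() says we can't continue. Note:
 * this relies on 'flags', 'ret' and an fsck_err label being in scope in the
 * caller.
 */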
#define journal_entry_err(c, version, jset, entry, _err, msg, ...)      \
({                                                                      \
        struct printbuf _buf = PRINTBUF;                                \
                                                                        \
        journal_entry_err_msg(&_buf, version, jset, entry);             \
        prt_printf(&_buf, msg, ##__VA_ARGS__);                          \
                                                                        \
        switch (flags & BKEY_INVALID_WRITE) {                           \
        case READ:                                                      \
                mustfix_fsck_err(c, _err, "%s", _buf.buf);              \
                break;                                                  \
        case WRITE:                                                     \
                bch2_sb_error_count(c, BCH_FSCK_ERR_##_err);            \
                bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
                if (bch2_fs_inconsistent(c)) {                          \
                        ret = -BCH_ERR_fsck_errors_not_fixed;           \
                        goto fsck_err;                                  \
                }                                                       \
                break;                                                  \
        }                                                               \
                                                                        \
        printbuf_exit(&_buf);                                           \
        true;                                                           \
})

#define journal_entry_err_on(cond, ...)                                 \
        ((cond) ? journal_entry_err(__VA_ARGS__) : false)

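/*
 * Returned by journal_validate_key() when it dropped the bad key it was
 * validating; distinct from a fatal error, callers just carry on:
 */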
#define FSCK_DELETED_KEY        5

static int journal_validate_key(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned level, enum btree_id btree_id,
                                struct bkey_i *k,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        int write = flags & BKEY_INVALID_WRITE;
        void *next = vstruct_next(entry);
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (journal_entry_err_on(!k->k.u64s,
                                 c, version, jset, entry,
                                 journal_entry_bkey_u64s_0,
                                 "k->u64s 0")) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (journal_entry_err_on((void *) bkey_next(k) >
                                 (void *) vstruct_next(entry),
                                 c, version, jset, entry,
                                 journal_entry_bkey_past_end,
                                 "extends past end of journal entry")) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
                                 c, version, jset, entry,
                                 journal_entry_bkey_bad_format,
                                 "bad format %u", k->k.format)) {
                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (!write)
                bch2_bkey_compat(level, btree_id, version, big_endian,
                                 write, NULL, bkey_to_packed(k));

        if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                              __btree_node_type(level, btree_id), write, &buf)) {
                printbuf_reset(&buf);
                journal_entry_err_msg(&buf, version, jset, entry);
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
                prt_newline(&buf);
                bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                                  __btree_node_type(level, btree_id), write, &buf);

                mustfix_fsck_err(c, journal_entry_bkey_invalid,
                                 "%s", buf.buf);

                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);

                printbuf_exit(&buf);
                return FSCK_DELETED_KEY;
        }

        if (write)
                bch2_bkey_compat(level, btree_id, version, big_endian,
                                 write, NULL, bkey_to_packed(k));
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

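/*
 * Validate each key in a btree_keys entry. journal_validate_key() repairs in
 * place: when it deletes a bad key the remaining keys are shifted down (or
 * the entry is truncated), so on FSCK_DELETED_KEY we retry from the same
 * position rather than advancing to bkey_next():
 */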
static int journal_entry_btree_keys_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct bkey_i *k = entry->start;

        while (k != vstruct_last(entry)) {
                int ret = journal_validate_key(c, jset, entry,
                                               entry->level,
                                               entry->btree_id,
                                               k, version, big_endian,
                                               flags|BKEY_INVALID_JOURNAL);
                if (ret == FSCK_DELETED_KEY)
                        continue;

                k = bkey_next(k);
        }

        return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        struct bkey_i *k;
        bool first = true;

        jset_entry_for_each_key(entry, k) {
                if (!first) {
                        prt_newline(out);
                        prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                }
                prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
                bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
                first = false;
        }
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct bkey_i *k = entry->start;
        int ret = 0;

        if (journal_entry_err_on(!entry->u64s ||
                                 le16_to_cpu(entry->u64s) != k->k.u64s,
                                 c, version, jset, entry,
                                 journal_entry_btree_root_bad_size,
                                 "invalid btree root journal entry: wrong number of keys")) {
                void *next = vstruct_next(entry);
                /*
                 * we don't want to null out this jset_entry,
                 * just the contents, so that later we can tell
                 * we were _supposed_ to have a btree root
                 */
                entry->u64s = 0;
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
                                   version, big_endian, flags);
        if (ret == FSCK_DELETED_KEY)
                ret = 0;
fsck_err:
        return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        /* obsolete, don't care: */
        return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
                                 c, version, jset, entry,
                                 journal_entry_blacklist_bad_size,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
fsck_err:
        return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_blacklist *bl =
                container_of(entry, struct jset_entry_blacklist, entry);

        prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_blacklist_v2 *bl_entry;
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
                                 c, version, jset, entry,
                                 journal_entry_blacklist_v2_bad_size,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }

        bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

        if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
                                 le64_to_cpu(bl_entry->end),
                                 c, version, jset, entry,
                                 journal_entry_blacklist_v2_start_past_end,
                "invalid journal seq blacklist entry: start > end")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
out:
fsck_err:
        return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
                                               struct jset_entry *entry)
{
        struct jset_entry_blacklist_v2 *bl =
                container_of(entry, struct jset_entry_blacklist_v2, entry);

        prt_printf(out, "start=%llu end=%llu",
               le64_to_cpu(bl->start),
               le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u),
                                 c, version, jset, entry,
                                 journal_entry_usage_bad_size,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct jset_entry *entry)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);

        prt_printf(out, "type=%s v=%llu",
               bch2_fs_usage_types[u->entry.btree_id],
               le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        struct printbuf err = PRINTBUF;
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u) ||
                                 bytes < sizeof(*u) + u->r.nr_devs,
                                 c, version, jset, entry,
                                 journal_entry_data_usage_bad_size,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }

        if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
                                 c, version, jset, entry,
                                 journal_entry_data_usage_bad_size,
                                 "invalid journal entry usage: %s", err.buf)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }
out:
fsck_err:
        printbuf_exit(&err);
        return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);

        bch2_replicas_entry_to_text(out, &u->r);
        prt_printf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_clock *clock =
                container_of(entry, struct jset_entry_clock, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes != sizeof(*clock),
                                 c, version, jset, entry,
                                 journal_entry_clock_bad_size,
                                 "bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        if (journal_entry_err_on(clock->rw > 1,
                                 c, version, jset, entry,
                                 journal_entry_clock_bad_rw,
                                 "bad rw")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct jset_entry *entry)
{
        struct jset_entry_clock *clock =
                container_of(entry, struct jset_entry_clock, entry);

        prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_dev_usage *u =
                container_of(entry, struct jset_entry_dev_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        unsigned expected = sizeof(*u);
        unsigned dev;
        int ret = 0;

        if (journal_entry_err_on(bytes < expected,
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_size,
                                 "bad size (%u < %u)",
                                 bytes, expected)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        dev = le32_to_cpu(u->dev);

        if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_dev,
                                 "bad dev")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        if (journal_entry_err_on(u->pad,
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_pad,
                                 "bad pad")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_dev_usage *u =
                container_of(entry, struct jset_entry_dev_usage, entry);
        unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

        prt_printf(out, "dev=%u", le32_to_cpu(u->dev));

        for (i = 0; i < nr_types; i++) {
                if (i < BCH_DATA_NR)
                        prt_printf(out, " %s", bch2_data_types[i]);
                else
                        prt_printf(out, " (unknown data type %u)", i);
                prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
                       le64_to_cpu(u->d[i].buckets),
                       le64_to_cpu(u->d[i].sectors),
                       le64_to_cpu(u->d[i].fragmented));
        }
}

static int journal_entry_log_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
                                      struct jset_entry *entry)
{
        struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
        unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

        prt_printf(out, "%.*s", bytes, l->d);
}

static int journal_entry_overwrite_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return journal_entry_btree_keys_validate(c, jset, entry,
                                version, big_endian, READ);
}

static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return journal_entry_btree_keys_validate(c, jset, entry,
                                version, big_endian, READ);
}

static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

struct jset_entry_ops {
        int (*validate)(struct bch_fs *, struct jset *,
                        struct jset_entry *, unsigned, int,
                        enum bkey_invalid_flags);
        void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)                                                \
        [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
                .validate       = journal_entry_##f##_validate, \
                .to_text        = journal_entry_##f##_to_text,  \
        },
        BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return entry->type < BCH_JSET_ENTRY_NR
                ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
                                version, big_endian, flags)
                : 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
                                struct jset_entry *entry)
{
        if (entry->type < BCH_JSET_ENTRY_NR) {
                prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
        } else {
                prt_printf(out, "(unknown type %u)", entry->type);
        }
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
                                 enum bkey_invalid_flags flags)
{
        unsigned version = le32_to_cpu(jset->version);
        int ret = 0;

        vstruct_for_each(jset, entry) {
                if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
                                c, version, jset, entry,
                                journal_entry_past_jset_end,
                                "journal entry extends past end of jset")) {
                        jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
                        break;
                }

                ret = bch2_journal_entry_validate(c, jset, entry,
                                        version, JSET_BIG_ENDIAN(jset), flags);
                if (ret)
                        break;
        }
fsck_err:
        return ret;
}

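/*
 * Full validation, done once per journal entry after journal read, on the
 * copy of the entry we've decided to keep:
 */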
static int jset_validate(struct bch_fs *c,
                         struct bch_dev *ca,
                         struct jset *jset, u64 sector,
                         enum bkey_invalid_flags flags)
{
        unsigned version;
        int ret = 0;

        if (le64_to_cpu(jset->magic) != jset_magic(c))
                return JOURNAL_ENTRY_NONE;

        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on(!bch2_version_compatible(version),
                        c, version, jset, NULL,
                        jset_unsupported_version,
                        "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        BCH_VERSION_MAJOR(version),
                        BCH_VERSION_MINOR(version))) {
                /* don't try to continue: */
                return -EINVAL;
        }

        if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
                        c, version, jset, NULL,
                        jset_unknown_csum,
                        "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        JSET_CSUM_TYPE(jset)))
                ret = JOURNAL_ENTRY_BAD;

        /* last_seq is ignored when JSET_NO_FLUSH is true */
        if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
                                 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
                                 c, version, jset, NULL,
                                 jset_last_seq_newer_than_seq,
                                 "invalid journal entry: last_seq > seq (%llu > %llu)",
                                 le64_to_cpu(jset->last_seq),
                                 le64_to_cpu(jset->seq))) {
                jset->last_seq = jset->seq;
                return JOURNAL_ENTRY_BAD;
        }

        ret = jset_validate_entries(c, jset, flags);
fsck_err:
        return ret;
}

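/*
 * Partial validation done during journal read, possibly before the full
 * entry has been read in and before the checksum has been verified: just
 * enough to trust the entry's size, so we know how far to advance in the
 * bucket:
 */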
static int jset_validate_early(struct bch_fs *c,
                         struct bch_dev *ca,
                         struct jset *jset, u64 sector,
                         unsigned bucket_sectors_left,
                         unsigned sectors_read)
{
        size_t bytes = vstruct_bytes(jset);
        unsigned version;
        enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
        int ret = 0;

        if (le64_to_cpu(jset->magic) != jset_magic(c))
                return JOURNAL_ENTRY_NONE;

        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on(!bch2_version_compatible(version),
                        c, version, jset, NULL,
                        jset_unsupported_version,
                        "%s sector %llu seq %llu: unknown journal entry version %u.%u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        BCH_VERSION_MAJOR(version),
                        BCH_VERSION_MINOR(version))) {
                /* don't try to continue: */
                return -EINVAL;
        }

        if (bytes > (sectors_read << 9) &&
            sectors_read < bucket_sectors_left)
                return JOURNAL_ENTRY_REREAD;

        if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
                        c, version, jset, NULL,
                        jset_past_bucket_end,
                        "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq), bytes))
                le32_add_cpu(&jset->u64s,
                             -((bytes - (bucket_sectors_left << 9)) / 8));
fsck_err:
        return ret;
}

struct journal_read_buf {
        void            *data;
        size_t          size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
                                    size_t new_size)
{
        void *n;

        /* the bios are sized for this many pages, max: */
        if (new_size > JOURNAL_ENTRY_SIZE_MAX)
                return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

        new_size = roundup_pow_of_two(new_size);
        n = kvpmalloc(new_size, GFP_KERNEL);
        if (!n)
                return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

        kvpfree(b->data, b->size);
        b->data = n;
        b->size = new_size;
        return 0;
}

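/*
 * Scan a single journal bucket: read into the (growable) read buffer,
 * validate and add each entry found, advancing entry by entry. If an entry
 * looks bad we can't trust its size field, so we skip ahead a block at a
 * time looking for the next valid entry:
 */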
static int journal_read_bucket(struct bch_dev *ca,
                               struct journal_read_buf *buf,
                               struct journal_list *jlist,
                               unsigned bucket)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        struct jset *j = NULL;
        unsigned sectors, sectors_read = 0;
        u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
            end = offset + ca->mi.bucket_size;
        bool saw_bad = false, csum_good;
        int ret = 0;

        pr_debug("reading %u", bucket);

        while (offset < end) {
                if (!sectors_read) {
                        struct bio *bio;
                        unsigned nr_bvecs;
reread:
                        sectors_read = min_t(unsigned,
                                end - offset, buf->size >> 9);
                        nr_bvecs = buf_pages(buf->data, sectors_read << 9);

                        bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
                        bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

                        bio->bi_iter.bi_sector = offset;
                        bch2_bio_map(bio, buf->data, sectors_read << 9);

                        ret = submit_bio_wait(bio);
                        kfree(bio);

                        if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
                                               "journal read error: sector %llu",
                                               offset) ||
                            bch2_meta_read_fault("journal")) {
                                /*
                                 * We don't error out of the recovery process
                                 * here, since the relevant journal entry may be
                                 * found on a different device, and missing or
                                 * no journal entries will be handled later
                                 */
                                return 0;
                        }

                        j = buf->data;
                }

                ret = jset_validate_early(c, ca, j, offset,
                                    end - offset, sectors_read);
                switch (ret) {
                case 0:
                        sectors = vstruct_sectors(j, c->block_bits);
                        break;
                case JOURNAL_ENTRY_REREAD:
                        if (vstruct_bytes(j) > buf->size) {
                                ret = journal_read_buf_realloc(buf,
                                                        vstruct_bytes(j));
                                if (ret)
                                        return ret;
                        }
                        goto reread;
                case JOURNAL_ENTRY_NONE:
                        if (!saw_bad)
                                return 0;
                        /*
                         * On checksum error we don't really trust the size
                         * field of the journal entry we read, so try reading
                         * again at next block boundary:
                         */
                        sectors = block_sectors(c);
                        goto next_block;
                default:
                        return ret;
                }

                /*
                 * This happens sometimes if we don't have discards on -
                 * when we've partially overwritten a bucket with new
                 * journal entries. We don't need the rest of the
                 * bucket:
                 */
                if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
                        return 0;

                ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

                csum_good = jset_csum_good(c, j);
                if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
                                       "journal checksum error"))
                        saw_bad = true;

                ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
                             j->encrypted_start,
                             vstruct_end(j) - (void *) j->encrypted_start);
                bch2_fs_fatal_err_on(ret, c,
                                "error decrypting journal entry: %i", ret);

                mutex_lock(&jlist->lock);
                ret = journal_entry_add(c, ca, (struct journal_ptr) {
                                        .csum_good      = csum_good,
                                        .dev            = ca->dev_idx,
                                        .bucket         = bucket,
                                        .bucket_offset  = offset -
                                                bucket_to_sector(ca, ja->buckets[bucket]),
                                        .sector         = offset,
                                        }, jlist, j);
                mutex_unlock(&jlist->lock);

                switch (ret) {
                case JOURNAL_ENTRY_ADD_OK:
                        break;
                case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
                        break;
                default:
                        return ret;
                }
next_block:
                pr_debug("next");
                offset          += sectors;
                sectors_read    -= sectors;
                j = ((void *) j) + (sectors << 9);
        }

        return 0;
}

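/*
 * Per-device journal read, run as a closure so all devices are read in
 * parallel: after scanning every bucket, find the most recent entry on this
 * device to determine where the next journal write to it should resume:
 */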
static CLOSURE_CALLBACK(bch2_journal_read_device)
{
        closure_type(ja, struct journal_device, read);
        struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
        struct bch_fs *c = ca->fs;
        struct journal_list *jlist =
                container_of(cl->parent, struct journal_list, cl);
        struct journal_replay *r, **_r;
        struct genradix_iter iter;
        struct journal_read_buf buf = { NULL, 0 };
        unsigned i;
        int ret = 0;

        if (!ja->nr)
                goto out;

        ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
        if (ret)
                goto err;

        pr_debug("%u journal buckets", ja->nr);

        for (i = 0; i < ja->nr; i++) {
                ret = journal_read_bucket(ca, &buf, jlist, i);
                if (ret)
                        goto err;
        }

        ja->sectors_free = ca->mi.bucket_size;

        mutex_lock(&jlist->lock);
        genradix_for_each_reverse(&c->journal_entries, iter, _r) {
                r = *_r;

                if (!r)
                        continue;

                for (i = 0; i < r->nr_ptrs; i++) {
                        if (r->ptrs[i].dev == ca->dev_idx) {
                                unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
                                        vstruct_sectors(&r->j, c->block_bits);

                                ja->cur_idx = r->ptrs[i].bucket;
                                ja->sectors_free = ca->mi.bucket_size - wrote;
                                goto found;
                        }
                }
        }
found:
        mutex_unlock(&jlist->lock);

        if (ja->bucket_seq[ja->cur_idx] &&
            ja->sectors_free == ca->mi.bucket_size) {
#if 0
                /*
                 * Debug code for ZNS support, where we (probably) want to
                 * correlate where we stopped in the journal with the zone
                 * write points:
                 */
                bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
                bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
                for (i = 0; i < 3; i++) {
                        unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

                        bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
                }
#endif
                ja->sectors_free = 0;
        }

        /*
         * Set dirty_idx to indicate the entire journal is full and needs to be
         * reclaimed - journal reclaim will immediately reclaim whatever isn't
         * pinned when it first runs:
         */
        ja->discard_idx = ja->dirty_idx_ondisk =
                ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
        bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
        kvpfree(buf.data, buf.size);
        percpu_ref_put(&ca->io_ref);
        closure_return(cl);
        return;
err:
        mutex_lock(&jlist->lock);
        jlist->ret = ret;
        mutex_unlock(&jlist->lock);
        goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                               struct journal_replay *j)
{
        unsigned i;

        for (i = 0; i < j->nr_ptrs; i++) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
                u64 offset;

                div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

                if (i)
                        prt_printf(out, " ");
                prt_printf(out, "%u:%u:%u (sector %llu)",
                       j->ptrs[i].dev,
                       j->ptrs[i].bucket,
                       j->ptrs[i].bucket_offset,
                       j->ptrs[i].sector);
        }
}

int bch2_journal_read(struct bch_fs *c,
                      u64 *last_seq,
                      u64 *blacklist_seq,
                      u64 *start_seq)
{
        struct journal_list jlist;
        struct journal_replay *i, **_i, *prev = NULL;
        struct genradix_iter radix_iter;
        struct printbuf buf = PRINTBUF;
        bool degraded = false, last_write_torn = false;
        u64 seq;
        int ret = 0;

        closure_init_stack(&jlist.cl);
        mutex_init(&jlist.lock);
        jlist.last_seq = 0;
        jlist.ret = 0;

        for_each_member_device(c, ca) {
                if (!c->opts.fsck &&
                    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
                        continue;

                if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
                     ca->mi.state == BCH_MEMBER_STATE_ro) &&
                    percpu_ref_tryget(&ca->io_ref))
                        closure_call(&ca->journal.read,
                                     bch2_journal_read_device,
                                     system_unbound_wq,
                                     &jlist.cl);
                else
                        degraded = true;
        }

        closure_sync(&jlist.cl);

        if (jlist.ret)
                return jlist.ret;

        *last_seq       = 0;
        *start_seq      = 0;
        *blacklist_seq  = 0;

        /*
         * Find most recent flush entry, and ignore newer non flush entries -
         * those entries will be blacklisted:
         */
        genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
                enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;

                i = *_i;

                if (!i || i->ignore)
                        continue;

                if (!*start_seq)
                        *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;

                if (JSET_NO_FLUSH(&i->j)) {
                        i->ignore = true;
                        continue;
                }

                if (!last_write_torn && !i->csum_good) {
                        last_write_torn = true;
                        i->ignore = true;
                        continue;
                }

                if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
                                         c, le32_to_cpu(i->j.version), &i->j, NULL,
                                         jset_last_seq_newer_than_seq,
                                         "invalid journal entry: last_seq > seq (%llu > %llu)",
                                         le64_to_cpu(i->j.last_seq),
                                         le64_to_cpu(i->j.seq)))
                        i->j.last_seq = i->j.seq;

                *last_seq       = le64_to_cpu(i->j.last_seq);
                *blacklist_seq  = le64_to_cpu(i->j.seq) + 1;
                break;
        }

        if (!*start_seq) {
                bch_info(c, "journal read done, but no entries found");
                return 0;
        }

        if (!*last_seq) {
                fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
                         "journal read done, but no entries found after dropping non-flushes");
                return 0;
        }

        bch_info(c, "journal read done, replaying entries %llu-%llu",
                 *last_seq, *blacklist_seq - 1);

        if (*start_seq != *blacklist_seq)
                bch_info(c, "dropped unflushed entries %llu-%llu",
                         *blacklist_seq, *start_seq - 1);

        /* Drop blacklisted entries and entries older than last_seq: */
        genradix_for_each(&c->journal_entries, radix_iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                seq = le64_to_cpu(i->j.seq);
                if (seq < *last_seq) {
                        journal_replay_free(c, i);
                        continue;
                }

                if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
                        fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
                                    jset_seq_blacklisted,
                                    "found blacklisted journal entry %llu", seq);
                        i->ignore = true;
                }
        }

        /* Check for missing entries: */
        seq = *last_seq;
        genradix_for_each(&c->journal_entries, radix_iter, _i) {
                i = *_i;

                if (!i || i->ignore)
                        continue;

                BUG_ON(seq > le64_to_cpu(i->j.seq));

                while (seq < le64_to_cpu(i->j.seq)) {
                        u64 missing_start, missing_end;
                        struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

                        while (seq < le64_to_cpu(i->j.seq) &&
                               bch2_journal_seq_is_blacklisted(c, seq, false))
                                seq++;

                        if (seq == le64_to_cpu(i->j.seq))
                                break;

                        missing_start = seq;

                        while (seq < le64_to_cpu(i->j.seq) &&
                               !bch2_journal_seq_is_blacklisted(c, seq, false))
                                seq++;

                        if (prev) {
                                bch2_journal_ptrs_to_text(&buf1, c, prev);
                                prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
                        } else
                                prt_printf(&buf1, "(none)");
                        bch2_journal_ptrs_to_text(&buf2, c, i);

                        missing_end = seq - 1;
                        fsck_err(c, journal_entries_missing,
                                 "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
                                 "  prev at %s\n"
                                 "  next at %s",
                                 missing_start, missing_end,
                                 *last_seq, *blacklist_seq - 1,
                                 buf1.buf, buf2.buf);

                        printbuf_exit(&buf1);
                        printbuf_exit(&buf2);
                }

                prev = i;
                seq++;
        }

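        /*
         * Validate the entries we're keeping, and make sure the superblock's
         * replicas section includes the devices each entry was found on, so
         * that degraded mounts can tell whether journal entries are missing:
         */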
        genradix_for_each(&c->journal_entries, radix_iter, _i) {
                struct bch_replicas_padded replicas = {
                        .e.data_type = BCH_DATA_journal,
                        .e.nr_required = 1,
                };
                unsigned ptr;

                i = *_i;
                if (!i || i->ignore)
                        continue;

                for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
                        struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);

                        if (!i->ptrs[ptr].csum_good)
                                bch_err_dev_offset(ca, i->ptrs[ptr].sector,
                                                   "invalid journal checksum, seq %llu%s",
                                                   le64_to_cpu(i->j.seq),
                                                   i->csum_good ? " (had good copy on another device)" : "");
                }

                ret = jset_validate(c,
                                    bch_dev_bkey_exists(c, i->ptrs[0].dev),
                                    &i->j,
                                    i->ptrs[0].sector,
                                    READ);
                if (ret)
                        goto err;

                for (ptr = 0; ptr < i->nr_ptrs; ptr++)
                        replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

                bch2_replicas_entry_sort(&replicas.e);

                printbuf_reset(&buf);
                bch2_replicas_entry_to_text(&buf, &replicas.e);

                if (!degraded &&
                    !bch2_replicas_marked(c, &replicas.e) &&
                    (le64_to_cpu(i->j.seq) == *last_seq ||
                     fsck_err(c, journal_entry_replicas_not_marked,
                              "superblock not marked as containing replicas for journal entry %llu\n  %s",
                              le64_to_cpu(i->j.seq), buf.buf))) {
                        ret = bch2_mark_replicas(c, &replicas.e);
                        if (ret)
                                goto err;
                }
        }
err:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

/* journal write: */

1393 static void __journal_write_alloc(struct journal *j,
1394                                   struct journal_buf *w,
1395                                   struct dev_alloc_list *devs_sorted,
1396                                   unsigned sectors,
1397                                   unsigned *replicas,
1398                                   unsigned replicas_want)
1399 {
1400         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1401         struct journal_device *ja;
1402         struct bch_dev *ca;
1403         unsigned i;
1404
1405         if (*replicas >= replicas_want)
1406                 return;
1407
1408         for (i = 0; i < devs_sorted->nr; i++) {
1409                 ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
1410                 if (!ca)
1411                         continue;
1412
1413                 ja = &ca->journal;
1414
1415                 /*
1416                  * Check that we can use this device, and aren't already using
1417                  * it:
1418                  */
1419                 if (!ca->mi.durability ||
1420                     ca->mi.state != BCH_MEMBER_STATE_rw ||
1421                     !ja->nr ||
1422                     bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
1423                     sectors > ja->sectors_free)
1424                         continue;
1425
1426                 bch2_dev_stripe_increment(ca, &j->wp.stripe);
1427
1428                 bch2_bkey_append_ptr(&w->key,
1429                         (struct bch_extent_ptr) {
1430                                   .offset = bucket_to_sector(ca,
1431                                         ja->buckets[ja->cur_idx]) +
1432                                         ca->mi.bucket_size -
1433                                         ja->sectors_free,
1434                                   .dev = ca->dev_idx,
1435                 });
1436
1437                 ja->sectors_free -= sectors;
1438                 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1439
1440                 *replicas += ca->mi.durability;
1441
1442                 if (*replicas >= replicas_want)
1443                         break;
1444         }
1445 }
1446
1447 /**
1448  * journal_write_alloc - decide where to write next journal entry
1449  *
1450  * @j:          journal object
1451  * @w:          journal buf (entry to be written)
1452  *
1453  * Returns: 0 on success, or -EROFS on failure
1454  */
1455 static int journal_write_alloc(struct journal *j, struct journal_buf *w)
1456 {
1457         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1458         struct bch_devs_mask devs;
1459         struct journal_device *ja;
1460         struct bch_dev *ca;
1461         struct dev_alloc_list devs_sorted;
1462         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
1463         unsigned target = c->opts.metadata_target ?:
1464                 c->opts.foreground_target;
1465         unsigned i, replicas = 0, replicas_want =
1466                 READ_ONCE(c->opts.metadata_replicas);
1467
1468         rcu_read_lock();
1469 retry:
1470         devs = target_rw_devs(c, BCH_DATA_journal, target);
1471
1472         devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
1473
1474         __journal_write_alloc(j, w, &devs_sorted,
1475                               sectors, &replicas, replicas_want);
1476
1477         if (replicas >= replicas_want)
1478                 goto done;
1479
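             /*
              * Didn't get enough replicas from the devices' current journal
              * buckets: advance each device that has another bucket available
              * to its next bucket, then retry the allocation:
              */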
1480         for (i = 0; i < devs_sorted.nr; i++) {
1481                 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
1482                 if (!ca)
1483                         continue;
1484
1485                 ja = &ca->journal;
1486
1487                 if (sectors > ja->sectors_free &&
1488                     sectors <= ca->mi.bucket_size &&
1489                     bch2_journal_dev_buckets_available(j, ja,
1490                                         journal_space_discarded)) {
1491                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
1492                         ja->sectors_free = ca->mi.bucket_size;
1493
1494                         /*
1495                          * ja->bucket_seq[ja->cur_idx] must always have
1496                          * something sensible:
1497                          */
1498                         ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1499                 }
1500         }
1501
1502         __journal_write_alloc(j, w, &devs_sorted,
1503                               sectors, &replicas, replicas_want);
1504
1505         if (replicas < replicas_want && target) {
1506                 /* Retry from all devices: */
1507                 target = 0;
1508                 goto retry;
1509         }
1510 done:
1511         rcu_read_unlock();
1512
1513         BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
1514
1515         return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
1516 }
1517
1518 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
1519 {
1520         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1521
1522         /* we aren't holding j->lock: */
1523         unsigned new_size = READ_ONCE(j->buf_size_want);
1524         void *new_buf;
1525
1526         if (buf->buf_size >= new_size)
1527                 return;
1528
1529         size_t btree_write_buffer_size = new_size / 64;
1530
1531         if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
1532                 return;
1533
1534         new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
1535         if (!new_buf)
1536                 return;
1537
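             /*
              * Copy the contents outside of j->lock - only the pointer and
              * size swap need to happen under the lock; the old buffer is
              * freed once the swap is visible:
              */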
1538         memcpy(new_buf, buf->data, buf->buf_size);
1539
1540         spin_lock(&j->lock);
1541         swap(buf->data,         new_buf);
1542         swap(buf->buf_size,     new_size);
1543         spin_unlock(&j->lock);
1544
1545         kvpfree(new_buf, new_size);
1546 }
1547
1548 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
1549 {
1550         return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
1551 }
1552
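     /*
      * Called when every bio for a journal write has completed: updates the
      * on-disk sequence number state, marks the replicas the entry actually
      * landed on, and kicks off the next pending journal write, if any.
      */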
1553 static CLOSURE_CALLBACK(journal_write_done)
1554 {
1555         closure_type(j, struct journal, io);
1556         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1557         struct journal_buf *w = journal_last_unwritten_buf(j);
1558         struct bch_replicas_padded replicas;
1559         union journal_res_state old, new;
1560         u64 v, seq;
1561         int err = 0;
1562
1563         bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
1564                                ? j->flush_write_time
1565                                : j->noflush_write_time, j->write_start_time);
1566
1567         if (!w->devs_written.nr) {
1568                 bch_err(c, "unable to write journal to sufficient devices");
1569                 err = -EIO;
1570         } else {
1571                 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
1572                                          w->devs_written);
1573                 if (bch2_mark_replicas(c, &replicas.e))
1574                         err = -EIO;
1575         }
1576
1577         if (err)
1578                 bch2_fatal_error(c);
1579
1580         spin_lock(&j->lock);
1581         seq = le64_to_cpu(w->data->seq);
1582
1583         if (seq >= j->pin.front)
1584                 journal_seq_pin(j, seq)->devs = w->devs_written;
1585
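             /*
              * On success, a flush write advances the on-disk flush horizon:
              * buckets pinned only by older entries can now be discarded, and
              * the allocators can make progress again:
              */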
1586         if (!err) {
1587                 if (!JSET_NO_FLUSH(w->data)) {
1588                         j->flushed_seq_ondisk = seq;
1589                         j->last_seq_ondisk = w->last_seq;
1590
1591                         bch2_do_discards(c);
1592                         closure_wake_up(&c->freelist_wait);
1593
1594                         bch2_reset_alloc_cursors(c);
1595                 }
1596         } else if (!j->err_seq || seq < j->err_seq)
1597                 j->err_seq      = seq;
1598
1599         j->seq_ondisk           = seq;
1600
1601         /*
1602          * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
1603          * more buckets:
1604          *
1605          * Must come before signalling write completion, for
1606          * bch2_fs_journal_stop():
1607          */
1608         if (j->watermark != BCH_WATERMARK_stripe)
1609                 journal_reclaim_kick(&c->journal);
1610
1611         /* also must come before signalling write completion: */
1612         closure_debug_destroy(cl);
1613
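             /*
              * Advance unwritten_idx with a cmpxchg loop - the reservation
              * state word is updated locklessly by writers, so a plain store
              * isn't safe:
              */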
1614         v = atomic64_read(&j->reservations.counter);
1615         do {
1616                 old.v = new.v = v;
1617                 BUG_ON(journal_state_count(new, new.unwritten_idx));
1618
1619                 new.unwritten_idx++;
1620         } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1621                                        old.v, new.v)) != old.v);
1622
1623         bch2_journal_reclaim_fast(j);
1624         bch2_journal_space_available(j);
1625
1626         track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
1627                            &j->max_in_flight_start, false);
1628
1629         closure_wake_up(&w->wait);
1630         journal_wake(j);
1631
1632         if (!journal_state_count(new, new.unwritten_idx) &&
1633             journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
1634                 spin_unlock(&j->lock);
1635                 closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
1636         } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
1637                    new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
1638                 struct journal_buf *buf = journal_cur_buf(j);
1639                 long delta = buf->expires - jiffies;
1640
1641                 /*
1642                  * We don't close a journal entry to write it while there
1643                  * are previous entries still in flight - the current
1644                  * journal entry might want to be written now:
1645                  */
1646
1647                 spin_unlock(&j->lock);
1648                 mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
1649         } else {
1650                 spin_unlock(&j->lock);
1651         }
1652 }
1653
1654 static void journal_write_endio(struct bio *bio)
1655 {
1656         struct bch_dev *ca = bio->bi_private;
1657         struct journal *j = &ca->fs->journal;
1658         struct journal_buf *w = journal_last_unwritten_buf(j);
1659         unsigned long flags;
1660
1661         if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1662                                "error writing journal entry %llu: %s",
1663                                le64_to_cpu(w->data->seq),
1664                                bch2_blk_status_to_str(bio->bi_status)) ||
1665             bch2_meta_write_fault("journal")) {
1666                 spin_lock_irqsave(&j->err_lock, flags);
1667                 bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
1668                 spin_unlock_irqrestore(&j->err_lock, flags);
1669         }
1670
1671         closure_put(&j->io);
1672         percpu_ref_put(&ca->io_ref);
1673 }
1674
1675 static CLOSURE_CALLBACK(do_journal_write)
1676 {
1677         closure_type(j, struct journal, io);
1678         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1679         struct bch_dev *ca;
1680         struct journal_buf *w = journal_last_unwritten_buf(j);
1681         struct bio *bio;
1682         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
1683
1684         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
1685                 ca = bch_dev_bkey_exists(c, ptr->dev);
1686                 if (!percpu_ref_tryget(&ca->io_ref)) {
1687                         /* XXX: fix this */
1688                         bch_err(c, "missing device for journal write");
1689                         continue;
1690                 }
1691
1692                 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
1693                              sectors);
1694
1695                 bio = ca->journal.bio;
1696                 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
1697                 bio->bi_iter.bi_sector  = ptr->offset;
1698                 bio->bi_end_io          = journal_write_endio;
1699                 bio->bi_private         = ca;
1700
1701                 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
1702                 ca->prev_journal_sector = bio->bi_iter.bi_sector;
1703
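                     /*
                      * A flush write must reach stable media (REQ_FUA) and,
                      * unless dedicated flush bios were already issued, must
                      * also flush the device's write cache (REQ_PREFLUSH) so
                      * everything the entry points to is durable first:
                      */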
1704                 if (!JSET_NO_FLUSH(w->data))
1705                         bio->bi_opf    |= REQ_FUA;
1706                 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
1707                         bio->bi_opf    |= REQ_PREFLUSH;
1708
1709                 bch2_bio_map(bio, w->data, sectors << 9);
1710
1711                 trace_and_count(c, journal_write, bio);
1712                 closure_bio_submit(bio, cl);
1713
1714                 ca->journal.bucket_seq[ca->journal.cur_idx] =
1715                         le64_to_cpu(w->data->seq);
1716         }
1717
1718         continue_at(cl, journal_write_done, c->io_complete_wq);
1719 }
1720
1721 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
1722 {
1723         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1724         struct jset_entry *start, *end;
1725         struct jset *jset = w->data;
1726         struct journal_keys_to_wb wb = { NULL };
1727         unsigned sectors, bytes, u64s;
1728         unsigned long btree_roots_have = 0;
1729         bool validate_before_checksum = false;
1730         u64 seq = le64_to_cpu(jset->seq);
1731         int ret;
1732
1733         /*
1734          * Simple compaction, dropping empty jset_entries (from journal
1735          * reservations that weren't fully used) and merging jset_entries that
1736          * can be.
1737          *
1738          * If we wanted to be really fancy here, we could sort all the keys in
1739          * the jset and drop keys that were overwritten - probably not worth it:
1740          */
1741         vstruct_for_each(jset, i) {
1742                 unsigned u64s = le16_to_cpu(i->u64s);
1743
1744                 /* Empty entry: */
1745                 if (!u64s)
1746                         continue;
1747
1748                 /*
1749                  * New btree roots are set by journalling them; when the journal
1750                  * entry gets written we have to propagate them to
1751                  * c->btree_roots
1752                  *
1753                  * But, every journal entry we write has to contain all the
1754                  * btree roots (at least for now); so after we copy btree roots
1755                  * to c->btree_roots we have to get any missing btree roots and
1756                  * add them to this journal entry:
1757                  */
1758                 switch (i->type) {
1759                 case BCH_JSET_ENTRY_btree_root:
1760                         bch2_journal_entry_to_btree_root(c, i);
1761                         __set_bit(i->btree_id, &btree_roots_have);
1762                         break;
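                     /*
                      * Keys destined for the btree write buffer are copied
                      * into it here; the entry is then downgraded to an
                      * ordinary btree_keys entry:
                      */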
1763                 case BCH_JSET_ENTRY_write_buffer_keys:
1764                         EBUG_ON(!w->need_flush_to_write_buffer);
1765
1766                         if (!wb.wb)
1767                                 bch2_journal_keys_to_write_buffer_start(c, &wb, seq);
1768
1769                         struct bkey_i *k;
1770                         jset_entry_for_each_key(i, k) {
1771                                 ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
1772                                 if (ret) {
1773                                         bch2_fs_fatal_error(c, "-ENOMEM flushing journal keys to btree write buffer");
1774                                         bch2_journal_keys_to_write_buffer_end(c, &wb);
1775                                         return ret;
1776                                 }
1777                         }
1778                         i->type = BCH_JSET_ENTRY_btree_keys;
1779                         break;
1780                 }
1781         }
1782
1783         if (wb.wb)
1784                 bch2_journal_keys_to_write_buffer_end(c, &wb);
1785         w->need_flush_to_write_buffer = false;
1786
1787         start = end = vstruct_last(jset);
1788
1789         end     = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
1790
1791         bch2_journal_super_entries_add_common(c, &end, seq);
1792         u64s    = (u64 *) end - (u64 *) start;
1793         BUG_ON(u64s > j->entry_u64s_reserved);
1794
1795         le32_add_cpu(&jset->u64s, u64s);
1796
1797         sectors = vstruct_sectors(jset, c->block_bits);
1798         bytes   = vstruct_bytes(jset);
1799
1800         if (sectors > w->sectors) {
1801                 bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
1802                                     vstruct_bytes(jset), w->sectors << 9,
1803                                     u64s, w->u64s_reserved, j->entry_u64s_reserved);
1804                 return -EINVAL;
1805         }
1806
1807         jset->magic             = cpu_to_le64(jset_magic(c));
1808         jset->version           = cpu_to_le32(c->sb.version);
1809
1810         SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
1811         SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
1812
1813         if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
1814                 j->last_empty_seq = seq;
1815
1816         if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
1817                 validate_before_checksum = true;
1818
1819         if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
1820                 validate_before_checksum = true;
1821
1822         if (validate_before_checksum &&
1823             (ret = jset_validate(c, NULL, jset, 0, WRITE)))
1824                 return ret;
1825
1826         ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
1827                     jset->encrypted_start,
1828                     vstruct_end(jset) - (void *) jset->encrypted_start);
1829         if (bch2_fs_fatal_err_on(ret, c,
1830                         "error encrypting journal entry: %i", ret))
1831                 return ret;
1832
1833         jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
1834                                   journal_nonce(jset), jset);
1835
1836         if (!validate_before_checksum &&
1837             (ret = jset_validate(c, NULL, jset, 0, WRITE)))
1838                 return ret;
1839
1840         memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
1841         return 0;
1842 }
1843
1844 static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
1845 {
1846         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1847         int error = bch2_journal_error(j);
1848
1849         /*
1850          * If the journal is in an error state - we did an emergency shutdown -
1851          * we prefer to continue doing journal writes. We just mark them as
1852          * noflush so they'll never be used, but they'll still be visible to the
1853          * list_journal tool - this helps in debugging.
1854          *
1855          * There's a caveat: the first journal write after marking the
1856          * superblock dirty must always be a flush write, because on startup
1857          * from a clean shutdown we didn't necessarily read the journal and the
1858          * new journal write might overwrite whatever was in the journal
1859          * previously - we can't leave the journal without any flush writes in
1860          * it.
1861          *
1862          * So if we're in an error state, and we're still starting up, we don't
1863          * write anything at all.
1864          */
1865         if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
1866                 return -EIO;
1867
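             /*
              * Downgrade this write to noflush if nothing requires a flush
              * and the last flush write was within journal_flush_delay:
              */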
1868         if (error ||
1869             w->noflush ||
1870             (!w->must_flush &&
1871              (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
1872              test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
1873                 w->noflush = true;
1874                 SET_JSET_NO_FLUSH(w->data, true);
1875                 w->data->last_seq       = 0;
1876                 w->last_seq             = 0;
1877
1878                 j->nr_noflush_writes++;
1879         } else {
1880                 j->last_flush_write = jiffies;
1881                 j->nr_flush_writes++;
1882                 clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
1883         }
1884
1885         return 0;
1886 }
1887
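     /*
      * Main entry point for writing out a journal entry: picks flush vs.
      * noflush, compacts and checksums/encrypts the buffer, allocates space
      * on the journal devices, marks replicas, then submits the write(s) via
      * do_journal_write().
      */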
1888 CLOSURE_CALLBACK(bch2_journal_write)
1889 {
1890         closure_type(j, struct journal, io);
1891         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1892         struct journal_buf *w = journal_last_unwritten_buf(j);
1893         struct bch_replicas_padded replicas;
1894         struct bio *bio;
1895         struct printbuf journal_debug_buf = PRINTBUF;
1896         unsigned nr_rw_members = 0;
1897         int ret;
1898
1899         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
1900
1901         j->write_start_time = local_clock();
1902
1903         spin_lock(&j->lock);
1904         ret = bch2_journal_write_pick_flush(j, w);
1905         spin_unlock(&j->lock);
1906         if (ret)
1907                 goto err;
1908
1909         mutex_lock(&j->buf_lock);
1910         journal_buf_realloc(j, w);
1911
1912         ret = bch2_journal_write_prep(j, w);
1913         mutex_unlock(&j->buf_lock);
1914         if (ret)
1915                 goto err;
1916
1917         j->entry_bytes_written += vstruct_bytes(w->data);
1918
1919         while (1) {
1920                 spin_lock(&j->lock);
1921                 ret = journal_write_alloc(j, w);
1922                 if (!ret || !j->can_discard)
1923                         break;
1924
1925                 spin_unlock(&j->lock);
1926                 bch2_journal_do_discards(j);
1927         }
1928
1929         if (ret) {
1930                 __bch2_journal_debug_to_text(&journal_debug_buf, j);
1931                 spin_unlock(&j->lock);
1932                 bch_err(c, "Unable to allocate journal write:\n%s",
1933                         journal_debug_buf.buf);
1934                 printbuf_exit(&journal_debug_buf);
1935                 goto err;
1936         }
1937
1938         /*
1939          * write is allocated, no longer need to account for it in
1940          * bch2_journal_space_available():
1941          */
1942         w->sectors = 0;
1943
1944         /*
1945          * journal entry has been compacted and allocated, recalculate space
1946          * available:
1947          */
1948         bch2_journal_space_available(j);
1949         spin_unlock(&j->lock);
1950
1951         w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
1952
1953         if (c->opts.nochanges)
1954                 goto no_io;
1955
1956         for_each_rw_member(c, ca)
1957                 nr_rw_members++;
1958
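             /*
              * With more than one rw device, issue standalone flush bios to
              * every device up front rather than a REQ_PREFLUSH on each
              * journal write:
              */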
1959         if (nr_rw_members > 1)
1960                 w->separate_flush = true;
1961
1962         /*
1963          * Mark journal replicas before we submit the write to guarantee
1964          * recovery will find the journal entries after a crash.
1965          */
1966         bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
1967                                  w->devs_written);
1968         ret = bch2_mark_replicas(c, &replicas.e);
1969         if (ret)
1970                 goto err;
1971
1972         if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
1973                 for_each_rw_member(c, ca) {
1974                         percpu_ref_get(&ca->io_ref);
1975
1976                         bio = ca->journal.bio;
1977                         bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
1978                         bio->bi_end_io          = journal_write_endio;
1979                         bio->bi_private         = ca;
1980                         closure_bio_submit(bio, cl);
1981                 }
1982         }
1983
1984         continue_at(cl, do_journal_write, c->io_complete_wq);
1985         return;
1986 no_io:
1987         continue_at(cl, journal_write_done, c->io_complete_wq);
1988         return;
1989 err:
1990         bch2_fatal_error(c);
1991         continue_at(cl, journal_write_done, c->io_complete_wq);
1992 }