// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "sb-clean.h"
#include "trace.h"

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                               struct journal_replay *j)
{
        darray_for_each(j->ptrs, i) {
                if (i != j->ptrs.data)
                        prt_printf(out, " ");
                prt_printf(out, "%u:%u:%u (sector %llu)",
                           i->dev, i->bucket, i->bucket_offset, i->sector);
        }
}

static void bch2_journal_replay_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct journal_replay *j)
{
        prt_printf(out, "seq %llu ", le64_to_cpu(j->j.seq));

        bch2_journal_ptrs_to_text(out, c, j);

        struct jset_entry *entry;
        for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) {
                struct jset_entry_datetime *datetime =
                        container_of(entry, struct jset_entry_datetime, entry);
                bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
                break;
        }
}

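/*
 * The nonce for journal checksums/encryption is derived from the entry's
 * sequence number plus the BCH_NONCE_JOURNAL tag, so every journal entry
 * gets a distinct nonce:
 */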
static struct nonce journal_nonce(const struct jset *jset)
{
        return (struct nonce) {{
                [0] = 0,
                [1] = ((__le32 *) &jset->seq)[0],
                [2] = ((__le32 *) &jset->seq)[1],
                [3] = BCH_NONCE_JOURNAL,
        }};
}

static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum)
{
        if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) {
                *csum = (struct bch_csum) {};
                return false;
        }

        *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
        return !bch2_crc_cmp(j->csum, *csum);
}

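/*
 * The journal_entries genradix is indexed by an offset from
 * journal_entries_base_seq, masked down to 31 bits so it always fits in a
 * ulong index (see the comment in journal_entry_add()):
 */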
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
        return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}

static void __journal_replay_free(struct bch_fs *c,
                                  struct journal_replay *i)
{
        struct journal_replay **p =
                genradix_ptr(&c->journal_entries,
                             journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

        BUG_ON(*p != i);
        *p = NULL;
        kvfree(i);
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
        i->ignore = true;

        if (!c->opts.read_entire_journal)
                __journal_replay_free(c, i);
}

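/*
 * Shared state for the per-device journal read closures: last_seq is the
 * oldest sequence number we still need, lock serializes additions to the
 * journal_entries radix tree, and ret reports the first error encountered:
 */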
struct journal_list {
        struct closure          cl;
        u64                     last_seq;
        struct mutex            lock;
        int                     ret;
};

#define JOURNAL_ENTRY_ADD_OK            0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE  5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                             struct journal_ptr entry_ptr,
                             struct journal_list *jlist, struct jset *j)
{
        struct genradix_iter iter;
        struct journal_replay **_i, *i, *dup;
        size_t bytes = vstruct_bytes(j);
        u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
        struct printbuf buf = PRINTBUF;
        int ret = JOURNAL_ENTRY_ADD_OK;

        /* Is this entry older than the range we need? */
        if (!c->opts.read_entire_journal &&
            le64_to_cpu(j->seq) < jlist->last_seq)
                return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

        /*
         * genradixes are indexed by a ulong, not a u64, so we can't index them
         * by sequence number directly: Assume instead that they will all fall
         * within the range of +-2 billion of the first one we find.
         */
        if (!c->journal_entries_base_seq)
                c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);

        /* Drop entries we don't need anymore */
        if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
                genradix_for_each_from(&c->journal_entries, iter, _i,
                                       journal_entry_radix_idx(c, jlist->last_seq)) {
                        i = *_i;

                        if (!i || i->ignore)
                                continue;

                        if (le64_to_cpu(i->j.seq) >= last_seq)
                                break;
                        journal_replay_free(c, i);
                }
        }

        jlist->last_seq = max(jlist->last_seq, last_seq);

        _i = genradix_ptr_alloc(&c->journal_entries,
                                journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
                                GFP_KERNEL);
        if (!_i)
                return -BCH_ERR_ENOMEM_journal_entry_add;

        /*
         * Duplicate journal entries? If so we want the one that didn't have a
         * checksum error:
         */
        dup = *_i;
        if (dup) {
                bool identical = bytes == vstruct_bytes(&dup->j) &&
                        !memcmp(j, &dup->j, bytes);
                bool not_identical = !identical &&
                        entry_ptr.csum_good &&
                        dup->csum_good;

                bool same_device = false;
                darray_for_each(dup->ptrs, ptr)
                        if (ptr->dev == ca->dev_idx)
                                same_device = true;

                ret = darray_push(&dup->ptrs, entry_ptr);
                if (ret)
                        goto out;

                bch2_journal_replay_to_text(&buf, c, dup);

                fsck_err_on(same_device,
                            c, journal_entry_dup_same_device,
                            "duplicate journal entry on same device\n  %s",
                            buf.buf);

                fsck_err_on(not_identical,
                            c, journal_entry_replicas_data_mismatch,
                            "found duplicate but non identical journal entries\n  %s",
                            buf.buf);

                if (entry_ptr.csum_good && !identical)
                        goto replace;

                goto out;
        }
replace:
        i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
        if (!i)
                return -BCH_ERR_ENOMEM_journal_entry_add;

        darray_init(&i->ptrs);
        i->csum_good    = entry_ptr.csum_good;
        i->ignore       = false;
        unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");

        if (dup) {
                /* The first ptr should represent the jset we kept: */
                darray_for_each(dup->ptrs, ptr)
                        darray_push(&i->ptrs, *ptr);
                __journal_replay_free(c, dup);
        } else {
                darray_push(&i->ptrs, entry_ptr);
        }

        *_i = i;
out:
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
        struct jset_entry *entry;

        for (entry = start; entry != end; entry = vstruct_next(entry))
                memset(entry, 0, sizeof(*entry));
}

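/*
 * Return codes used while scanning journal buckets: REREAD means the entry
 * extends past what we've read into the buffer so far, NONE means the magic
 * number didn't match (no entry at this offset), BAD means a damaged entry
 * that should be skipped:
 */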
#define JOURNAL_ENTRY_REREAD    5
#define JOURNAL_ENTRY_NONE      6
#define JOURNAL_ENTRY_BAD       7

static void journal_entry_err_msg(struct printbuf *out,
                                  u32 version,
                                  struct jset *jset,
                                  struct jset_entry *entry)
{
        prt_str(out, "invalid journal entry, version=");
        bch2_version_to_text(out, version);

        if (entry) {
                prt_str(out, " type=");
                prt_str(out, bch2_jset_entry_types[entry->type]);
        }

        if (!jset) {
                prt_printf(out, " in superblock");
        } else {
                prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));

                if (entry)
                        prt_printf(out, " offset=%zi/%u",
                                   (u64 *) entry - jset->_data,
                                   le32_to_cpu(jset->u64s));
        }

        prt_str(out, ": ");
}

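/*
 * On READ, an invalid entry is a fixable fsck error; on WRITE, it means we're
 * about to write out corrupt metadata, so we log it, bump the superblock
 * error count and mark the filesystem inconsistent. Note that this macro
 * uses 'flags' and 'ret' from the enclosing scope:
 */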
#define journal_entry_err(c, version, jset, entry, _err, msg, ...)      \
({                                                                      \
        struct printbuf _buf = PRINTBUF;                                \
                                                                        \
        journal_entry_err_msg(&_buf, version, jset, entry);             \
        prt_printf(&_buf, msg, ##__VA_ARGS__);                          \
                                                                        \
        switch (flags & BKEY_INVALID_WRITE) {                           \
        case READ:                                                      \
                mustfix_fsck_err(c, _err, "%s", _buf.buf);              \
                break;                                                  \
        case WRITE:                                                     \
                bch2_sb_error_count(c, BCH_FSCK_ERR_##_err);            \
                bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
                if (bch2_fs_inconsistent(c)) {                          \
                        ret = -BCH_ERR_fsck_errors_not_fixed;           \
                        goto fsck_err;                                  \
                }                                                       \
                break;                                                  \
        }                                                               \
                                                                        \
        printbuf_exit(&_buf);                                           \
        true;                                                           \
})

#define journal_entry_err_on(cond, ...)                                 \
        ((cond) ? journal_entry_err(__VA_ARGS__) : false)

#define FSCK_DELETED_KEY        5

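/*
 * Validate a single bkey within a journal entry. Invalid keys are repaired in
 * place, by truncating the entry or shifting the remaining keys down;
 * FSCK_DELETED_KEY tells the caller the key at this position was dropped, so
 * it should revalidate from the same offset instead of advancing:
 */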
static int journal_validate_key(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned level, enum btree_id btree_id,
                                struct bkey_i *k,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        int write = flags & BKEY_INVALID_WRITE;
        void *next = vstruct_next(entry);
        struct printbuf buf = PRINTBUF;
        int ret = 0;

        if (journal_entry_err_on(!k->k.u64s,
                                 c, version, jset, entry,
                                 journal_entry_bkey_u64s_0,
                                 "k->u64s 0")) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (journal_entry_err_on((void *) bkey_next(k) >
                                 (void *) vstruct_next(entry),
                                 c, version, jset, entry,
                                 journal_entry_bkey_past_end,
                                 "extends past end of journal entry")) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
                                 c, version, jset, entry,
                                 journal_entry_bkey_bad_format,
                                 "bad format %u", k->k.format)) {
                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
        }

        if (!write)
                bch2_bkey_compat(level, btree_id, version, big_endian,
                                 write, NULL, bkey_to_packed(k));

        if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                              __btree_node_type(level, btree_id), write, &buf)) {
                printbuf_reset(&buf);
                journal_entry_err_msg(&buf, version, jset, entry);
                prt_newline(&buf);
                printbuf_indent_add(&buf, 2);

                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
                prt_newline(&buf);
                bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                                  __btree_node_type(level, btree_id), write, &buf);

                mustfix_fsck_err(c, journal_entry_bkey_invalid,
                                 "%s", buf.buf);

                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
                journal_entry_null_range(vstruct_next(entry), next);

                printbuf_exit(&buf);
                return FSCK_DELETED_KEY;
        }

        if (write)
                bch2_bkey_compat(level, btree_id, version, big_endian,
                                 write, NULL, bkey_to_packed(k));
fsck_err:
        printbuf_exit(&buf);
        return ret;
}

static int journal_entry_btree_keys_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct bkey_i *k = entry->start;

        while (k != vstruct_last(entry)) {
                int ret = journal_validate_key(c, jset, entry,
                                               entry->level,
                                               entry->btree_id,
                                               k, version, big_endian,
                                               flags|BKEY_INVALID_JOURNAL);
                if (ret == FSCK_DELETED_KEY)
                        continue;

                k = bkey_next(k);
        }

        return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        struct bkey_i *k;
        bool first = true;

        jset_entry_for_each_key(entry, k) {
                if (!first) {
                        prt_newline(out);
                        prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                }
                prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
                bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
                first = false;
        }
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct bkey_i *k = entry->start;
        int ret = 0;

        if (journal_entry_err_on(!entry->u64s ||
                                 le16_to_cpu(entry->u64s) != k->k.u64s,
                                 c, version, jset, entry,
                                 journal_entry_btree_root_bad_size,
                                 "invalid btree root journal entry: wrong number of keys")) {
                void *next = vstruct_next(entry);
                /*
                 * we don't want to null out this jset_entry,
                 * just the contents, so that later we can tell
                 * we were _supposed_ to have a btree root
                 */
                entry->u64s = 0;
                journal_entry_null_range(vstruct_next(entry), next);
                return 0;
        }

        ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
                                   version, big_endian, flags);
        if (ret == FSCK_DELETED_KEY)
                ret = 0;
fsck_err:
        return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        /* obsolete, don't care: */
        return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
                                 c, version, jset, entry,
                                 journal_entry_blacklist_bad_size,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
fsck_err:
        return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_blacklist *bl =
                container_of(entry, struct jset_entry_blacklist, entry);

        prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_blacklist_v2 *bl_entry;
        int ret = 0;

        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
                                 c, version, jset, entry,
                                 journal_entry_blacklist_v2_bad_size,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }

        bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

        if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
                                 le64_to_cpu(bl_entry->end),
                                 c, version, jset, entry,
                                 journal_entry_blacklist_v2_start_past_end,
                "invalid journal seq blacklist entry: start > end")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
out:
fsck_err:
        return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
                                               struct jset_entry *entry)
{
        struct jset_entry_blacklist_v2 *bl =
                container_of(entry, struct jset_entry_blacklist_v2, entry);

        prt_printf(out, "start=%llu end=%llu",
                   le64_to_cpu(bl->start),
                   le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u),
                                 c, version, jset, entry,
                                 journal_entry_usage_bad_size,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct jset_entry *entry)
{
        struct jset_entry_usage *u =
                container_of(entry, struct jset_entry_usage, entry);

        prt_printf(out, "type=%s v=%llu",
                   bch2_fs_usage_types[u->entry.btree_id],
                   le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        struct printbuf err = PRINTBUF;
        int ret = 0;

        if (journal_entry_err_on(bytes < sizeof(*u) ||
                                 bytes < sizeof(*u) + u->r.nr_devs,
                                 c, version, jset, entry,
                                 journal_entry_data_usage_bad_size,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }

        if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
                                 c, version, jset, entry,
                                 journal_entry_data_usage_bad_size,
                                 "invalid journal entry usage: %s", err.buf)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
        }
out:
fsck_err:
        printbuf_exit(&err);
        return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                             struct jset_entry *entry)
{
        struct jset_entry_data_usage *u =
                container_of(entry, struct jset_entry_data_usage, entry);

        bch2_replicas_entry_to_text(out, &u->r);
        prt_printf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_clock *clock =
                container_of(entry, struct jset_entry_clock, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        int ret = 0;

        if (journal_entry_err_on(bytes != sizeof(*clock),
                                 c, version, jset, entry,
                                 journal_entry_clock_bad_size,
                                 "bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        if (journal_entry_err_on(clock->rw > 1,
                                 c, version, jset, entry,
                                 journal_entry_clock_bad_rw,
                                 "bad rw")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
                                        struct jset_entry *entry)
{
        struct jset_entry_clock *clock =
                container_of(entry, struct jset_entry_clock, entry);

        prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        struct jset_entry_dev_usage *u =
                container_of(entry, struct jset_entry_dev_usage, entry);
        unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
        unsigned expected = sizeof(*u);
        unsigned dev;
        int ret = 0;

        if (journal_entry_err_on(bytes < expected,
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_size,
                                 "bad size (%u < %u)",
                                 bytes, expected)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        dev = le32_to_cpu(u->dev);

        if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_dev,
                                 "bad dev")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

        if (journal_entry_err_on(u->pad,
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_pad,
                                 "bad pad")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }

fsck_err:
        return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_dev_usage *u =
                container_of(entry, struct jset_entry_dev_usage, entry);
        unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

        prt_printf(out, "dev=%u", le32_to_cpu(u->dev));

        for (i = 0; i < nr_types; i++) {
                bch2_prt_data_type(out, i);
                prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
                           le64_to_cpu(u->d[i].buckets),
                           le64_to_cpu(u->d[i].sectors),
                           le64_to_cpu(u->d[i].fragmented));
        }
}

static int journal_entry_log_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
                                      struct jset_entry *entry)
{
        struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
        unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

        prt_printf(out, "%.*s", bytes, l->d);
}

static int journal_entry_overwrite_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return journal_entry_btree_keys_validate(c, jset, entry,
                                version, big_endian, READ);
}

static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return journal_entry_btree_keys_validate(c, jset, entry,
                                version, big_endian, READ);
}

static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_datetime_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        unsigned bytes = vstruct_bytes(entry);
        unsigned expected = 16;
        int ret = 0;

        if (journal_entry_err_on(vstruct_bytes(entry) < expected,
                                 c, version, jset, entry,
                                 journal_entry_dev_usage_bad_size,
                                 "bad size (%u < %u)",
                                 bytes, expected)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }
fsck_err:
        return ret;
}

static void journal_entry_datetime_to_text(struct printbuf *out, struct bch_fs *c,
                                            struct jset_entry *entry)
{
        struct jset_entry_datetime *datetime =
                container_of(entry, struct jset_entry_datetime, entry);

        bch2_prt_datetime(out, le64_to_cpu(datetime->seconds));
}

struct jset_entry_ops {
        int (*validate)(struct bch_fs *, struct jset *,
                        struct jset_entry *, unsigned, int,
                        enum bkey_invalid_flags);
        void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

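/*
 * Build the dispatch table from the BCH_JSET_ENTRY_TYPES() x-macro, so every
 * entry type automatically gets its _validate/_to_text hooks wired up:
 */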
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)                                                \
        [BCH_JSET_ENTRY_##f]    = (struct jset_entry_ops) {     \
                .validate       = journal_entry_##f##_validate, \
                .to_text        = journal_entry_##f##_to_text,  \
        },
        BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c,
                                struct jset *jset,
                                struct jset_entry *entry,
                                unsigned version, int big_endian,
                                enum bkey_invalid_flags flags)
{
        return entry->type < BCH_JSET_ENTRY_NR
                ? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
                                version, big_endian, flags)
                : 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
                                struct jset_entry *entry)
{
        if (entry->type < BCH_JSET_ENTRY_NR) {
                prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
        } else {
                prt_printf(out, "(unknown type %u)", entry->type);
        }
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
                                 enum bkey_invalid_flags flags)
{
        unsigned version = le32_to_cpu(jset->version);
        int ret = 0;

        vstruct_for_each(jset, entry) {
                if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
                                c, version, jset, entry,
                                journal_entry_past_jset_end,
                                "journal entry extends past end of jset")) {
                        jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
                        break;
                }

                ret = bch2_journal_entry_validate(c, jset, entry,
                                        version, JSET_BIG_ENDIAN(jset), flags);
                if (ret)
                        break;
        }
fsck_err:
        return ret;
}

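/*
 * Full validation of a journal entry we've already read and checksummed:
 * returns JOURNAL_ENTRY_NONE if this doesn't look like a journal entry at all
 * (wrong magic), JOURNAL_ENTRY_BAD for entries that should be skipped:
 */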
static int jset_validate(struct bch_fs *c,
                         struct bch_dev *ca,
                         struct jset *jset, u64 sector,
                         enum bkey_invalid_flags flags)
{
        unsigned version;
        int ret = 0;

        if (le64_to_cpu(jset->magic) != jset_magic(c))
                return JOURNAL_ENTRY_NONE;

        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on(!bch2_version_compatible(version),
                        c, version, jset, NULL,
                        jset_unsupported_version,
                        "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        BCH_VERSION_MAJOR(version),
                        BCH_VERSION_MINOR(version))) {
                /* don't try to continue: */
                return -EINVAL;
        }

        if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
                        c, version, jset, NULL,
                        jset_unknown_csum,
                        "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        JSET_CSUM_TYPE(jset)))
                ret = JOURNAL_ENTRY_BAD;

        /* last_seq is ignored when JSET_NO_FLUSH is true */
        if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
                                 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
                                 c, version, jset, NULL,
                                 jset_last_seq_newer_than_seq,
                                 "invalid journal entry: last_seq > seq (%llu > %llu)",
                                 le64_to_cpu(jset->last_seq),
                                 le64_to_cpu(jset->seq))) {
                jset->last_seq = jset->seq;
                return JOURNAL_ENTRY_BAD;
        }

        ret = jset_validate_entries(c, jset, flags);
fsck_err:
        return ret;
}

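/*
 * Minimal validation done while still reading, before we have the whole entry
 * in memory (and before the checksum can be checked): just enough to tell
 * whether we should keep reading, reread with a bigger buffer, or give up on
 * this bucket:
 */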
static int jset_validate_early(struct bch_fs *c,
                         struct bch_dev *ca,
                         struct jset *jset, u64 sector,
                         unsigned bucket_sectors_left,
                         unsigned sectors_read)
{
        size_t bytes = vstruct_bytes(jset);
        unsigned version;
        enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
        int ret = 0;

        if (le64_to_cpu(jset->magic) != jset_magic(c))
                return JOURNAL_ENTRY_NONE;

        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on(!bch2_version_compatible(version),
                        c, version, jset, NULL,
                        jset_unsupported_version,
                        "%s sector %llu seq %llu: unknown journal entry version %u.%u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
                        BCH_VERSION_MAJOR(version),
                        BCH_VERSION_MINOR(version))) {
                /* don't try to continue: */
                return -EINVAL;
        }

        if (bytes > (sectors_read << 9) &&
            sectors_read < bucket_sectors_left)
                return JOURNAL_ENTRY_REREAD;

        if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
                        c, version, jset, NULL,
                        jset_past_bucket_end,
                        "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq), bytes))
                le32_add_cpu(&jset->u64s,
                             -((bytes - (bucket_sectors_left << 9)) / 8));
fsck_err:
        return ret;
}

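/*
 * Scratch buffer for reading journal entries, grown (up to
 * JOURNAL_ENTRY_SIZE_MAX) as we encounter entries bigger than it:
 */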
struct journal_read_buf {
        void            *data;
        size_t          size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
                                    size_t new_size)
{
        void *n;

        /* the bios are sized for this many pages, max: */
        if (new_size > JOURNAL_ENTRY_SIZE_MAX)
                return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

        new_size = roundup_pow_of_two(new_size);
        n = kvmalloc(new_size, GFP_KERNEL);
        if (!n)
                return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

        kvfree(b->data);
        b->data = n;
        b->size = new_size;
        return 0;
}

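/*
 * Read a single journal bucket, walking it entry by entry: each entry is
 * validated early, checksummed, decrypted, then handed to
 * journal_entry_add(). Read errors are not fatal here, since the same entry
 * may exist on another device:
 */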
static int journal_read_bucket(struct bch_dev *ca,
                               struct journal_read_buf *buf,
                               struct journal_list *jlist,
                               unsigned bucket)
{
        struct bch_fs *c = ca->fs;
        struct journal_device *ja = &ca->journal;
        struct jset *j = NULL;
        unsigned sectors, sectors_read = 0;
        u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
            end = offset + ca->mi.bucket_size;
        bool saw_bad = false, csum_good;
        struct printbuf err = PRINTBUF;
        int ret = 0;

        pr_debug("reading %u", bucket);

        while (offset < end) {
                if (!sectors_read) {
                        struct bio *bio;
                        unsigned nr_bvecs;
reread:
                        sectors_read = min_t(unsigned,
                                end - offset, buf->size >> 9);
                        nr_bvecs = buf_pages(buf->data, sectors_read << 9);

                        bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
                        bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

                        bio->bi_iter.bi_sector = offset;
                        bch2_bio_map(bio, buf->data, sectors_read << 9);

                        ret = submit_bio_wait(bio);
                        kfree(bio);

                        if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
                                               "journal read error: sector %llu",
                                               offset) ||
                            bch2_meta_read_fault("journal")) {
                                /*
                                 * We don't error out of the recovery process
                                 * here, since the relevant journal entry may be
                                 * found on a different device, and missing or
                                 * no journal entries will be handled later
                                 */
                                goto out;
                        }

                        j = buf->data;
                }

                ret = jset_validate_early(c, ca, j, offset,
                                    end - offset, sectors_read);
                switch (ret) {
                case 0:
                        sectors = vstruct_sectors(j, c->block_bits);
                        break;
                case JOURNAL_ENTRY_REREAD:
                        if (vstruct_bytes(j) > buf->size) {
                                ret = journal_read_buf_realloc(buf,
                                                        vstruct_bytes(j));
                                if (ret)
                                        goto err;
                        }
                        goto reread;
                case JOURNAL_ENTRY_NONE:
                        if (!saw_bad)
                                goto out;
                        /*
                         * On checksum error we don't really trust the size
                         * field of the journal entry we read, so try reading
                         * again at next block boundary:
                         */
                        sectors = block_sectors(c);
                        goto next_block;
                default:
                        goto err;
                }

                /*
                 * This happens sometimes if we don't have discards on -
                 * when we've partially overwritten a bucket with new
                 * journal entries. We don't need the rest of the
                 * bucket:
                 */
                if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
                        goto out;

                ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

                enum bch_csum_type csum_type = JSET_CSUM_TYPE(j);
                struct bch_csum csum;
                csum_good = jset_csum_good(c, j, &csum);

                if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
                                       "%s",
                                       (printbuf_reset(&err),
                                        prt_str(&err, "journal "),
                                        bch2_csum_err_msg(&err, csum_type, j->csum, csum),
                                        err.buf)))
                        saw_bad = true;

                ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
                             j->encrypted_start,
                             vstruct_end(j) - (void *) j->encrypted_start);
                bch2_fs_fatal_err_on(ret, c,
                                "error decrypting journal entry: %s",
                                bch2_err_str(ret));

                mutex_lock(&jlist->lock);
                ret = journal_entry_add(c, ca, (struct journal_ptr) {
                                        .csum_good      = csum_good,
                                        .dev            = ca->dev_idx,
                                        .bucket         = bucket,
                                        .bucket_offset  = offset -
                                                bucket_to_sector(ca, ja->buckets[bucket]),
                                        .sector         = offset,
                                        }, jlist, j);
                mutex_unlock(&jlist->lock);

                switch (ret) {
                case JOURNAL_ENTRY_ADD_OK:
                        break;
                case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
                        break;
                default:
                        goto err;
                }
next_block:
                pr_debug("next");
                offset          += sectors;
                sectors_read    -= sectors;
                j = ((void *) j) + (sectors << 9);
        }

out:
        ret = 0;
err:
        printbuf_exit(&err);
        return ret;
}

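/*
 * Per-device read path, run as a closure so all devices can be read in
 * parallel: read every journal bucket, then scan the entries we found to work
 * out where the journal ends on this device (cur_idx/sectors_free):
 */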
static CLOSURE_CALLBACK(bch2_journal_read_device)
{
        closure_type(ja, struct journal_device, read);
        struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
        struct bch_fs *c = ca->fs;
        struct journal_list *jlist =
                container_of(cl->parent, struct journal_list, cl);
        struct journal_replay *r, **_r;
        struct genradix_iter iter;
        struct journal_read_buf buf = { NULL, 0 };
        unsigned i;
        int ret = 0;

        if (!ja->nr)
                goto out;

        ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
        if (ret)
                goto err;

        pr_debug("%u journal buckets", ja->nr);

        for (i = 0; i < ja->nr; i++) {
                ret = journal_read_bucket(ca, &buf, jlist, i);
                if (ret)
                        goto err;
        }

        ja->sectors_free = ca->mi.bucket_size;

        mutex_lock(&jlist->lock);
        genradix_for_each_reverse(&c->journal_entries, iter, _r) {
                r = *_r;

                if (!r)
                        continue;

                darray_for_each(r->ptrs, i)
                        if (i->dev == ca->dev_idx) {
                                unsigned wrote = bucket_remainder(ca, i->sector) +
                                        vstruct_sectors(&r->j, c->block_bits);

                                ja->cur_idx = i->bucket;
                                ja->sectors_free = ca->mi.bucket_size - wrote;
                                goto found;
                        }
        }
found:
        mutex_unlock(&jlist->lock);

        if (ja->bucket_seq[ja->cur_idx] &&
            ja->sectors_free == ca->mi.bucket_size) {
#if 0
                /*
                 * Debug code for ZNS support, where we (probably) want to
                 * correlate where we stopped in the journal with the zone
                 * write points:
                 */
                bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
                bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
                for (i = 0; i < 3; i++) {
                        unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

                        bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
                }
#endif
                ja->sectors_free = 0;
        }

        /*
         * Set dirty_idx to indicate the entire journal is full and needs to be
         * reclaimed - journal reclaim will immediately reclaim whatever isn't
         * pinned when it first runs:
         */
        ja->discard_idx = ja->dirty_idx_ondisk =
                ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
        bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
        kvfree(buf.data);
        percpu_ref_put(&ca->io_ref);
        closure_return(cl);
        return;
err:
        mutex_lock(&jlist->lock);
        jlist->ret = ret;
        mutex_unlock(&jlist->lock);
        goto out;
}

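/*
 * Top level journal read: kick off a read for every member device, then find
 * the most recent flush entry, drop blacklisted entries and entries older
 * than last_seq, flag any gaps in the sequence numbers, and validate and mark
 * replicas for what remains:
 */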
1208 int bch2_journal_read(struct bch_fs *c,
1209                       u64 *last_seq,
1210                       u64 *blacklist_seq,
1211                       u64 *start_seq)
1212 {
1213         struct journal_list jlist;
1214         struct journal_replay *i, **_i, *prev = NULL;
1215         struct genradix_iter radix_iter;
1216         struct printbuf buf = PRINTBUF;
1217         bool degraded = false, last_write_torn = false;
1218         u64 seq;
1219         int ret = 0;
1220
1221         closure_init_stack(&jlist.cl);
1222         mutex_init(&jlist.lock);
1223         jlist.last_seq = 0;
1224         jlist.ret = 0;
1225
1226         for_each_member_device(c, ca) {
1227                 if (!c->opts.fsck &&
1228                     !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
1229                         continue;
1230
1231                 if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
1232                      ca->mi.state == BCH_MEMBER_STATE_ro) &&
1233                     percpu_ref_tryget(&ca->io_ref))
1234                         closure_call(&ca->journal.read,
1235                                      bch2_journal_read_device,
1236                                      system_unbound_wq,
1237                                      &jlist.cl);
1238                 else
1239                         degraded = true;
1240         }
1241
1242         closure_sync(&jlist.cl);
1243
1244         if (jlist.ret)
1245                 return jlist.ret;
1246
1247         *last_seq       = 0;
1248         *start_seq      = 0;
1249         *blacklist_seq  = 0;
1250
1251         /*
1252          * Find most recent flush entry, and ignore newer non flush entries -
1253          * those entries will be blacklisted:
1254          */
1255         genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
1256                 enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
1257
1258                 i = *_i;
1259
1260                 if (!i || i->ignore)
1261                         continue;
1262
1263                 if (!*start_seq)
1264                         *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;
1265
1266                 if (JSET_NO_FLUSH(&i->j)) {
1267                         i->ignore = true;
1268                         continue;
1269                 }
1270
1271                 if (!last_write_torn && !i->csum_good) {
1272                         last_write_torn = true;
1273                         i->ignore = true;
1274                         continue;
1275                 }
1276
1277                 if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
1278                                          c, le32_to_cpu(i->j.version), &i->j, NULL,
1279                                          jset_last_seq_newer_than_seq,
1280                                          "invalid journal entry: last_seq > seq (%llu > %llu)",
1281                                          le64_to_cpu(i->j.last_seq),
1282                                          le64_to_cpu(i->j.seq)))
1283                         i->j.last_seq = i->j.seq;
1284
1285                 *last_seq       = le64_to_cpu(i->j.last_seq);
1286                 *blacklist_seq  = le64_to_cpu(i->j.seq) + 1;
1287                 break;
1288         }
1289
1290         if (!*start_seq) {
1291                 bch_info(c, "journal read done, but no entries found");
1292                 return 0;
1293         }
1294
1295         if (!*last_seq) {
1296                 fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
1297                          "journal read done, but no entries found after dropping non-flushes");
1298                 return 0;
1299         }
1300
1301         bch_info(c, "journal read done, replaying entries %llu-%llu",
1302                  *last_seq, *blacklist_seq - 1);
1303
1304         if (*start_seq != *blacklist_seq)
1305                 bch_info(c, "dropped unflushed entries %llu-%llu",
1306                          *blacklist_seq, *start_seq - 1);
1307
1308         /* Drop blacklisted entries and entries older than last_seq: */
1309         genradix_for_each(&c->journal_entries, radix_iter, _i) {
1310                 i = *_i;
1311
1312                 if (!i || i->ignore)
1313                         continue;
1314
1315                 seq = le64_to_cpu(i->j.seq);
1316                 if (seq < *last_seq) {
1317                         journal_replay_free(c, i);
1318                         continue;
1319                 }
1320
1321                 if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
1322                         fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
1323                                     jset_seq_blacklisted,
1324                                     "found blacklisted journal entry %llu", seq);
1325                         i->ignore = true;
1326                 }
1327         }
1328
1329         /* Check for missing entries: */
1330         seq = *last_seq;
1331         genradix_for_each(&c->journal_entries, radix_iter, _i) {
1332                 i = *_i;
1333
1334                 if (!i || i->ignore)
1335                         continue;
1336
1337                 BUG_ON(seq > le64_to_cpu(i->j.seq));
1338
1339                 while (seq < le64_to_cpu(i->j.seq)) {
1340                         u64 missing_start, missing_end;
1341                         struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
1342
1343                         while (seq < le64_to_cpu(i->j.seq) &&
1344                                bch2_journal_seq_is_blacklisted(c, seq, false))
1345                                 seq++;
1346
1347                         if (seq == le64_to_cpu(i->j.seq))
1348                                 break;
1349
1350                         missing_start = seq;
1351
1352                         while (seq < le64_to_cpu(i->j.seq) &&
1353                                !bch2_journal_seq_is_blacklisted(c, seq, false))
1354                                 seq++;
1355
1356                         if (prev) {
1357                                 bch2_journal_ptrs_to_text(&buf1, c, prev);
1358                                 prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
1359                         } else
1360                                 prt_printf(&buf1, "(none)");
1361                         bch2_journal_ptrs_to_text(&buf2, c, i);
1362
1363                         missing_end = seq - 1;
1364                         fsck_err(c, journal_entries_missing,
1365                                  "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
1366                                  "  prev at %s\n"
1367                                  "  next at %s",
1368                                  missing_start, missing_end,
1369                                  *last_seq, *blacklist_seq - 1,
1370                                  buf1.buf, buf2.buf);
1371
1372                         printbuf_exit(&buf1);
1373                         printbuf_exit(&buf2);
1374                 }
1375
1376                 prev = i;
1377                 seq++;
1378         }
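
        /*
         * Gaps that fall entirely within blacklisted ranges are skipped
         * silently; any other hole in the range being replayed is an fsck
         * error, reported with the on-disk locations of the entries on
         * either side of it to help pinpoint what was lost.
         */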
1379
1380         genradix_for_each(&c->journal_entries, radix_iter, _i) {
1381                 struct bch_replicas_padded replicas = {
1382                         .e.data_type = BCH_DATA_journal,
1383                         .e.nr_required = 1,
1384                 };
1385
1386                 i = *_i;
1387                 if (!i || i->ignore)
1388                         continue;
1389
1390                 darray_for_each(i->ptrs, ptr) {
1391                         struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1392
1393                         if (!ptr->csum_good)
1394                                 bch_err_dev_offset(ca, ptr->sector,
1395                                                    "invalid journal checksum, seq %llu%s",
1396                                                    le64_to_cpu(i->j.seq),
1397                                                    i->csum_good ? " (had good copy on another device)" : "");
1398                 }
1399
1400                 ret = jset_validate(c,
1401                                     bch_dev_bkey_exists(c, i->ptrs.data[0].dev),
1402                                     &i->j,
1403                                     i->ptrs.data[0].sector,
1404                                     READ);
1405                 if (ret)
1406                         goto err;
1407
1408                 darray_for_each(i->ptrs, ptr)
1409                         replicas.e.devs[replicas.e.nr_devs++] = ptr->dev;
1410
1411                 bch2_replicas_entry_sort(&replicas.e);
1412
1413                 printbuf_reset(&buf);
1414                 bch2_replicas_entry_to_text(&buf, &replicas.e);
1415
1416                 if (!degraded &&
1417                     !bch2_replicas_marked(c, &replicas.e) &&
1418                     (le64_to_cpu(i->j.seq) == *last_seq ||
1419                      fsck_err(c, journal_entry_replicas_not_marked,
1420                               "superblock not marked as containing replicas for journal entry %llu\n  %s",
1421                               le64_to_cpu(i->j.seq), buf.buf))) {
1422                         ret = bch2_mark_replicas(c, &replicas.e);
1423                         if (ret)
1424                                 goto err;
1425                 }
1426         }
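
        /*
         * If the superblock wasn't already marked as holding journal data
         * on exactly these devices, we report it via fsck_err above
         * (except for the entry at *last_seq, which is fixed up silently)
         * and mark the replicas entry now - unless we're mounting
         * degraded, where unmarked replicas are expected.
         */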
1427 err:
1428 fsck_err:
1429         printbuf_exit(&buf);
1430         return ret;
1431 }
1432
1433 /* journal write: */
1434
1435 static void __journal_write_alloc(struct journal *j,
1436                                   struct journal_buf *w,
1437                                   struct dev_alloc_list *devs_sorted,
1438                                   unsigned sectors,
1439                                   unsigned *replicas,
1440                                   unsigned replicas_want)
1441 {
1442         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1443         struct journal_device *ja;
1444         struct bch_dev *ca;
1445         unsigned i;
1446
1447         if (*replicas >= replicas_want)
1448                 return;
1449
1450         for (i = 0; i < devs_sorted->nr; i++) {
1451                 ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
1452                 if (!ca)
1453                         continue;
1454
1455                 ja = &ca->journal;
1456
1457                 /*
1458                  * Check that we can use this device, and aren't already using
1459                  * it:
1460                  */
1461                 if (!ca->mi.durability ||
1462                     ca->mi.state != BCH_MEMBER_STATE_rw ||
1463                     !ja->nr ||
1464                     bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
1465                     sectors > ja->sectors_free)
1466                         continue;
1467
1468                 bch2_dev_stripe_increment(ca, &j->wp.stripe);
1469
1470                 bch2_bkey_append_ptr(&w->key,
1471                         (struct bch_extent_ptr) {
1472                                   .offset = bucket_to_sector(ca,
1473                                         ja->buckets[ja->cur_idx]) +
1474                                         ca->mi.bucket_size -
1475                                         ja->sectors_free,
1476                                   .dev = ca->dev_idx,
1477                 });
1478
1479                 ja->sectors_free -= sectors;
1480                 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1481
1482                 *replicas += ca->mi.durability;
1483
1484                 if (*replicas >= replicas_want)
1485                         break;
1486         }
1487 }
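
/*
 * Devices are tried in the order given by bch2_dev_alloc_list(), which
 * presumably rotates via j->wp.stripe so that successive journal writes are
 * spread across devices; a device is skipped if it has no durability, isn't
 * RW, has no journal buckets, already has a pointer in this write's key, or
 * lacks room in its current journal bucket.
 */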
1488
1489 /**
1490  * journal_write_alloc - decide where to write the next journal entry
1491  *
1492  * @j:          journal object
1493  * @w:          journal buf (entry to be written)
1494  *
1495  * Returns: 0 on success, or -EROFS on failure
1496  */
1497 static int journal_write_alloc(struct journal *j, struct journal_buf *w)
1498 {
1499         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1500         struct bch_devs_mask devs;
1501         struct journal_device *ja;
1502         struct bch_dev *ca;
1503         struct dev_alloc_list devs_sorted;
1504         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
1505         unsigned target = c->opts.metadata_target ?:
1506                 c->opts.foreground_target;
1507         unsigned i, replicas = 0, replicas_want =
1508                 READ_ONCE(c->opts.metadata_replicas);
1509         unsigned replicas_need = min(replicas_want,
1510                                      READ_ONCE(c->opts.metadata_replicas_required));
1511
1512         rcu_read_lock();
1513 retry:
1514         devs = target_rw_devs(c, BCH_DATA_journal, target);
1515
1516         devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
1517
1518         __journal_write_alloc(j, w, &devs_sorted,
1519                               sectors, &replicas, replicas_want);
1520
1521         if (replicas >= replicas_want)
1522                 goto done;
1523
1524         for (i = 0; i < devs_sorted.nr; i++) {
1525                 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
1526                 if (!ca)
1527                         continue;
1528
1529                 ja = &ca->journal;
1530
1531                 if (sectors > ja->sectors_free &&
1532                     sectors <= ca->mi.bucket_size &&
1533                     bch2_journal_dev_buckets_available(j, ja,
1534                                         journal_space_discarded)) {
1535                         ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
1536                         ja->sectors_free = ca->mi.bucket_size;
1537
1538                         /*
1539                          * ja->bucket_seq[ja->cur_idx] must always have
1540                          * something sensible:
1541                          */
1542                         ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1543                 }
1544         }
1545
1546         __journal_write_alloc(j, w, &devs_sorted,
1547                               sectors, &replicas, replicas_want);
1548
1549         if (replicas < replicas_want && target) {
1550                 /* Retry from all devices: */
1551                 target = 0;
1552                 goto retry;
1553         }
1554 done:
1555         rcu_read_unlock();
1556
1557         BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
1558
1559         return replicas >= replicas_need ? 0 : -EROFS;
1560 }
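
/*
 * E.g. with metadata_replicas = 2 and a metadata target: first try target
 * devices that still have room in their current journal bucket; if that
 * yields fewer than two copies, advance candidate devices to a fresh bucket
 * and try again; if we're still short, retry with target = 0 (all RW
 * devices). The write only fails (-EROFS) if we end up with fewer than
 * replicas_need copies.
 */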
1561
1562 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
1563 {
1564         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1565
1566         /* we aren't holding j->lock: */
1567         unsigned new_size = READ_ONCE(j->buf_size_want);
1568         void *new_buf;
1569
1570         if (buf->buf_size >= new_size)
1571                 return;
1572
1573         size_t btree_write_buffer_size = new_size / 64;
1574
1575         if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
1576                 return;
1577
1578         new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
1579         if (!new_buf)
1580                 return;
1581
1582         memcpy(new_buf, buf->data, buf->buf_size);
1583
1584         spin_lock(&j->lock);
1585         swap(buf->data,         new_buf);
1586         swap(buf->buf_size,     new_size);
1587         spin_unlock(&j->lock);
1588
1589         kvfree(new_buf);
1590 }
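
/*
 * The btree write buffer is kept proportional to the journal buffer (1/64th
 * here), and is grown first: if resizing it fails we keep the old journal
 * buffer size, presumably so that a full journal entry's keys can always be
 * flushed to the write buffer.
 */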
1591
1592 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
1593 {
1594         return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
1595 }
1596
1597 static CLOSURE_CALLBACK(journal_write_done)
1598 {
1599         closure_type(w, struct journal_buf, io);
1600         struct journal *j = container_of(w, struct journal, buf[w->idx]);
1601         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1602         struct bch_replicas_padded replicas;
1603         union journal_res_state old, new;
1604         u64 v, seq = le64_to_cpu(w->data->seq);
1605         int err = 0;
1606
1607         time_stats_update(!JSET_NO_FLUSH(w->data)
1608                           ? j->flush_write_time
1609                           : j->noflush_write_time, j->write_start_time);
1610
1611         if (!w->devs_written.nr) {
1612                 bch_err(c, "unable to write journal to sufficient devices");
1613                 err = -EIO;
1614         } else {
1615                 bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
1616                                          w->devs_written);
1617                 if (bch2_mark_replicas(c, &replicas.e))
1618                         err = -EIO;
1619         }
1620
1621         if (err)
1622                 bch2_fatal_error(c);
1623
1624         closure_debug_destroy(cl);
1625
1626         spin_lock(&j->lock);
1627         if (seq >= j->pin.front)
1628                 journal_seq_pin(j, seq)->devs = w->devs_written;
1629         if (err && (!j->err_seq || seq < j->err_seq))
1630                 j->err_seq      = seq;
1631         w->write_done = true;
1632
1633         bool completed = false;
1634
1635         for (seq = journal_last_unwritten_seq(j);
1636              seq <= journal_cur_seq(j);
1637              seq++) {
1638                 w = j->buf + (seq & JOURNAL_BUF_MASK);
1639                 if (!w->write_done)
1640                         break;
1641
1642                 if (!j->err_seq && !JSET_NO_FLUSH(w->data)) {
1643                         j->flushed_seq_ondisk = seq;
1644                         j->last_seq_ondisk = w->last_seq;
1645
1646                         bch2_do_discards(c);
1647                         closure_wake_up(&c->freelist_wait);
1648                         bch2_reset_alloc_cursors(c);
1649                 }
1650
1651                 j->seq_ondisk = seq;
1652
1653                 /*
1654                  * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
1655                  * more buckets:
1656                  *
1657                  * This must happen before signaling write completion, for the
1658                  * sake of bch2_fs_journal_stop():
1659                  */
1660                 if (j->watermark != BCH_WATERMARK_stripe)
1661                         journal_reclaim_kick(&c->journal);
1662
1663                 v = atomic64_read(&j->reservations.counter);
1664                 do {
1665                         old.v = new.v = v;
1666                         BUG_ON(journal_state_count(new, new.unwritten_idx));
1667                         BUG_ON(new.unwritten_idx != (seq & JOURNAL_BUF_MASK));
1668
1669                         new.unwritten_idx++;
1670                 } while ((v = atomic64_cmpxchg(&j->reservations.counter, old.v, new.v)) != old.v);
1671
1672                 completed = true;
1673         }
1674
1675         if (completed) {
1676                 bch2_journal_reclaim_fast(j);
1677                 bch2_journal_space_available(j);
1678
1679                 track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], false);
1680
1681                 closure_wake_up(&w->wait);
1682                 journal_wake(j);
1683         }
1684
1685         if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
1686                    new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
1687                 struct journal_buf *buf = journal_cur_buf(j);
1688                 long delta = buf->expires - jiffies;
1689
1690                 /*
1691                  * We don't close a journal entry to write it while there are
1692                  * previous entries still in flight - the current journal entry
1693                  * might want to be written now:
1694                  */
1695                 mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
1696         }
1697
1698         spin_unlock(&j->lock);
1699 }
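
/*
 * Journal writes can complete out of order; the loop above only advances
 * unwritten_idx across a contiguous run of completed buffers, so
 * seq_ondisk/flushed_seq_ondisk never skip past an entry whose write is
 * still in flight.
 */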
1700
1701 static void journal_write_endio(struct bio *bio)
1702 {
1703         struct journal_bio *jbio = container_of(bio, struct journal_bio, bio);
1704         struct bch_dev *ca = jbio->ca;
1705         struct journal *j = &ca->fs->journal;
1706         struct journal_buf *w = j->buf + jbio->buf_idx;
1707
1708         if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
1709                                "error writing journal entry %llu: %s",
1710                                le64_to_cpu(w->data->seq),
1711                                bch2_blk_status_to_str(bio->bi_status)) ||
1712             bch2_meta_write_fault("journal")) {
1713                 unsigned long flags;
1714
1715                 spin_lock_irqsave(&j->err_lock, flags);
1716                 bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
1717                 spin_unlock_irqrestore(&j->err_lock, flags);
1718         }
1719
1720         closure_put(&w->io);
1721         percpu_ref_put(&ca->io_ref);
1722 }
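
/*
 * A per-device write error just drops that device from devs_written;
 * whether the entry is still sufficiently replicated is decided in
 * journal_write_done(), once every bio has completed.
 */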
1723
1724 static CLOSURE_CALLBACK(do_journal_write)
1725 {
1726         closure_type(w, struct journal_buf, io);
1727         struct journal *j = container_of(w, struct journal, buf[w->idx]);
1728         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1729         unsigned sectors = vstruct_sectors(w->data, c->block_bits);
1730
1731         extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
1732                 struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
1733                 struct journal_device *ja = &ca->journal;
1734
1735                 if (!percpu_ref_tryget(&ca->io_ref)) {
1736                         /* XXX: fix this */
1737                         bch_err(c, "missing device for journal write\n");
1738                         continue;
1739                 }
1740
1741                 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
1742                              sectors);
1743
1744                 struct bio *bio = &ja->bio[w->idx]->bio;
1745                 bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
1746                 bio->bi_iter.bi_sector  = ptr->offset;
1747                 bio->bi_end_io          = journal_write_endio;
1748                 bio->bi_private         = ca;
1749
1750                 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
1751                 ca->prev_journal_sector = bio->bi_iter.bi_sector;
1752
1753                 if (!JSET_NO_FLUSH(w->data))
1754                         bio->bi_opf    |= REQ_FUA;
1755                 if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
1756                         bio->bi_opf    |= REQ_PREFLUSH;
1757
1758                 bch2_bio_map(bio, w->data, sectors << 9);
1759
1760                 trace_and_count(c, journal_write, bio);
1761                 closure_bio_submit(bio, cl);
1762
1763                 ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
1764         }
1765
1766         continue_at(cl, journal_write_done, j->wq);
1767 }
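
/*
 * Flush writes get REQ_FUA, plus REQ_PREFLUSH when a separate flush wasn't
 * already issued; with more than one RW member the caller sets
 * w->separate_flush and submits standalone flush bios instead, presumably so
 * the flushes run in parallel rather than each data write paying for its own
 * preflush.
 */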
1768
1769 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
1770 {
1771         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1772         struct jset_entry *start, *end;
1773         struct jset *jset = w->data;
1774         struct journal_keys_to_wb wb = { NULL };
1775         unsigned sectors, bytes, u64s;
1776         unsigned long btree_roots_have = 0;
1777         bool validate_before_checksum = false;
1778         u64 seq = le64_to_cpu(jset->seq);
1779         int ret;
1780
1781         /*
1782          * Simple compaction, dropping empty jset_entries (from journal
1783          * reservations that weren't fully used) and merging jset_entries that
1784          * can be.
1785          *
1786          * If we wanted to be really fancy here, we could sort all the keys in
1787          * the jset and drop keys that were overwritten - probably not worth it:
1788          */
1789         vstruct_for_each(jset, i) {
1790                 unsigned u64s = le16_to_cpu(i->u64s);
1791
1792                 /* Empty entry: */
1793                 if (!u64s)
1794                         continue;
1795
1796                 /*
1797                  * New btree roots are set by journalling them; when the journal
1798                  * entry gets written we have to propagate them to
1799                  * c->btree_roots
1800                  *
1801                  * But, every journal entry we write has to contain all the
1802                  * btree roots (at least for now); so after we copy btree roots
1803                  * to c->btree_roots we have to get any missing btree roots and
1804                  * add them to this journal entry:
1805                  */
1806                 switch (i->type) {
1807                 case BCH_JSET_ENTRY_btree_root:
1808                         bch2_journal_entry_to_btree_root(c, i);
1809                         __set_bit(i->btree_id, &btree_roots_have);
1810                         break;
1811                 case BCH_JSET_ENTRY_write_buffer_keys:
1812                         EBUG_ON(!w->need_flush_to_write_buffer);
1813
1814                         if (!wb.wb)
1815                                 bch2_journal_keys_to_write_buffer_start(c, &wb, seq);
1816
1817                         struct bkey_i *k;
1818                         jset_entry_for_each_key(i, k) {
1819                                 ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
1820                                 if (ret) {
1821                                         bch2_fs_fatal_error(c, "-ENOMEM flushing journal keys to btree write buffer");
1822                                         bch2_journal_keys_to_write_buffer_end(c, &wb);
1823                                         return ret;
1824                                 }
1825                         }
1826                         i->type = BCH_JSET_ENTRY_btree_keys;
1827                         break;
1828                 }
1829         }
1830
1831         if (wb.wb)
1832                 bch2_journal_keys_to_write_buffer_end(c, &wb);
1833         w->need_flush_to_write_buffer = false;
1834
1835         start = end = vstruct_last(jset);
1836
1837         end     = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
1838
1839         struct jset_entry_datetime *d =
1840                 container_of(jset_entry_init(&end, sizeof(*d)), struct jset_entry_datetime, entry);
1841         d->entry.type   = BCH_JSET_ENTRY_datetime;
1842         d->seconds      = cpu_to_le64(ktime_get_real_seconds());
1843
1844         bch2_journal_super_entries_add_common(c, &end, seq);
1845         u64s    = (u64 *) end - (u64 *) start;
1846         BUG_ON(u64s > j->entry_u64s_reserved);
1847
1848         le32_add_cpu(&jset->u64s, u64s);
1849
1850         sectors = vstruct_sectors(jset, c->block_bits);
1851         bytes   = vstruct_bytes(jset);
1852
1853         if (sectors > w->sectors) {
1854                 bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
1855                                     vstruct_bytes(jset), w->sectors << 9,
1856                                     u64s, w->u64s_reserved, j->entry_u64s_reserved);
1857                 return -EINVAL;
1858         }
1859
1860         jset->magic             = cpu_to_le64(jset_magic(c));
1861         jset->version           = cpu_to_le32(c->sb.version);
1862
1863         SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
1864         SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
1865
1866         if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
1867                 j->last_empty_seq = seq;
1868
1869         if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
1870                 validate_before_checksum = true;
1871
1872         if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
1873                 validate_before_checksum = true;
1874
1875         if (validate_before_checksum &&
1876             (ret = jset_validate(c, NULL, jset, 0, WRITE)))
1877                 return ret;
1878
1879         ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
1880                     jset->encrypted_start,
1881                     vstruct_end(jset) - (void *) jset->encrypted_start);
1882         if (bch2_fs_fatal_err_on(ret, c,
1883                         "error decrypting journal entry: %i", ret))
1884                 return ret;
1885
1886         jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
1887                                   journal_nonce(jset), jset);
1888
1889         if (!validate_before_checksum &&
1890             (ret = jset_validate(c, NULL, jset, 0, WRITE)))
1891                 return ret;
1892
1893         memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
1894         return 0;
1895 }
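
/*
 * Note the ordering above: when the checksum type encrypts (or we're writing
 * an old on-disk format version), the entry is validated before
 * checksumming/encryption, since afterwards its contents can no longer be
 * parsed; otherwise we validate after checksumming (bch2_encrypt() is a
 * no-op for non-encrypting checksum types), checking the exact bytes that
 * will go to disk.
 */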
1896
1897 static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
1898 {
1899         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1900         int error = bch2_journal_error(j);
1901
1902         /*
1903          * If the journal is in an error state - we did an emergency shutdown -
1904          * we prefer to continue doing journal writes. We just mark them as
1905          * noflush so they'll never be used, but they'll still be visible to the
1906          * list_journal tool - this helps in debugging.
1907          *
1908          * There's a caveat: the first journal write after marking the
1909          * superblock dirty must always be a flush write, because on startup
1910          * from a clean shutdown we didn't necessarily read the journal and the
1911          * new journal write might overwrite whatever was in the journal
1912          * previously - we can't leave the journal without any flush writes in
1913          * it.
1914          *
1915          * So if we're in an error state, and we're still starting up, we don't
1916          * write anything at all.
1917          */
1918         if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
1919                 return -EIO;
1920
1921         if (error ||
1922             w->noflush ||
1923             (!w->must_flush &&
1924              (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
1925              test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
1926                 w->noflush = true;
1927                 SET_JSET_NO_FLUSH(w->data, true);
1928                 w->data->last_seq       = 0;
1929                 w->last_seq             = 0;
1930
1931                 j->nr_noflush_writes++;
1932         } else {
1933                 j->last_flush_write = jiffies;
1934                 j->nr_flush_writes++;
1935                 clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
1936         }
1937
1938         return 0;
1939 }
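
/*
 * Noflush entries go out with JSET_NO_FLUSH set and last_seq = 0. On read,
 * noflush entries newer than the newest good flush entry are dropped and
 * their seq range ends up blacklisted (see the read path above): without a
 * flush, nothing guarantees they all made it to disk.
 */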
1940
1941 CLOSURE_CALLBACK(bch2_journal_write)
1942 {
1943         closure_type(w, struct journal_buf, io);
1944         struct journal *j = container_of(w, struct journal, buf[w->idx]);
1945         struct bch_fs *c = container_of(j, struct bch_fs, journal);
1946         struct bch_replicas_padded replicas;
1947         struct printbuf journal_debug_buf = PRINTBUF;
1948         unsigned nr_rw_members = 0;
1949         int ret;
1950
1951         BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
1952         BUG_ON(w->write_allocated);
1953
1954         j->write_start_time = local_clock();
1955
1956         spin_lock(&j->lock);
1957         ret = bch2_journal_write_pick_flush(j, w);
1958         spin_unlock(&j->lock);
1959         if (ret)
1960                 goto err;
1961
1962         mutex_lock(&j->buf_lock);
1963         journal_buf_realloc(j, w);
1964
1965         ret = bch2_journal_write_prep(j, w);
1966         mutex_unlock(&j->buf_lock);
1967         if (ret)
1968                 goto err;
1969
1970         j->entry_bytes_written += vstruct_bytes(w->data);
1971
1972         while (1) {
1973                 spin_lock(&j->lock);
1974                 ret = journal_write_alloc(j, w);
1975                 if (!ret || !j->can_discard)
1976                         break;
1977
1978                 spin_unlock(&j->lock);
1979                 bch2_journal_do_discards(j);
1980         }
1981
1982         if (ret) {
1983                 __bch2_journal_debug_to_text(&journal_debug_buf, j);
1984                 spin_unlock(&j->lock);
1985                 bch_err(c, "Unable to allocate journal write:\n%s",
1986                         journal_debug_buf.buf);
1987                 printbuf_exit(&journal_debug_buf);
1988                 goto err;
1989         }
1990
1991         /*
1992          * write is allocated, no longer need to account for it in
1993          * bch2_journal_space_available():
1994          */
1995         w->sectors = 0;
1996         w->write_allocated = true;
1997
1998         /*
1999          * journal entry has been compacted and allocated, recalculate space
2000          * available:
2001          */
2002         bch2_journal_space_available(j);
2003         bch2_journal_do_writes(j);
2004         spin_unlock(&j->lock);
2005
2006         w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
2007
2008         if (c->opts.nochanges)
2009                 goto no_io;
2010
2011         for_each_rw_member(c, ca)
2012                 nr_rw_members++;
2013
2014         if (nr_rw_members > 1)
2015                 w->separate_flush = true;
2016
2017         /*
2018          * Mark journal replicas before we submit the write to guarantee
2019          * recovery will find the journal entries after a crash.
2020          */
2021         bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
2022                                  w->devs_written);
2023         ret = bch2_mark_replicas(c, &replicas.e);
2024         if (ret)
2025                 goto err;
2026
2027         if (!JSET_NO_FLUSH(w->data))
2028                 closure_wait_event(&j->async_wait, j->seq_ondisk + 1 == le64_to_cpu(w->data->seq));
2029
2030         if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
2031                 for_each_rw_member(c, ca) {
2032                         percpu_ref_get(&ca->io_ref);
2033
2034                         struct journal_device *ja = &ca->journal;
2035                         struct bio *bio = &ja->bio[w->idx]->bio;
2036                         bio_reset(bio, ca->disk_sb.bdev,
2037                                   REQ_OP_WRITE|REQ_SYNC|REQ_META|REQ_PREFLUSH);
2038                         bio->bi_end_io          = journal_write_endio;
2039                         bio->bi_private         = ca;
2040                         closure_bio_submit(bio, cl);
2041                 }
2042         }
2043
2044         continue_at(cl, do_journal_write, j->wq);
2045         return;
2046 no_io:
2047         continue_at(cl, journal_write_done, j->wq);
2048         return;
2049 err:
2050         bch2_fatal_error(c);
2051         continue_at(cl, journal_write_done, j->wq);
2052 }
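
/*
 * Write path summary: bch2_journal_write() picks flush vs noflush and preps
 * and allocates the entry, do_journal_write() submits one bio per replica,
 * and journal_write_done() runs once all of an entry's bios have completed,
 * advancing the on-disk sequence numbers.
 */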