// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct journal_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

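/*
 * The nonce used for journal checksumming/encryption is derived from the
 * entry's sequence number, so every journal entry gets a distinct nonce:
 */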
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

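/*
 * Validate a single bkey in a journal entry. On error we can't just bail
 * out, since the rest of the journal may still be usable: the bad key is
 * instead dropped from the entry (or the entry truncated at that point) and
 * FSCK_DELETED_KEY returned so the caller doesn't advance past it:
 */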
static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				(void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		pr_buf(&buf, "invalid %s in %s entry offset %zi/%u:",
		       type, where,
		       (u64 *) k - entry->_data,
		       le16_to_cpu(entry->u64s));
		pr_newline(&buf);
		pr_indent_push(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		pr_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

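/*
 * Each BCH_JSET_ENTRY type below gets a _validate()/_to_text() pair; they
 * are dispatched by entry->type through the bch2_jset_entry_ops table
 * defined further down:
 */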
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
				version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

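/*
 * Validate a single jset read from @sector: check the magic, version, size
 * and checksum, then decrypt and sanity check last_seq. Returns
 * JOURNAL_ENTRY_REREAD if the entry extends past what's been read in so far:
 */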
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(ret, c,
			"error decrypting journal entry: %i", ret);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

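/*
 * Read all the journal entries in a single journal bucket, adding each one
 * found to @jlist; entries may be read in chunks, with @buf grown as needed:
 */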
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case 0:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

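/*
 * Closure callback: read the whole journal on one device, then work out
 * where new journal writes on this device should resume (ja->cur_idx,
 * ja->sectors_free):
 */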
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r;
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] ==
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	list_for_each_entry(r, jlist->head, list) {
		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx &&
			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
				unsigned wrote = (r->ptrs[i].sector % ca->mi.bucket_size) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->sectors_free = min(ja->sectors_free,
						       ca->mi.bucket_size - wrote);
			}
		}
	}
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			/* wrap, so we don't index out of bounds when cur_idx == 0: */
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}

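/*
 * Read the journal from all member devices. On return, @list holds the
 * entries to replay; *start_seq is one past the newest sequence number seen,
 * and *blacklist_seq is one past the newest flush entry - anything in
 * between was never flushed and will be blacklisted:
 */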
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&buf1, c, p);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

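/*
 * Append pointers to @w->key for up to @replicas_want replicas, picking
 * devices from @devs_sorted that still have room in their current journal
 * bucket:
 */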
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/*
 * Allocate a journal write to up to the requested number of replicas,
 * moving on to each device's next journal bucket when its current one is
 * out of space:
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

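/*
 * Journal write buffers are grown lazily, between writes: if j->buf_size_want
 * (bumped elsewhere when an entry runs out of room) exceeds this buf's size,
 * allocate a bigger buffer and swap it in under j->lock:
 */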
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

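/*
 * Write completion: update the journal's view of what's on disk, mark the
 * journal replicas, release this buf and, if another entry is already
 * waiting, kick off the next write:
 */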
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq	= seq;

	j->seq_ondisk		= seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	}

	spin_unlock(&j->lock);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

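/*
 * Submit the actual journal write: one bio per device pointer in w->key,
 * with REQ_FUA (and possibly REQ_PREFLUSH) set for flush writes:
 */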
static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

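/*
 * Main journal write path: decide whether this can be a noflush write,
 * append btree roots and superblock bookkeeping entries, checksum and
 * encrypt, allocate devices to write to, then submit - issuing separate
 * preflushes first when there's more than one device to flush:
 */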
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (bch2_journal_error(j) ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_bkey_renumber
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}