// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>
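/*
 * Journal entries read during recovery are kept in a genradix indexed by
 * sequence number. A genradix is indexed by a ulong, not a u64, so sequence
 * numbers are mapped to indexes relative to the first sequence number we see;
 * masking with (~0U >> 1) keeps the index within a 31 bit range:
 */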
static inline u32 journal_entry_radix_idx(struct bch_fs *c,
					  struct jset *j)
{
	return (le64_to_cpu(j->seq) - c->journal_entries_base_seq) & (~0U >> 1);
}
static void __journal_replay_free(struct bch_fs *c,
				  struct journal_replay *i)
{
	struct journal_replay **p =
		genradix_ptr(&c->journal_entries, journal_entry_radix_idx(c, &i->j));

	BUG_ON(*p != i);
	*p = NULL;
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}
static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(c, i);
}
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
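/*
 * JOURNAL_ENTRY_ADD_OUT_OF_RANGE is not an error: it means the entry predates
 * the range we're replaying from, so it's simply skipped (unless
 * read_entire_journal is set, in which case the range check is bypassed):
 */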
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct genradix_iter iter;
	struct journal_replay **_i, *i, *dup;
	struct journal_ptr *ptr;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	/*
	 * Xarrays are indexed by a ulong, not a u64, so we can't index them by
	 * sequence number directly: assume instead that they will all fall
	 * within the range of +-2 billion of the first one we find.
	 */
	if (!c->journal_entries_base_seq)
		c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j) && !c->opts.read_entire_journal) {
		genradix_for_each(&c->journal_entries, iter, _i) {
			i = *_i;

			if (!i)
				continue;

			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	_i = genradix_ptr(&c->journal_entries, journal_entry_radix_idx(c, j));
	dup = _i ? *_i : NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(c, dup);
	}

	_i = genradix_ptr_alloc(&c->journal_entries,
				journal_entry_radix_idx(c, &i->j),
				GFP_KERNEL);
	if (!_i) {
		bch_err(c, "failed to allocate c->journal_entries entry");
		ret = -ENOMEM;
		goto out;
	}

	*_i = i;
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}
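/*
 * Journal entries are checksummed and (optionally) encrypted with a nonce
 * derived from the entry's sequence number:
 */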
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
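/*
 * On READ, journal_entry_err() emits a mustfix fsck error and continues; on
 * WRITE, corrupt metadata about to be written is fatal. The validate
 * functions below repair bad keys in place by shrinking the entry and nulling
 * out the tail, returning FSCK_DELETED_KEY so the caller knows not to advance
 * past the now-deleted key:
 */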
#define FSCK_DELETED_KEY	5

static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		pr_buf(&buf, "invalid %s in %s entry offset %zi/%u:",
		       type, where,
		       (u64 *) k - entry->_data,
		       le16_to_cpu(entry->u64s));
		pr_newline(&buf);
		pr_indent_push(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		pr_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}
static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}
static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}
static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}
static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}
static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}
static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}
static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}
static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}
static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}
static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
}
static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};
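/*
 * The per-type validate/to_text implementations above are wired up via the
 * BCH_JSET_ENTRY_TYPES() x-macro, so adding a new entry type only requires
 * defining journal_entry_<name>_validate() and journal_entry_<name>_to_text():
 */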
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
							    version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
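/*
 * jset_validate() returns 0 on success, JOURNAL_ENTRY_NONE if the buffer
 * doesn't look like a journal entry at all, JOURNAL_ENTRY_REREAD if the entry
 * extends past what's been read so far, and JOURNAL_ENTRY_BAD on checksum or
 * size errors:
 */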
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0, decrypt_err;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	/* use a separate variable so a decryption error can't clobber ret: */
	decrypt_err = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
				   jset->encrypted_start,
				   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(decrypt_err, c,
			     "error decrypting journal entry: %i", decrypt_err);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}
static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
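/*
 * Read all the journal entries in a single journal bucket, walking forward
 * from the start of the bucket one entry at a time. Short reads, entries that
 * fail to validate, and entries older than what we've already seen for this
 * bucket are all handled here:
 */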
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				goto out;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case 0:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					goto err;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				goto out;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			goto err;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			goto out;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			goto err;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}
out:
	ret = 0;
err:
	return ret;
}
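/*
 * Closure called once per device: reads every journal bucket on the device,
 * then works out where the next journal write should go (cur_idx and
 * sectors_free) from the highest sequence number found:
 */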
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r, **_r;
	struct genradix_iter iter;
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] ==
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	genradix_for_each(&c->journal_entries, iter, _r) {
		r = *_r;

		if (!r)
			continue;

		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx &&
			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
				unsigned wrote = (r->ptrs[i].sector % ca->mi.bucket_size) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->sectors_free = min(ja->sectors_free,
						       ca->mi.bucket_size - wrote);
			}
		}
	}
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
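/* Prints each replay pointer as dev:bucket:offset (sector N), e.g. "0:12:512 (sector 98304)": */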
void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}
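/*
 * Top level journal read path: read all devices in parallel, pick out the
 * newest flush entry (everything newer is unflushed and will be blacklisted),
 * drop blacklisted and too-old entries, check for gaps in the sequence of
 * entries being replayed, and mark the replicas for each entry we keep:
 */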
int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, **_i, *prev = NULL;
	struct genradix_iter radix_iter;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	*start_seq = 0;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		if (!*start_seq)
			*start_seq = le64_to_cpu(i->j.seq) + 1;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!*start_seq) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (prev) {
				bch2_journal_ptrs_to_text(&buf1, c, prev);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		prev = i;
		seq++;
	}

	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		i = *_i;
		if (!i || i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
/* journal write: */
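/*
 * Journal write path: allocate space for the write on each device (possibly
 * advancing to the next journal bucket), checksum/encrypt the entry, then
 * submit the write, with flushes issued separately when needed:
 */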
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
/*
 * journal_write_alloc - allocate space for a journal write, moving on to the
 * next journal bucket on a device if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}
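/*
 * Write completion: update the journal's idea of what's on disk, mark the
 * write's replicas, release the buffer, and kick off the next write if one is
 * already waiting:
 */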
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq	= seq;

	j->seq_ondisk		= seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	}

	spin_unlock(&j->lock);
}
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}
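/*
 * Main journal write path, called via closure from the journal code: decides
 * whether this will be a flush write, adds btree roots and superblock fields
 * to the entry, checksums/encrypts it, allocates devices to write to, and
 * issues preflushes before the data write when a separate flush is needed:
 */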
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (bch2_journal_error(j) ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_bkey_renumber
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}