// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
	return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}
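
/*
 * Note: masking with (~0U >> 1) keeps the computed index in the low 31 bits,
 * so it always fits in the ulong a genradix is indexed by, even on 32 bit
 * machines; see the comment in journal_entry_add() for how
 * journal_entries_base_seq is chosen.
 */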

static void __journal_replay_free(struct bch_fs *c,
				  struct journal_replay *i)
{
	struct journal_replay **p =
		genradix_ptr(&c->journal_entries,
			     journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

	BUG_ON(*p != i);
	*p = NULL;
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(c, i);
}

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct genradix_iter iter;
	struct journal_replay **_i, *i, *dup;
	struct journal_ptr *ptr;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < jlist->last_seq)
		return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

	/*
	 * genradixes are indexed by a ulong, not a u64, so we can't index them
	 * by sequence number directly: Assume instead that they will all fall
	 * within the range of +-2billion of the first one we find.
	 */
	if (!c->journal_entries_base_seq)
		c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
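
	/*
	 * e.g. if the first entry read has seq 10 billion, base_seq becomes
	 * 10e9 - S32_MAX, which puts that entry in the middle of the 31 bit
	 * index space, leaving room on either side for older and newer
	 * entries.
	 */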

	/* Drop entries we don't need anymore */
	if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
		genradix_for_each_from(&c->journal_entries, iter, _i,
				       journal_entry_radix_idx(c, jlist->last_seq)) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			if (le64_to_cpu(i->j.seq) >= last_seq)
				break;
			journal_replay_free(c, i);
		}
	}

	jlist->last_seq = max(jlist->last_seq, last_seq);

	_i = genradix_ptr_alloc(&c->journal_entries,
				journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
				GFP_KERNEL);
	if (!_i)
		return -ENOMEM;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	dup = *_i;
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i)
		return -ENOMEM;

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(c, dup);
	}

	*_i = i;
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

static void journal_entry_err_msg(struct printbuf *out,
				  struct jset *jset,
				  struct jset_entry *entry)
{
	prt_str(out, "invalid journal entry ");
	if (entry)
		prt_printf(out, "%s ", bch2_jset_entry_types[entry->type]);

	if (!jset)
		prt_printf(out, "in superblock");
	else if (!entry)
		prt_printf(out, "at seq %llu", le64_to_cpu(jset->seq));
	else
		prt_printf(out, "at offset %zi/%u seq %llu",
			   (u64 *) entry - jset->_data,
			   le32_to_cpu(jset->u64s),
			   le64_to_cpu(jset->seq));
	prt_str(out, ": ");
}

#define journal_entry_err(c, jset, entry, msg, ...)			\
({									\
	struct printbuf buf = PRINTBUF;					\
									\
	journal_entry_err_msg(&buf, jset, entry);			\
	prt_printf(&buf, msg, ##__VA_ARGS__);				\
									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, "%s", buf.buf);			\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write: %s\n", buf.buf);\
		if (bch2_fs_inconsistent(c)) {				\
			ret = -BCH_ERR_fsck_errors_not_fixed;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
									\
	printbuf_exit(&buf);						\
	true;								\
})

#define journal_entry_err_on(cond, c, jset, entry, msg, ...)		\
	((cond) ? journal_entry_err(c, jset, entry, msg, ##__VA_ARGS__) : false)
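
/*
 * Note the asymmetry in journal_entry_err(): at read time a bad entry is an
 * ordinary fixable fsck error, but if the same check fires at write time we
 * were about to write corrupt metadata, which is only survivable if fsck is
 * going to be run.
 */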

#define FSCK_DELETED_KEY 5

static int journal_validate_key(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c, jset, entry, "k->u64s 0")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry),
				 c, jset, entry,
				 "extends past end of journal entry")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
				 c, jset, entry,
				 "bad format %u", k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		prt_printf(&buf, "invalid journal entry %s at offset %zi/%u seq %llu:",
			   bch2_jset_entry_types[entry->type],
			   (u64 *) entry - jset->_data,
			   le32_to_cpu(jset->u64s),
			   le64_to_cpu(jset->seq));
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		prt_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
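
/*
 * Note on the deletion strategy above: a bad key is never left in place -
 * either the entry is truncated at the bad key (when the key's size field
 * can't be trusted), or the key is spliced out with memmove() and the freed
 * tail nulled out, so replay only ever sees keys that passed validation.
 */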

static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, jset, entry,
					       entry->level,
					       entry->btree_id,
					       k, version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}
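
/*
 * Note: journal_entry_btree_keys_validate() doesn't advance @k on
 * FSCK_DELETED_KEY because deletion memmove()s the following key into the
 * slot @k already points at.
 */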

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			prt_newline(out);
			prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s,
				 c, jset, entry,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				    version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
				 c, jset, entry,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
				 c, jset, entry,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end),
				 c, jset, entry,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	prt_printf(out, "start=%llu end=%llu",
		   le64_to_cpu(bl->start),
		   le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c, jset, entry,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	prt_printf(out, "type=%s v=%llu",
		   bch2_fs_usage_types[u->entry.btree_id],
		   le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c, jset, entry,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	prt_printf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, jset, entry, "bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, jset, entry, "bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, jset, entry, "bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, jset, entry, "bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, jset, entry, "bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	prt_printf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			prt_printf(out, " %s", bch2_data_types[i]);
		else
			prt_printf(out, " (unknown data type %u)", i);
		prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
			   le64_to_cpu(u->d[i].buckets),
			   le64_to_cpu(u->d[i].sectors),
			   le64_to_cpu(u->d[i].fragmented));
	}

	prt_printf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      struct jset *jset,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	prt_printf(out, "%.*s", bytes, l->d);
}

static int journal_entry_overwrite_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	return journal_entry_btree_keys_validate(c, jset, entry, version, big_endian, write);
}

static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
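
/*
 * The table above uses the usual bcachefs x-macro pattern:
 * BCH_JSET_ENTRY_TYPES() expands x(f, nr) once per jset entry type, so every
 * type picks up its matching journal_entry_<type>_validate()/_to_text() pair
 * defined earlier in this file.
 */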

int bch2_journal_entry_validate(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
				version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		prt_printf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c, jset, entry,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, jset, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0, decrypt_ret;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max,
				 c, jset, NULL,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
				 c, jset, NULL,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
				 c, jset, NULL,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	/* at write time the checksum hasn't been computed yet: */
	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum),
				 c, jset, NULL,
			"%s sector %llu seq %llu: journal checksum bad",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	decrypt_ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
				   jset->encrypted_start,
				   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(decrypt_ret, c,
			"error decrypting journal entry: %i", decrypt_ret);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
				 c, jset, NULL,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}
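
/*
 * At write time we validate against the entry's own size (passed as both
 * bucket_sectors_left and sectors_read), since the entry isn't on disk yet
 * and the checksum hasn't necessarily been computed.
 */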

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
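
/*
 * Usage note: bch2_journal_read_device() starts with a PAGE_SIZE buffer and
 * grows it here (rounded up to a power of two) whenever jset_validate()
 * returns JOURNAL_ENTRY_REREAD for an entry bigger than the current buffer.
 */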

static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case 0:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r, **_r;
	struct genradix_iter iter;
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] ==
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	genradix_for_each(&c->journal_entries, iter, _r) {
		r = *_r;

		if (!r)
			continue;

		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx &&
			    sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) {
				unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->sectors_free = min(ja->sectors_free,
						       ca->mi.bucket_size - wrote);
			}
		}
	}
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
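
/*
 * Note: on return, ja->cur_idx and ja->sectors_free describe where journal
 * writes to this device should resume - the bucket holding the newest entry,
 * minus however much of that bucket is already occupied.
 */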

void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			prt_printf(out, " ");
		prt_printf(out, "%u:%u:%u (sector %llu)",
			   j->ptrs[i].dev,
			   j->ptrs[i].bucket,
			   j->ptrs[i].bucket_offset,
			   j->ptrs[i].sector);
	}
}
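
/*
 * bch2_journal_read() - entry point for journal recovery: collect entries
 * from every device, drop unflushed entries newer than the most recent flush
 * (they'll be blacklisted), verify that any gaps in the sequence are covered
 * by blacklist entries, and check that each surviving entry is marked in the
 * superblock's replicas section.
 */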
int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, **_i, *prev = NULL;
	struct genradix_iter radix_iter;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.last_seq = 0;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!c->opts.fsck &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	*start_seq = 0;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		if (!*start_seq)
			*start_seq = le64_to_cpu(i->j.seq) + 1;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!*start_seq) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (prev) {
				bch2_journal_ptrs_to_text(&buf1, c, prev);
				prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
			} else
				prt_printf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		prev = i;
		seq++;
	}

	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		i = *_i;
		if (!i || i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				"superblock not marked as containing replicas %s",
				buf.buf)) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/*
 * journal_write_alloc - allocate space for the next journal entry, moving on
 * to the next journal bucket if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
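
/*
 * Note the two-pass structure above: the first __journal_write_alloc() call
 * only uses space remaining in each device's current bucket; if that doesn't
 * reach replicas_want, devices are advanced to fresh buckets and we try
 * again, and only then do we retry ignoring the metadata target.
 */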

static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,     new_buf);
	swap(buf->buf_size, new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}
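
/*
 * Note: the memcpy() above deliberately runs outside j->lock - only the
 * pointer/size swap is done under the lock, keeping the critical section
 * small.
 */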

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq	= seq;

	j->seq_ondisk		= seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	}

	spin_unlock(&j->lock);
}
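
/*
 * The cmpxchg loop above is what releases the buffer for reuse: everything
 * that must happen before the buffer can be reused (replicas accounting, pin
 * updates) is sequenced before the unwritten_idx increment.
 */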

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			bch_err(c, "missing device for journal write");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_and_count(c, journal_write, bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
}
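
/*
 * Flush semantics above: a flush write needs REQ_FUA so the journal entry
 * itself is durable, plus REQ_PREFLUSH to order it after previously written
 * data - unless separate flush bios were already issued for this write
 * (w->separate_flush).
 */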

void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (bch2_journal_error(j) ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_bkey_renumber
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}