// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"
#include "sb-clean.h"
#include "trace.h"
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
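/*
 * Note: the nonce is derived from the jset's sequence number, so every
 * journal entry is encrypted with a distinct nonce; BCH_NONCE_JOURNAL keeps
 * journal nonces from colliding with nonces used for other metadata types.
 * An unrecognized checksum type below fails the check outright: we'd rather
 * treat the entry as bad than misinterpret its checksum field.
 */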
30 static bool jset_csum_good(struct bch_fs *c, struct jset *j)
32 return bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)) &&
33 !bch2_crc_cmp(j->csum,
34 csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j));
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
	return (seq - c->journal_entries_base_seq) & (~0U >> 1);
}
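/*
 * Illustrative example: with journal_entries_base_seq == 1000, seq 1003 maps
 * to radix index 3. Masking with (~0U >> 1) keeps the index within 31 bits,
 * so any sequence number within +-2 billion of the base maps to a valid
 * (non-negative) genradix slot.
 */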
static void __journal_replay_free(struct bch_fs *c,
				  struct journal_replay *i)
{
	struct journal_replay **p =
		genradix_ptr(&c->journal_entries,
			     journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

	BUG_ON(*p != i);
	*p = NULL;
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}
static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(c, i);
}

struct journal_list {
	struct closure		cl;
	u64			last_seq;
	struct mutex		lock;
	int			ret;
};
#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j)
{
	struct genradix_iter iter;
	struct journal_replay **_i, *i, *dup;
	struct journal_ptr *ptr;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < jlist->last_seq)
		return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;

	/*
	 * genradixes are indexed by a ulong, not a u64, so we can't index them
	 * by sequence number directly: Assume instead that they will all fall
	 * within the range of +-2 billion of the first one we find.
	 */
	if (!c->journal_entries_base_seq)
		c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
	/* Drop entries we don't need anymore */
	if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
		genradix_for_each_from(&c->journal_entries, iter, _i,
				       journal_entry_radix_idx(c, jlist->last_seq)) {
			i = *_i;

			if (!i || i->ignore)
				continue;

			if (le64_to_cpu(i->j.seq) >= last_seq)
				break;
			journal_replay_free(c, i);
		}
	}

	jlist->last_seq = max(jlist->last_seq, last_seq);
	_i = genradix_ptr_alloc(&c->journal_entries,
				journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
				GFP_KERNEL);
	if (!_i)
		return -BCH_ERR_ENOMEM_journal_entry_add;
	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	dup = *_i;
	if (dup) {
		if (bytes == vstruct_bytes(&dup->j) &&
		    !memcmp(j, &dup->j, bytes)) {
			i = dup;
			goto found;
		}

		if (!entry_ptr.csum_good) {
			i = dup;
			goto found;
		}

		if (!dup->csum_good)
			goto replace;

		fsck_err(c, journal_entry_replicas_data_mismatch,
			 "found duplicate but non identical journal entries (seq %llu)",
			 le64_to_cpu(j->seq));
		i = dup;
		goto found;
	}
replace:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i)
		return -BCH_ERR_ENOMEM_journal_entry_add;
	i->nr_ptrs	= 0;
	i->csum_good	= entry_ptr.csum_good;
	i->ignore	= false;
	unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
	i->ptrs[i->nr_ptrs++] = entry_ptr;

	if (dup) {
		if (dup->nr_ptrs >= ARRAY_SIZE(dup->ptrs)) {
			bch_err(c, "found too many copies of journal entry %llu",
				le64_to_cpu(i->j.seq));
			dup->nr_ptrs = ARRAY_SIZE(dup->ptrs) - 1;
		}

		/* The first ptr should represent the jset we kept: */
		memcpy(i->ptrs + i->nr_ptrs,
		       dup->ptrs,
		       sizeof(dup->ptrs[0]) * dup->nr_ptrs);
		i->nr_ptrs += dup->nr_ptrs;
		__journal_replay_free(c, dup);
	}

	*_i = i;
	return 0;
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
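/*
 * A zeroed jset_entry decodes as u64s == 0 with type 0, so nulled ranges are
 * iterated over as empty entries and ignored by validation and replay.
 */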
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7
static void journal_entry_err_msg(struct printbuf *out,
				  u32 version,
				  struct jset *jset,
				  struct jset_entry *entry)
{
	prt_str(out, "invalid journal entry, version=");
	bch2_version_to_text(out, version);

	if (entry) {
		prt_str(out, " type=");
		prt_str(out, bch2_jset_entry_types[entry->type]);
	}

	if (!jset) {
		prt_printf(out, " in superblock");
	} else {
		prt_printf(out, " seq=%llu", le64_to_cpu(jset->seq));

		if (entry)
			prt_printf(out, " offset=%zi/%u",
				   (u64 *) entry - jset->_data,
				   le32_to_cpu(jset->u64s));
	}

	prt_str(out, ": ");
}
#define journal_entry_err(c, version, jset, entry, _err, msg, ...)	\
({									\
	struct printbuf _buf = PRINTBUF;				\
									\
	journal_entry_err_msg(&_buf, version, jset, entry);		\
	prt_printf(&_buf, msg, ##__VA_ARGS__);				\
									\
	switch (flags & BKEY_INVALID_WRITE) {				\
	case READ:							\
		mustfix_fsck_err(c, _err, "%s", _buf.buf);		\
		break;							\
	case WRITE:							\
		bch2_sb_error_count(c, BCH_FSCK_ERR_##_err);		\
		bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
		if (bch2_fs_inconsistent(c)) {				\
			ret = -BCH_ERR_fsck_errors_not_fixed;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
									\
	printbuf_exit(&_buf);						\
	true;								\
})
#define journal_entry_err_on(cond, ...)					\
	((cond) ? journal_entry_err(__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5
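/*
 * Note: journal_entry_err() picks up the 'flags' and 'ret' variables from the
 * enclosing scope. At read time errors are routed through mustfix_fsck_err()
 * and may be repaired; at write time they indicate corruption generated by
 * this mount and are treated as fatal.
 */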
static int journal_validate_key(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				unsigned version, int big_endian,
				enum bkey_invalid_flags flags)
{
	int write = flags & BKEY_INVALID_WRITE;
	void *next = vstruct_next(entry);
	struct printbuf buf = PRINTBUF;
	int ret = 0;
	if (journal_entry_err_on(!k->k.u64s,
				 c, version, jset, entry,
				 journal_entry_bkey_u64s_0,
				 "k->u64s 0")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry),
				 c, version, jset, entry,
				 journal_entry_bkey_past_end,
				 "extends past end of journal entry")) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}
	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
				 c, version, jset, entry,
				 journal_entry_bkey_bad_format,
				 "bad format %u", k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
	if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
			      __btree_node_type(level, btree_id), write, &buf)) {
		printbuf_reset(&buf);
		journal_entry_err_msg(&buf, version, jset, entry);
		prt_newline(&buf);
		printbuf_indent_add(&buf, 2);

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		prt_newline(&buf);
		bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				  __btree_node_type(level, btree_id), write, &buf);

		mustfix_fsck_err(c, journal_entry_bkey_invalid,
				 "%s", buf.buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);

		printbuf_exit(&buf);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
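/*
 * FSCK_DELETED_KEY tells the caller the key was dropped and entry->u64s
 * shrank: the caller must re-examine the current position rather than
 * advancing to the next key.
 */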
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bkey_invalid_flags flags)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, jset, entry,
					       entry->level,
					       entry->btree_id,
					       k, version, big_endian,
					       flags|BKEY_INVALID_JOURNAL);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}
static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	bool first = true;

	jset_entry_for_each_key(entry, k) {
		if (!first) {
			prt_newline(out);
			prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}
static int journal_entry_btree_root_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bkey_invalid_flags flags)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s,
				 c, version, jset, entry,
				 journal_entry_btree_root_bad_size,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	ret = journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				   version, big_endian, flags);
	if (ret == FSCK_DELETED_KEY)
		ret = 0;
fsck_err:
	return ret;
}
static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}
static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}
static int journal_entry_blacklist_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
				 c, version, jset, entry,
				 journal_entry_blacklist_bad_size,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq));
}
static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       unsigned version, int big_endian,
					       enum bkey_invalid_flags flags)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
				 c, version, jset, entry,
				 journal_entry_blacklist_v2_bad_size,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end),
				 c, version, jset, entry,
				 journal_entry_blacklist_v2_start_past_end,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	prt_printf(out, "start=%llu end=%llu",
		   le64_to_cpu(bl->start),
		   le64_to_cpu(bl->end));
}
static int journal_entry_usage_validate(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					unsigned version, int big_endian,
					enum bkey_invalid_flags flags)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c, version, jset, entry,
				 journal_entry_usage_bad_size,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	prt_printf(out, "type=%s v=%llu",
		   bch2_fs_usage_types[u->entry.btree_id],
		   le64_to_cpu(u->v));
}
static int journal_entry_data_usage_validate(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     unsigned version, int big_endian,
					     enum bkey_invalid_flags flags)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	struct printbuf err = PRINTBUF;
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c, version, jset, entry,
				 journal_entry_data_usage_bad_size,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err),
				 c, version, jset, entry,
				 journal_entry_data_usage_bad_size,
				 "invalid journal entry usage: %s", err.buf)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}
out:
fsck_err:
	printbuf_exit(&err);
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	prt_printf(out, "=%llu", le64_to_cpu(u->v));
}
static int journal_entry_clock_validate(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					unsigned version, int big_endian,
					enum bkey_invalid_flags flags)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, version, jset, entry,
				 journal_entry_clock_bad_size,
				 "bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, version, jset, entry,
				 journal_entry_clock_bad_rw,
				 "bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	prt_printf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}
static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_size,
				 "bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_dev,
				 "bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, version, jset, entry,
				 journal_entry_dev_usage_bad_pad,
				 "bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	prt_printf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			prt_printf(out, " %s", bch2_data_types[i]);
		else
			prt_printf(out, " (unknown data type %u)", i);
		prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
			   le64_to_cpu(u->d[i].buckets),
			   le64_to_cpu(u->d[i].sectors),
			   le64_to_cpu(u->d[i].fragmented));
	}
}
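/*
 * nr_types above is derived from the entry's size rather than from
 * BCH_DATA_NR, so entries written by versions with more (or fewer) data types
 * still print correctly.
 */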
static int journal_entry_log_validate(struct bch_fs *c,
				      struct jset *jset,
				      struct jset_entry *entry,
				      unsigned version, int big_endian,
				      enum bkey_invalid_flags flags)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	prt_printf(out, "%.*s", bytes, l->d);
}
static int journal_entry_overwrite_validate(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    unsigned version, int big_endian,
					    enum bkey_invalid_flags flags)
{
	return journal_entry_btree_keys_validate(c, jset, entry,
				version, big_endian, READ);
}

static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
						    struct jset *jset,
						    struct jset_entry *entry,
						    unsigned version, int big_endian,
						    enum bkey_invalid_flags flags)
{
	return journal_entry_btree_keys_validate(c, jset, entry,
				version, big_endian, READ);
}

static void journal_entry_write_buffer_keys_to_text(struct printbuf *out, struct bch_fs *c,
						     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, unsigned, int,
			enum bkey_invalid_flags);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
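/*
 * For reference, x(btree_keys, nr) in BCH_JSET_ENTRY_TYPES() expands the
 * initializer above to:
 *
 *	[BCH_JSET_ENTRY_btree_keys] = (struct jset_entry_ops) {
 *		.validate	= journal_entry_btree_keys_validate,
 *		.to_text	= journal_entry_btree_keys_to_text,
 *	},
 */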
int bch2_journal_entry_validate(struct bch_fs *c,
				struct jset *jset,
				struct jset_entry *entry,
				unsigned version, int big_endian,
				enum bkey_invalid_flags flags)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset, entry,
				version, big_endian, flags)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		prt_printf(out, "(unknown type %u)", entry->type);
	}
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 enum bkey_invalid_flags flags)
{
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
					 c, version, jset, entry,
					 journal_entry_past_jset_end,
					 "journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, jset, entry,
					version, JSET_BIG_ENDIAN(jset), flags);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 enum bkey_invalid_flags flags)
{
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on(!bch2_version_compatible(version),
				 c, version, jset, NULL,
				 jset_unsupported_version,
				 "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq),
				 BCH_VERSION_MAJOR(version),
				 BCH_VERSION_MINOR(version))) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
				 c, version, jset, NULL,
				 jset_unknown_csum,
				 "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq),
				 JSET_CSUM_TYPE(jset)))
		ret = JOURNAL_ENTRY_BAD;

	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
				 c, version, jset, NULL,
				 jset_last_seq_newer_than_seq,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}

	ret = jset_validate_entries(c, jset, flags);
fsck_err:
	return ret;
}
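/*
 * Return conventions above: JOURNAL_ENTRY_NONE means the buffer didn't
 * contain a journal entry at all (magic mismatch), JOURNAL_ENTRY_BAD means an
 * entry was found but can't be used; negative errors are fatal.
 */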
static int jset_validate_early(struct bch_fs *c,
			       struct bch_dev *ca,
			       struct jset *jset, u64 sector,
			       unsigned bucket_sectors_left,
			       unsigned sectors_read)
{
	size_t bytes = vstruct_bytes(jset);
	unsigned version;
	enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on(!bch2_version_compatible(version),
				 c, version, jset, NULL,
				 jset_unsupported_version,
				 "%s sector %llu seq %llu: unknown journal entry version %u.%u",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq),
				 BCH_VERSION_MAJOR(version),
				 BCH_VERSION_MINOR(version))) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
				 c, version, jset, NULL,
				 jset_past_bucket_end,
				 "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq), bytes))
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
fsck_err:
	return ret;
}
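/*
 * When an entry claims to extend past the bucket we shrink jset->u64s by the
 * overflow divided by 8 (u64s are 8 bytes), so the truncated tail is simply
 * dropped rather than read out of bounds.
 */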
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
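/*
 * Read buffer sizes are rounded up to powers of two and capped at
 * JOURNAL_ENTRY_SIZE_MAX, which is what the preallocated journal bios are
 * sized for.
 */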
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false, csum_good;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
			unsigned nr_bvecs;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);
			nr_bvecs = buf_pages(buf->data, sectors_read << 9);

			bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
			bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);

			bio->bi_iter.bi_sector = offset;
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			kfree(bio);

			if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate_early(c, ca, j, offset,
				    end - offset, sectors_read);
		switch (ret) {
		case 0:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		csum_good = jset_csum_good(c, j);
		if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
				       "journal checksum error"))
			saw_bad = true;

		ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
			     j->encrypted_start,
			     vstruct_end(j) - (void *) j->encrypted_start);
		bch2_fs_fatal_err_on(ret, c,
				"error decrypting journal entry: %i", ret);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.csum_good	= csum_good,
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
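/*
 * Buckets are scanned entry by entry; a sequence number lower than what we've
 * already seen for this bucket means we've hit stale data from a previous
 * trip around the journal, so the rest of the bucket can be skipped.
 */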
static CLOSURE_CALLBACK(bch2_journal_read_device)
{
	closure_type(ja, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_replay *r, **_r;
	struct genradix_iter iter;
	struct journal_read_buf buf = { NULL, 0 };
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	ja->sectors_free = ca->mi.bucket_size;

	mutex_lock(&jlist->lock);
	genradix_for_each_reverse(&c->journal_entries, iter, _r) {
		r = *_r;

		if (!r)
			continue;

		for (i = 0; i < r->nr_ptrs; i++) {
			if (r->ptrs[i].dev == ca->dev_idx) {
				unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) +
					vstruct_sectors(&r->j, c->block_bits);

				ja->cur_idx = r->ptrs[i].bucket;
				ja->sectors_free = ca->mi.bucket_size - wrote;
				goto found;
			}
		}
	}
found:
	mutex_unlock(&jlist->lock);

	if (ja->bucket_seq[ja->cur_idx] &&
	    ja->sectors_free == ca->mi.bucket_size) {
#if 0
		/*
		 * Debug code for ZNS support, where we (probably) want to
		 * correlate where we stopped in the journal with the zone
		 * write points:
		 */
		bch_err(c, "ja->sectors_free == ca->mi.bucket_size");
		bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
		for (i = 0; i < 3; i++) {
			unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;

			bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
		}
#endif
		ja->sectors_free = 0;
	}

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
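/*
 * After reading every bucket, the newest entry with a pointer on this device
 * tells us where this device's journal stream ended - that becomes
 * cur_idx/sectors_free, i.e. where new journal writes will resume.
 */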
void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			prt_printf(out, " ");
		prt_printf(out, "%u:%u:%u (sector %llu)",
			   j->ptrs[i].dev,
			   j->ptrs[i].bucket,
			   j->ptrs[i].bucket_offset,
			   j->ptrs[i].sector);
	}
}
int bch2_journal_read(struct bch_fs *c,
		      u64 *last_seq,
		      u64 *blacklist_seq,
		      u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, **_i, *prev = NULL;
	struct genradix_iter radix_iter;
	struct printbuf buf = PRINTBUF;
	bool degraded = false, last_write_torn = false;
	u64 seq;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.last_seq = 0;
	jlist.ret = 0;

	for_each_member_device(c, ca) {
		if (!c->opts.fsck &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	*last_seq	= 0;
	*start_seq	= 0;
	*blacklist_seq	= 0;
	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) {
		enum bkey_invalid_flags flags = BKEY_INVALID_JOURNAL;

		i = *_i;

		if (!i || i->ignore)
			continue;

		if (!*start_seq)
			*blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1;

		if (JSET_NO_FLUSH(&i->j)) {
			i->ignore = true;
			continue;
		}

		if (!last_write_torn && !i->csum_good) {
			last_write_torn = true;
			i->ignore = true;
			continue;
		}

		if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
					 c, le32_to_cpu(i->j.version), &i->j, NULL,
					 jset_last_seq_newer_than_seq,
					 "invalid journal entry: last_seq > seq (%llu > %llu)",
					 le64_to_cpu(i->j.last_seq),
					 le64_to_cpu(i->j.seq)))
			i->j.last_seq = i->j.seq;

		*last_seq	= le64_to_cpu(i->j.last_seq);
		*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
		break;
	}

	if (!*start_seq) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	if (!*last_seq) {
		fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
			 "journal read done, but no entries found after dropping non-flushes");
		return 0;
	}

	bch_info(c, "journal read done, replaying entries %llu-%llu",
		 *last_seq, *blacklist_seq - 1);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
	/* Drop blacklisted entries and entries older than last_seq: */
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < *last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    jset_seq_blacklisted,
				    "found blacklisted journal entry %llu", seq);
			i->ignore = true;
		}
	}
	/* Check for missing entries: */
	seq = *last_seq;
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (prev) {
				bch2_journal_ptrs_to_text(&buf1, c, prev);
				prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits));
			} else
				prt_printf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, journal_entries_missing,
				 "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 *last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		prev = i;
		seq++;
	}
	genradix_for_each(&c->journal_entries, radix_iter, _i) {
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		i = *_i;
		if (!i || i->ignore)
			continue;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
			struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);

			if (!i->ptrs[ptr].csum_good)
				bch_err_dev_offset(ca, i->ptrs[ptr].sector,
						   "invalid journal checksum, seq %llu%s",
						   le64_to_cpu(i->j.seq),
						   i->csum_good ? " (had good copy on another device)" : "");
		}

		ret = jset_validate(c,
				    bch_dev_bkey_exists(c, i->ptrs[0].dev),
				    &i->j,
				    i->ptrs[0].sector,
				    READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    !bch2_replicas_marked(c, &replicas.e) &&
		    (le64_to_cpu(i->j.seq) == *last_seq ||
		     fsck_err(c, journal_entry_replicas_not_marked,
			      "superblock not marked as containing replicas for journal entry %llu\n %s",
			      le64_to_cpu(i->j.seq), buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
/* journal write: */
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device_c(bkey_i_to_s_c(&w->key), ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
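/*
 * Note that *replicas counts durability, not devices: a single journal write
 * to a durability-2 device satisfies metadata_replicas=2.
 */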
/**
 * journal_write_alloc - decide where to write next journal entry
 *
 * @j:		journal object
 * @w:		journal buf (entry to be written)
 *
 * Returns: 0 on success, or -EROFS on failure
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	size_t btree_write_buffer_size = new_size / 64;

	if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
		return;

	new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK);
}
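/*
 * The journal keeps JOURNAL_BUF_NR buffers in flight; masking the sequence
 * number with JOURNAL_BUF_MASK maps a seq to its slot in that ring.
 */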
static CLOSURE_CALLBACK(journal_write_done)
{
	closure_type(j, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;

			bch2_do_discards(c);
			closure_wake_up(&c->freelist_wait);

			bch2_reset_alloc_cursors(c);
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq	= seq;

	j->seq_ondisk		= seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	if (j->watermark != BCH_WATERMARK_stripe)
		journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(journal_state_count(new, new.unwritten_idx));

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_reclaim_fast(j);
	bch2_journal_space_available(j);

	track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight],
			   &j->max_in_flight_start, false);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (!journal_state_count(new, new.unwritten_idx) &&
	    journal_last_unwritten_seq(j) <= journal_cur_seq(j)) {
		spin_unlock(&j->lock);
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
	} else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) &&
		   new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) {
		struct journal_buf *buf = journal_cur_buf(j);
		long delta = buf->expires - jiffies;

		/*
		 * We don't close a journal entry to write it while there's
		 * previous entries still in flight - the current journal entry
		 * might want to be written now:
		 */

		spin_unlock(&j->lock);
		mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta));
	} else
		spin_unlock(&j->lock);
}
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
			       "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
static CLOSURE_CALLBACK(do_journal_write)
{
	closure_type(j, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_and_count(c, journal_write, bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
}
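/*
 * Flush journal writes above go out with REQ_FUA, plus REQ_PREFLUSH when a
 * separate flush wasn't already issued: once the bio completes, the entry is
 * durable and everything it references was flushed before it.
 */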
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct jset_entry *start, *end;
	struct jset *jset = w->data;
	struct journal_keys_to_wb wb = { NULL };
	unsigned sectors, bytes, u64s;
	unsigned long btree_roots_have = 0;
	bool validate_before_checksum = false;
	u64 seq = le64_to_cpu(jset->seq);
	int ret;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each(jset, i) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/*
		 * New btree roots are set by journalling them; when the journal
		 * entry gets written we have to propagate them to
		 * c->btree_roots
		 *
		 * But, every journal entry we write has to contain all the
		 * btree roots (at least for now); so after we copy btree roots
		 * to c->btree_roots we have to get any missing btree roots and
		 * add them to this journal entry:
		 */
		switch (i->type) {
		case BCH_JSET_ENTRY_btree_root:
			bch2_journal_entry_to_btree_root(c, i);
			__set_bit(i->btree_id, &btree_roots_have);
			break;
		case BCH_JSET_ENTRY_write_buffer_keys:
			EBUG_ON(!w->need_flush_to_write_buffer);

			if (!wb.wb)
				bch2_journal_keys_to_write_buffer_start(c, &wb, seq);

			struct bkey_i *k;
			jset_entry_for_each_key(i, k) {
				ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
				if (ret) {
					bch2_fs_fatal_error(c, "-ENOMEM flushing journal keys to btree write buffer");
					bch2_journal_keys_to_write_buffer_end(c, &wb);
					return ret;
				}
			}
			i->type = BCH_JSET_ENTRY_btree_keys;
			break;
		}
	}

	if (wb.wb)
		bch2_journal_keys_to_write_buffer_end(c, &wb);
	w->need_flush_to_write_buffer = false;

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);

	bch2_journal_super_entries_add_common(c, &end, seq);
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);

	sectors = vstruct_sectors(jset, c->block_bits);
	bytes	= vstruct_bytes(jset);

	if (sectors > w->sectors) {
		bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
				    vstruct_bytes(jset), w->sectors << 9,
				    u64s, w->u64s_reserved, j->entry_u64s_reserved);
		return -EINVAL;
	}

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = seq;

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    (ret = jset_validate(c, NULL, jset, 0, WRITE)))
		return ret;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
			"error encrypting journal entry: %i", ret))
		return ret;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    (ret = jset_validate(c, NULL, jset, 0, WRITE)))
		return ret;

	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
	return 0;
}
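/*
 * Ordering note: when the entry will be encrypted, or when writing in an
 * older on-disk format (where validation may rewrite keys via the compat
 * path), we validate before checksumming/encrypting, while the contents can
 * still be read and modified; otherwise validating afterwards checks exactly
 * the bytes that will hit disk.
 */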
static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int error = bch2_journal_error(j);

	/*
	 * If the journal is in an error state - we did an emergency shutdown -
	 * we prefer to continue doing journal writes. We just mark them as
	 * noflush so they'll never be used, but they'll still be visible by the
	 * list_journal tool - this helps in debugging.
	 *
	 * There's a caveat: the first journal write after marking the
	 * superblock dirty must always be a flush write, because on startup
	 * from a clean shutdown we didn't necessarily read the journal and the
	 * new journal write might overwrite whatever was in the journal
	 * previously - we can't leave the journal without any flush writes in
	 * it.
	 *
	 * So if we're in an error state, and we're still starting up, we don't
	 * write anything at all.
	 */
	if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
		return -EIO;

	if (error ||
	    w->noflush ||
	    (!w->must_flush &&
	     (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	     test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(w->data, true);
		w->data->last_seq	= 0;
		w->last_seq		= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
		clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
	}

	return 0;
}
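/*
 * A noflush write has last_seq == 0 and JSET_NO_FLUSH set: it doesn't advance
 * the on-disk last_seq, and recovery will blacklist any noflush entries newer
 * than the most recent flush write it finds.
 */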
CLOSURE_CALLBACK(bch2_journal_write)
{
	closure_type(j, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	unsigned nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	ret = bch2_journal_write_pick_flush(j, w);
	spin_unlock(&j->lock);
	if (ret)
		goto err;

	mutex_lock(&j->buf_lock);
	journal_buf_realloc(j, w);

	ret = bch2_journal_write_prep(j, w);
	mutex_unlock(&j->buf_lock);
	if (ret)
		goto err;

	j->entry_bytes_written += vstruct_bytes(w->data);

	while (1) {
		spin_lock(&j->lock);
		ret = journal_write_alloc(j, w);
		if (!ret || !j->can_discard)
			break;

		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
	}

	if (ret) {
		__bch2_journal_debug_to_text(&journal_debug_buf, j);
		spin_unlock(&j->lock);
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		goto err;
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(c, ca)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	/*
	 * Mark journal replicas before we submit the write to guarantee
	 * recovery will find the journal entries after a crash.
	 */
	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
				 w->devs_written);
	ret = bch2_mark_replicas(c, &replicas.e);
	if (ret)
		goto err;

	if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
		for_each_rw_member(c, ca) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}