// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct bch_extent_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct bch_extent_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}
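
/*
 * A hedged usage sketch (illustrative, names as above): reading the same seq
 * twice, where only the second read had a good checksum, leaves one list
 * entry whose payload came from the clean read but which remembers both
 * on-disk locations:
 *
 *	journal_entry_add(c, ca0, ptr0, jlist, j, true);  // bad csum: kept, i->bad set
 *	journal_entry_add(c, ca1, ptr1, jlist, j, false); // replaces bad copy,
 *							  // keeps ptr0 and adds ptr1
 */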

static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
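
/*
 * Nonce layout sketch: d[1] and d[2] carry the low and high halves of the
 * little-endian 64-bit sequence number, and d[3] tags this as a journal nonce
 * so it can never collide with nonces used for other metadata types. E.g. seq
 * 0x0123456789abcdef gives, roughly, d[1] = 0x89abcdef and d[2] = 0x01234567
 * (illustrative).
 */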

/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
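
/*
 * A zeroed jset_entry has u64s == 0 and type == 0 (btree_keys, with no keys),
 * so vstruct_next() steps over each 8-byte header and readers see only
 * harmless empty entries where the deleted data used to be.
 */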

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
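
/*
 * Usage sketch: callers must have `int ret = 0;` and a fsck_err label in
 * scope. On READ, mustfix_fsck_err() asks fsck to repair and we carry on; on
 * WRITE, a bad entry means we corrupted it in memory, a fatal error. E.g.:
 *
 *	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c, "bad size"))
 *		journal_entry_null_range(entry, vstruct_next(entry));
 */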

#define FSCK_DELETED_KEY	5

static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
				 type, where,
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}
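
/*
 * Note the two repair strategies above: when k->k.u64s itself can't be
 * trusted we can't even step to the next key, so the entry is truncated at
 * @k; when the size is sane but the key is bad, the keys that follow are
 * memmove()d down over it and the freed tail nulled out, deleting only the
 * one key.
 */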

static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static int journal_entry_validate_btree_root(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static int journal_entry_validate_blacklist(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static int journal_entry_validate_usage(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_data_usage(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_clock(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static int journal_entry_validate_dev_usage(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
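
/*
 * BCH_JSET_ENTRY_TYPES() is an x-macro list; each x(f, nr) instantiation
 * above wires up one validator, expanding to roughly (illustrative):
 *
 *	[BCH_JSET_ENTRY_btree_keys] = (struct jset_entry_ops) {
 *		.validate = journal_entry_validate_btree_keys,
 *	},
 */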

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
				version, big_endian, write)
		: 0;
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return -EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}
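
/*
 * Ordering note: the checksum covers the encrypted payload, so on read we
 * verify the checksum first and only then decrypt in place (everything past
 * ->encrypted_start). The write path in bch2_journal_write() does the mirror
 * image: encrypt, then checksum.
 */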

static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = c->opts.block_size;
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct bch_extent_ptr) {
					.dev	= ca->dev_idx,
					.offset	= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
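
/*
 * Reading a bucket is a linear scan: each valid jset records its own length,
 * so we hop from entry to entry, and only fall back to probing every block
 * boundary after a bad checksum. E.g. a bucket might yield entries at sector
 * offsets 0, 24, 48, ... until the magic check fails (illustrative numbers).
 */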

static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
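
/*
 * Each device's journal is a ring of buckets walked by index, with
 * discard_idx <= dirty_idx_ondisk <= dirty_idx <= cur_idx as ring positions
 * modulo ja->nr. Starting them all at cur_idx + 1 marks the entire ring
 * dirty; e.g. with ja->nr == 8 and cur_idx == 2, buckets 3, 4, ..., 1
 * (wrapping) become reclaimable as soon as they're unpinned (illustrative).
 */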

static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
				      struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%llu (offset %llu)",
		       j->ptrs[i].dev,
		       (u64) j->ptrs[i].offset, offset);
	}
}

int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		return -1;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			char buf1[200], buf2[200];

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct printbuf out = PBUF(buf1);
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&out, c, p);
				pr_buf(&out, " size %llu", vstruct_sectors(&p->j, c->block_bits));
			} else
				sprintf(buf1, "(none)");
			bch2_journal_ptrs_to_text(&PBUF(buf2), c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1, buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;
		char buf[80];

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
fsck_err:
	return ret;
}
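
/*
 * To recap the read path: every device's buckets are scanned in parallel via
 * closures, entries are merged into one list sorted by seq, unflushed entries
 * newer than the last flush are dropped (they'll be blacklisted rather than
 * replayed), and any gap in the sequence not covered by a blacklist is
 * reported to fsck.
 */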

/* journal write: */

static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
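
/*
 * Note that *replicas counts durability, not devices: e.g. a single pointer
 * to a device with ca->mi.durability == 2 already satisfies
 * metadata_replicas = 2 and ends the loop above.
 */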

/*
 * journal_write_alloc - allocate on-disk space for a journal entry, moving on
 * to each device's next journal bucket if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
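
/*
 * Allocation is two-pass: first place pointers on devices that still have
 * room in their current bucket, then advance any device that can open a fresh
 * bucket and try again; only if we're still short of replicas_want do we drop
 * the metadata target and retry across all rw devices.
 */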

static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
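
/*
 * Worked example (illustrative): [btree_keys, id 1, 3 u64s] [empty]
 * [btree_keys, id 1, 2 u64s] compacts to a single [btree_keys, id 1, 5 u64s]
 * entry, and jset->u64s shrinks accordingly.
 */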

static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}
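
/*
 * The swaps publish the larger buffer under j->lock; afterwards
 * new_buf/new_size refer to the old allocation, which is what the final
 * kvpfree() releases. Growing is best-effort: on allocation failure we simply
 * keep writing with the current buffer.
 */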

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(j->write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	j->seq_ondisk = seq;
	if (err && (!j->err_seq || seq < j->err_seq))
		j->err_seq = seq;

	if (!JSET_NO_FLUSH(w->data)) {
		j->flushed_seq_ondisk = seq;
		j->last_seq_ondisk = w->last_seq;
	}

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);
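
	/*
	 * Standard lockless read-modify-write above: reread the packed
	 * reservation state until our increment of unwritten_idx lands
	 * without racing another updater; unwritten_idx chasing idx is what
	 * makes the journal_bufs a ring.
	 */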

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(c->io_complete_wq, &j->write_work, 0);
	spin_unlock(&j->lock);

	if (new.unwritten_idx != new.idx &&
	    !journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}
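
/*
 * Flush semantics: REQ_PREFLUSH flushes the device's volatile write cache
 * before this entry is written, and REQ_FUA forces the entry itself to stable
 * media before completion. Noflush entries set neither, and only become
 * durable when a later flush entry commits.
 */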

void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	char *journal_debug_buf = NULL;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    !w->must_flush &&
	    (jiffies - j->last_flush_write) < msecs_to_jiffies(j->write_delay_ms) &&
	    test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);
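
	/*
	 * A noflush entry skips the cache flush and FUA on write-out; it
	 * can't advance last_seq (cleared above) and isn't durable until the
	 * next flush write, which is forced once j->write_delay_ms has
	 * elapsed since the last one.
	 */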

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
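
	/*
	 * The jset is now frozen: entries validated, everything past
	 * ->encrypted_start optionally encrypted, the checksum computed over
	 * the result, and the tail zeroed out to a block boundary. When the
	 * checksum type implies encryption we had to validate first, while
	 * the keys were still plaintext.
	 */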

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret) {
		journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
		if (journal_debug_buf)
			__bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf);
		kfree(journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (test_bit(JOURNAL_NOCHANGES, &j->flags))
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	bch2_bucket_seq_cleanup(c);

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}