// SPDX-License-Identifier: GPL-2.0

#include "alloc_foreground.h"
#include "btree_update_interior.h"
#include "disk_groups.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"

#include <trace/events/bcachefs.h>

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

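/*
 * Note: with read_entire_journal set, entries outside the replay range are
 * only flagged as ignored here rather than freed, which appears to be so
 * the full journal contents stay available in memory (e.g. for debugging
 * and inspection); loops below must check i->ignore before using an entry.
 */
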
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct journal_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

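/*
 * The resulting nonce layout, as a sketch (assuming struct nonce is four
 * __le32 words, as the casts above imply):
 *
 *   word 0: 0
 *   word 1: low 32 bits of jset->seq (little endian)
 *   word 2: high 32 bits of jset->seq
 *   word 3: BCH_NONCE_JOURNAL type tag
 *
 * Distinct sequence numbers thus get distinct nonces, and the type tag keeps
 * journal nonces from colliding with nonces used for other metadata.
 */
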
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5

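/*
 * journal_entry_err() lets the same validate code serve both directions: on
 * READ a problem is a (possibly repairable) fsck error, on WRITE it's a
 * fatal inconsistency. journal_entry_err_on() evaluates to true when the
 * error fired, so callers can report and repair in a single if ().
 * Validate helpers return FSCK_DELETED_KEY after dropping a bad key, telling
 * the caller not to advance past it (the next key now occupies its slot).
 */
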
static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		struct printbuf buf = PRINTBUF;

		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
				 type, where,
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf.buf);
		printbuf_exit(&buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}

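/*
 * Note the repair strategy above: a bad key is deleted either by truncating
 * entry->u64s at the bad key (when the key's own size field can't be
 * trusted), or by memmove()ing the following keys down over it, then nulling
 * the freed tail with journal_entry_null_range(). Either way the jset's
 * overall layout stays self-consistent, so validation can keep walking.
 */
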
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;
	bool first = true;

	vstruct_for_each(entry, k) {
		if (!first) {
			pr_newline(out);
			pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		}
		pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
		first = false;
	}
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
				 "invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
				 "invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu buckets_unavailable: %llu",
	       le64_to_cpu(u->buckets_ec),
	       le64_to_cpu(u->buckets_unavailable));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	pr_buf(out, "%.*s", bytes, l->d);
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
				version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

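/*
 * Entry types this version doesn't know about (type >= BCH_JSET_ENTRY_NR)
 * validate as success and print a placeholder above - unknown entry types
 * are tolerated rather than treated as corruption, which appears to be for
 * forward compatibility with newer on-disk formats.
 */
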
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
						  le32_to_cpu(jset->version),
						  JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0, err;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	/*
	 * Keep the decrypt return separate from @ret, so a successful decrypt
	 * can't clobber a JOURNAL_ENTRY_BAD from the checksum check above:
	 */
	err = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	bch2_fs_fatal_err_on(err, c,
			     "error decrypting journal entry: %i", err);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

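/*
 * Rough order of checks in jset_validate(), cheapest first: magic (is this
 * a journal entry at all), version, size - possibly asking the caller to
 * re-read with a bigger buffer via JOURNAL_ENTRY_REREAD - then checksum
 * type, checksum, in-place decryption, and finally last_seq consistency.
 * Only the read path verifies the checksum; the write path jumps straight
 * to csum_done, and passes sectors_read == bucket_sectors_left so the
 * REREAD case can't trigger.
 */
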
static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector = offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case 0:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct journal_ptr) {
					.dev		= ca->dev_idx,
					.bucket		= bucket,
					.bucket_offset	= offset -
						bucket_to_sector(ca, ja->buckets[bucket]),
					.sector		= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

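/*
 * A bucket is scanned sequentially: each validated entry tells us its own
 * size, so we can jump from entry to entry. On a checksum error the size
 * field can't be trusted, so scanning resumes at the next block boundary,
 * and continues past the damage (saw_bad) in case valid entries follow.
 */
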
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

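/*
 * Each device's journal is read asynchronously, via the closure_call() in
 * bch2_journal_read() below; jlist->lock serializes only the insertion of
 * entries into the shared replay list, so devices are read in parallel.
 */
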
void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			       struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%u:%u (sector %llu)",
		       j->ptrs[i].dev,
		       j->ptrs[i].bucket,
		       j->ptrs[i].bucket_offset,
		       j->ptrs[i].sector);
	}
}

int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	struct printbuf buf = PRINTBUF;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		ret = -1;
		goto err;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&buf1, c, p);
				pr_buf(&buf1, " size %zu", vstruct_sectors(&p->j, c->block_bits));
			} else
				pr_buf(&buf1, "(none)");
			bch2_journal_ptrs_to_text(&buf2, c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1.buf, buf2.buf);

			printbuf_exit(&buf1);
			printbuf_exit(&buf2);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		printbuf_reset(&buf);
		bch2_replicas_entry_to_text(&buf, &replicas.e);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 buf.buf))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				goto err;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* journal write: */

static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/**
 * journal_write_alloc - allocate journal space for @w, moving on to each
 * device's next journal bucket if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

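/*
 * Write allocation is two-pass: first try appending to each device's
 * current journal bucket, then advance devices that are out of space to
 * their next available bucket and try again. If we still haven't reached
 * replicas_want and allocation was restricted to a target, retry across
 * all devices before giving up.
 */
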
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

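/*
 * Note the pattern above: the copy happens before taking j->lock, and the
 * old buffer is freed after dropping it, so the critical section is just
 * two pointer-sized swaps - readers under j->lock always see a consistent
 * (data, buf_size) pair. After the swaps, new_buf/new_size refer to the old
 * allocation, which is what kvpfree() releases.
 */
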
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		j->seq_ondisk = seq;

		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq = seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(c->io_complete_wq, &j->write_work, 0);
	spin_unlock(&j->lock);

	if (new.unwritten_idx != new.idx &&
	    !journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

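/*
 * unwritten_idx is advanced with a cmpxchg loop rather than a plain store
 * because j->reservations is a packed word that's also updated locklessly
 * elsewhere (e.g. by the journal reservation code); the loop retries until
 * the whole word is updated atomically without losing concurrent changes.
 */
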
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

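/*
 * Flush semantics for a flush (non-JSET_NO_FLUSH) write: REQ_FUA makes the
 * journal entry itself durable before completion, while REQ_PREFLUSH
 * flushes the device's write cache first, ordering the entry after
 * previously completed writes. When separate flush bios were already
 * submitted for every device (w->separate_flush), the preflush here would
 * be redundant and is skipped.
 */
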
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct printbuf journal_debug_buf = PRINTBUF;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    (w->noflush ||
	     (!w->must_flush &&
	      (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	      test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

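/*
 * A write may be demoted to a noflush (JSET_NO_FLUSH) write when nothing
 * requires durability yet: last_seq is meaningless without a flush, so it's
 * zeroed, and at read time entries newer than the newest flush entry are
 * dropped and blacklisted rather than replayed.
 */
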
	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
					      le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
			   jset->encrypted_start,
			   vstruct_end(jset) - (void *) jset->encrypted_start);
	if (bch2_fs_fatal_err_on(ret, c,
				 "error encrypting journal entry: %i", ret))
		goto err;

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret)
		__bch2_journal_debug_to_text(&journal_debug_buf, j);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf.buf);
		printbuf_exit(&journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (c->opts.nochanges)
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_fatal_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}