// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "disk_groups.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>

static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}

struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5

/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct bch_extent_ptr entry_ptr,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos, *dup = NULL;
	struct bch_extent_ptr *ptr;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret = JOURNAL_ENTRY_ADD_OK;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	dup = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
		dup = NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (dup) {
		if (dup->bad) {
			/* we'll replace @dup: */
		} else if (bad) {
			i = dup;
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
				    memcmp(j, &dup->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			i = dup;
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	i->nr_ptrs	= 0;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);

	if (dup) {
		i->nr_ptrs = dup->nr_ptrs;
		memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
		__journal_replay_free(dup);
	}

	list_add(&i->list, where);
found:
	for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
		if (ptr->dev == ca->dev_idx) {
			bch_err(c, "duplicate journal entry %llu on same device",
				le64_to_cpu(i->j.seq));
			goto out;
		}
	}

	if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
		bch_err(c, "found too many copies of journal entry %llu",
			le64_to_cpu(i->j.seq));
		goto out;
	}

	i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
	return ret;
}

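/*
 * Note: after journal_entry_add() the replay list contains at most one
 * journal_replay per sequence number; copies of the same entry found on other
 * devices only contribute an additional entry_ptr (up to ARRAY_SIZE(i->ptrs)),
 * which bch2_journal_read() later turns into a replicas entry.
 */
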
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}

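/*
 * The nonce is derived from the entry's sequence number, so every journal
 * entry gets a unique nonce, and all replicas of the same entry are encrypted
 * identically.
 */
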
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

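/*
 * journal_entry_err() behaves differently by direction: on READ, a problem is
 * an fsck error the user may elect to fix; on WRITE, finding a bad entry about
 * to go to disk means in-memory corruption, so the filesystem is flagged
 * inconsistent and validation fails with BCH_FSCK_ERRORS_NOT_FIXED.
 */
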
#define FSCK_DELETED_KEY 5

static int journal_validate_key(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k, const char *type,
				unsigned version, int big_endian, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in %s entry offset %zi/%u: k->u64s 0",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in %s entry offset %zi/%u: extends past end of journal entry",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in %s entry offset %zi/%u: bad format %u",
			type, where,
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in %s entry offset %zi/%u: %s\n%s",
				 type, where,
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version, big_endian,
				 write, NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}

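/*
 * Note the repair strategy above: a bad key is deleted by shrinking
 * entry->u64s and nulling out the tail of the entry with
 * journal_entry_null_range(), so iteration over the remaining keys stays well
 * formed. FSCK_DELETED_KEY tells the caller not to advance with bkey_next(),
 * since the following key has been moved into the deleted key's slot.
 */
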
static int journal_entry_btree_keys_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, where, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", version, big_endian, write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}

static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct bkey_i *k;

	pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);

	vstruct_for_each(entry, k)
		bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
}

static int journal_entry_btree_root_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, where, entry, 1, entry->btree_id, k,
				    "btree root", version, big_endian, write);
fsck_err:
	return ret;
}

static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	journal_entry_btree_keys_to_text(out, c, entry);
}

static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	/* obsolete, don't care: */
	return 0;
}

static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
}

static int journal_entry_blacklist_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}

static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_blacklist *bl =
		container_of(entry, struct jset_entry_blacklist, entry);

	pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
}

static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
					       const char *where,
					       struct jset_entry *entry,
					       unsigned version, int big_endian, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}

static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
					       struct jset_entry *entry)
{
	struct jset_entry_blacklist_v2 *bl =
		container_of(entry, struct jset_entry_blacklist_v2, entry);

	pr_buf(out, "start=%llu end=%llu",
	       le64_to_cpu(bl->start),
	       le64_to_cpu(bl->end));
}

static int journal_entry_usage_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);

	pr_buf(out, "type=%s v=%llu",
	       bch2_fs_usage_types[u->entry.btree_id],
	       le64_to_cpu(u->v));
}

static int journal_entry_data_usage_validate(struct bch_fs *c,
					     const char *where,
					     struct jset_entry *entry,
					     unsigned version, int big_endian, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
					     struct jset_entry *entry)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);

	bch2_replicas_entry_to_text(out, &u->r);
	pr_buf(out, "=%llu", le64_to_cpu(u->v));
}

static int journal_entry_clock_validate(struct bch_fs *c,
					const char *where,
					struct jset_entry *entry,
					unsigned version, int big_endian, int write)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes != sizeof(*clock),
				 c, "invalid journal entry clock: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(clock->rw > 1,
				 c, "invalid journal entry clock: bad rw")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
					struct jset_entry *entry)
{
	struct jset_entry_clock *clock =
		container_of(entry, struct jset_entry_clock, entry);

	pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
}

static int journal_entry_dev_usage_validate(struct bch_fs *c,
					    const char *where,
					    struct jset_entry *entry,
					    unsigned version, int big_endian, int write)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	unsigned expected = sizeof(*u);
	unsigned dev;
	int ret = 0;

	if (journal_entry_err_on(bytes < expected,
				 c, "invalid journal entry dev usage: bad size (%u < %u)",
				 bytes, expected)) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	dev = le32_to_cpu(u->dev);

	if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
				 c, "invalid journal entry dev usage: bad dev")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

	if (journal_entry_err_on(u->pad,
				 c, "invalid journal entry dev usage: bad pad")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}

static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
					    struct jset_entry *entry)
{
	struct jset_entry_dev_usage *u =
		container_of(entry, struct jset_entry_dev_usage, entry);
	unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

	pr_buf(out, "dev=%u", le32_to_cpu(u->dev));

	for (i = 0; i < nr_types; i++) {
		if (i < BCH_DATA_NR)
			pr_buf(out, " %s", bch2_data_types[i]);
		else
			pr_buf(out, " (unknown data type %u)", i);
		pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
		       le64_to_cpu(u->d[i].buckets),
		       le64_to_cpu(u->d[i].sectors),
		       le64_to_cpu(u->d[i].fragmented));
	}

	pr_buf(out, " buckets_ec: %llu buckets_unavailable: %llu",
	       le64_to_cpu(u->buckets_ec),
	       le64_to_cpu(u->buckets_unavailable));
}

static int journal_entry_log_validate(struct bch_fs *c,
				      const char *where,
				      struct jset_entry *entry,
				      unsigned version, int big_endian, int write)
{
	return 0;
}

static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
				      struct jset_entry *entry)
{
	struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
	unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);

	bch_scnmemcpy(out, l->d, strnlen(l->d, bytes));
}

struct jset_entry_ops {
	int (*validate)(struct bch_fs *, const char *,
			struct jset_entry *, unsigned, int, int);
	void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_##f##_validate,	\
		.to_text	= journal_entry_##f##_to_text,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

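/*
 * The BCH_JSET_ENTRY_TYPES() x-macro expands to one jset_entry_ops slot per
 * journal entry type, so the validate/to_text dispatch below is a simple
 * table lookup on entry->type.
 */
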
int bch2_journal_entry_validate(struct bch_fs *c, const char *where,
				struct jset_entry *entry,
				unsigned version, int big_endian, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, where, entry,
				version, big_endian, write)
		: 0;
}

void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
				struct jset_entry *entry)
{
	if (entry->type < BCH_JSET_ENTRY_NR) {
		pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
		bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
	} else {
		pr_buf(out, "(unknown type %u)", entry->type);
	}
}

static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	char buf[100];
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u",
			  le64_to_cpu(jset->seq),
			  (u64 *) entry - jset->_data,
			  le32_to_cpu(jset->u64s));

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = bch2_journal_entry_validate(c, buf, entry,
					le32_to_cpu(jset->version),
					JSET_BIG_ENDIAN(jset), write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}

static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}

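/*
 * JOURNAL_ENTRY_REREAD is not an error: it tells journal_read_bucket() that
 * the entry is larger than what has been read into the buffer so far, so the
 * read must be retried (growing the buffer if the entry is bigger than the
 * buffer itself).
 */
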
static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}

struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}

static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = block_sectors(c);
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = block_sectors(c);
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, (struct bch_extent_ptr) {
					.dev = ca->dev_idx,
					.offset	= offset,
					}, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}

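/*
 * A bucket is scanned entry by entry: the offset of the next entry comes from
 * the current entry's size, except after a checksum failure (saw_bad above),
 * where the size field isn't trusted and scanning resumes at the next block
 * boundary.
 */
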
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct bch_fs *c = ca->fs;
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}

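/*
 * This runs once per device, as a closure subordinate to bch2_journal_read()'s
 * jlist.cl, so all devices are read in parallel; errors are reported back
 * through jlist->ret.
 */
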
static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
				      struct journal_replay *j)
{
	unsigned i;

	for (i = 0; i < j->nr_ptrs; i++) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev);
		u64 offset;

		div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);

		if (i)
			pr_buf(out, " ");
		pr_buf(out, "%u:%llu (offset %llu)",
		       j->ptrs[i].dev,
		       (u64) j->ptrs[i].offset, offset);
	}
}

int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
		     ca->mi.state == BCH_MEMBER_STATE_ro) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		return -1;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;
			char buf1[200], buf2[200];

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (i->list.prev != list) {
				struct printbuf out = PBUF(buf1);
				struct journal_replay *p = list_prev_entry(i, list);

				bch2_journal_ptrs_to_text(&out, c, p);
				pr_buf(&out, " size %llu", vstruct_sectors(&p->j, c->block_bits));
			} else
				sprintf(buf1, "(none)");
			bch2_journal_ptrs_to_text(&PBUF(buf2), c, i);

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
				 "  prev at %s\n"
				 "  next at %s",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1,
				 buf1, buf2);
		}
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas = {
			.e.data_type = BCH_DATA_journal,
			.e.nr_required = 1,
		};
		unsigned ptr;
		char buf[80];

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;

		bch2_replicas_entry_sort(&replicas.e);

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
fsck_err:
	return ret;
}

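/*
 * Summary of the read path above: entries from all devices are merged and
 * deduplicated into @list, non-flush entries newer than the most recent flush
 * are dropped (and will be blacklisted), gaps in the sequence are reported,
 * and each surviving entry's devices are marked in the superblock's replicas
 * section unless we're mounting degraded.
 */
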
/* journal write: */

static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;
		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_rw ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}

/*
 * journal_write_alloc - pick the devices this journal entry will be written
 * to, moving each device on to its next journal bucket if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_devs_mask devs;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned target = c->opts.metadata_target ?:
		c->opts.foreground_target;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();
retry:
	devs = target_rw_devs(c, BCH_DATA_journal, target);

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas < replicas_want && target) {
		/* Retry from all devices: */
		target = 0;
		goto retry;
	}
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}

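/*
 * Allocation proceeds in up to three passes: once with each device's current
 * journal bucket, once after advancing devices to a fresh bucket, and - if a
 * metadata target was specified and we still don't have enough replicas -
 * once more with the target restriction dropped.
 */
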
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}

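/*
 * Compaction only ever moves data down in memory (memmove_u64s_down), so it
 * can be done in place, before the jset's final size is computed from the
 * position of the last surviving entry.
 */
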
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}

static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}

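/*
 * j->buf is a small ring of journal buffers; reservations.unwritten_idx is the
 * oldest buffer not yet fully written, which is the one the write path below
 * operates on.
 */
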
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq;
	int err = 0;

	bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
			       ? j->flush_write_time
			       : j->noflush_write_time, j->write_start_time);

	if (!w->devs_written.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 w->devs_written);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = w->devs_written;

	if (!err) {
		j->seq_ondisk = seq;

		if (!JSET_NO_FLUSH(w->data)) {
			j->flushed_seq_ondisk = seq;
			j->last_seq_ondisk = w->last_seq;
		}
	} else if (!j->err_seq || seq < j->err_seq)
		j->err_seq	= seq;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(c->io_complete_wq, &j->write_work, 0);
	spin_unlock(&j->lock);

	if (new.unwritten_idx != new.idx &&
	    !journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	unsigned long flags;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
			       le64_to_cpu(w->data->seq),
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		spin_lock_irqsave(&j->err_lock, flags);
		bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}

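/*
 * On a write error the device is only dropped from w->devs_written here;
 * whether the entry still reached enough devices is judged once all bios
 * complete, in journal_write_done().
 */
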
static void do_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_extent_ptr *ptr;
	struct bio *bio;
	unsigned sectors = vstruct_sectors(w->data, c->block_bits);

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;

		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
		ca->prev_journal_sector = bio->bi_iter.bi_sector;

		if (!JSET_NO_FLUSH(w->data))
			bio->bi_opf |= REQ_FUA;
		if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
			bio->bi_opf |= REQ_PREFLUSH;

		bch2_bio_map(bio, w->data, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] =
			le64_to_cpu(w->data->seq);
	}

	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
}

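/*
 * Flush handling: flush writes set REQ_FUA on every journal write; when only
 * one device is involved REQ_PREFLUSH is attached to the write itself, while
 * with several rw devices (w->separate_flush) explicit REQ_OP_FLUSH bios are
 * submitted to each device first, from bch2_journal_write() below.
 */
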
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	char *journal_debug_buf = NULL;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    (w->noflush ||
	     (!w->must_flush &&
	      (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
	      test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq	= 0;
		w->last_seq	= 0;

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	bch2_journal_super_entries_add_common(c, &end,
				le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	if (ret) {
		journal_debug_buf = kmalloc(4096, GFP_ATOMIC);
		if (journal_debug_buf)
			__bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write:\n%s",
			journal_debug_buf);
		kfree(journal_debug_buf);
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, c->io_complete_wq);
		return;
	}

	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));

	if (test_bit(JOURNAL_NOCHANGES, &j->flags))
		goto no_io;

	for_each_rw_member(ca, c, i)
		nr_rw_members++;

	if (nr_rw_members > 1)
		w->separate_flush = true;

	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
		for_each_rw_member(ca, c, i) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
	}

	continue_at(cl, do_journal_write, c->io_complete_wq);
	return;
no_io:
	continue_at(cl, journal_write_done, c->io_complete_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, c->io_complete_wq);
}