// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>
static void __journal_replay_free(struct journal_replay *i)
{
	list_del(&i->list);
	kvpfree(i, offsetof(struct journal_replay, j) +
		vstruct_bytes(&i->j));
}

static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
{
	i->ignore = true;

	if (!c->opts.read_entire_journal)
		__journal_replay_free(i);
}
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
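
/*
 * Note: these are positive, in-band status codes, not errnos; callers (see
 * journal_read_bucket() below) treat any other nonzero return from
 * journal_entry_add() as a fatal error.
 */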
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_list *jlist, struct jset *j,
			     bool bad)
{
	struct journal_replay *i, *pos;
	struct bch_devs_list devs = { .nr = 0 };
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	u64 last_seq = 0;
	int ret;

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq = le64_to_cpu(i->j.last_seq);
			break;
		}
	}

	/* Is this entry older than the range we need? */
	if (!c->opts.read_entire_journal &&
	    le64_to_cpu(j->seq) < last_seq) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	if (!JSET_NO_FLUSH(j)) {
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			journal_replay_free(c, i);
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	i = where->next != jlist->head
		? container_of(where->next, struct journal_replay, list)
		: NULL;

	/*
	 * Duplicate journal entries? If so we want the one that didn't have a
	 * checksum error:
	 */
	if (i && le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
		if (i->bad) {
			devs = i->devs;
			__journal_replay_free(i);
		} else if (bad) {
			goto found;
		} else {
			fsck_err_on(bytes != vstruct_bytes(&i->j) ||
				    memcmp(j, &i->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			goto found;
		}
	}

	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&i->list, where);
	i->devs		= devs;
	i->bad		= bad;
	i->ignore	= false;
	memcpy(&i->j, j, bytes);
found:
	if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
		bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
	else
		fsck_err_on(1, c, "duplicate journal entries on same device");
	ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
	return ret;
}
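
/*
 * Example: if seq 100 is read from two devices, the second read takes the
 * found: path above and only adds the second device to i->devs;
 * bch2_journal_read() later turns that device list into a replicas entry. A
 * copy that failed its checksum (bad == true) is kept only until a good copy
 * of the same seq shows up.
 */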
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
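
/*
 * The nonce is (0, seq_lo, seq_hi, BCH_NONCE_JOURNAL): words 1 and 2 are the
 * two halves of the little-endian 64 bit sequence number, so every journal
 * entry is checksummed/encrypted with a distinct nonce.
 */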
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
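
/*
 * A zeroed jset_entry has u64s == 0, so vstruct_next() still advances past it
 * one header at a time, and journal_write_compact() later drops it as an
 * empty entry.
 */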
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)

#define FSCK_DELETED_KEY	5
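
/*
 * Protocol for the validators below: when a bkey fails validation it is
 * deleted in place - either the entry is truncated at that key, or the keys
 * after it are memmove()d down over it - and FSCK_DELETED_KEY is returned so
 * the caller knows the next key to look at is at the same address (hence the
 * continue without bkey_next() in journal_entry_validate_btree_keys()).
 */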
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				const char *type, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: k->u64s 0",
			type, le64_to_cpu(jset->seq),
			(u64 *) entry - jset->_data,
			le32_to_cpu(jset->u64s),
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: extends past end of journal entry",
			type, le64_to_cpu(jset->seq),
			(u64 *) entry - jset->_data,
			le32_to_cpu(jset->u64s),
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s))) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: bad format %u",
			type, le64_to_cpu(jset->seq),
			(u64 *) entry - jset->_data,
			le32_to_cpu(jset->u64s),
			(u64 *) k - entry->_data,
			le16_to_cpu(entry->u64s),
			k->k.format)) {
		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version,
			    JSET_BIG_ENDIAN(jset), write,
			    NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: %s\n%s",
				 type, le64_to_cpu(jset->seq),
				 (u64 *) entry - jset->_data,
				 le32_to_cpu(jset->u64s),
				 (u64 *) k - entry->_data,
				 le16_to_cpu(entry->u64s),
				 invalid, buf);

		le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return FSCK_DELETED_KEY;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version,
			    JSET_BIG_ENDIAN(jset), write,
			    NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}
static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;

	while (k != vstruct_last(entry)) {
		int ret = journal_validate_key(c, jset, entry,
					       entry->level,
					       entry->btree_id,
					       k, "key", write);
		if (ret == FSCK_DELETED_KEY)
			continue;

		k = bkey_next(k);
	}

	return 0;
}
static int journal_entry_validate_btree_root(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				    "btree root", write);
fsck_err:
	return ret;
}
static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	/* obsolete, don't care: */
	return 0;
}
static int journal_entry_validate_blacklist(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}
static int journal_entry_validate_usage(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}
static int journal_entry_validate_data_usage(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};

static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
				  struct jset_entry *entry, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset,
							    entry, write)
		: 0;
}
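
/*
 * BCH_JSET_ENTRY_TYPES() expands x(f, nr) once per entry type, so the table
 * above gets one journal_entry_validate_##f per type. Entry types >=
 * BCH_JSET_ENTRY_NR - i.e. from a newer version - validate as a no-op rather
 * than an error, for forwards compatibility.
 */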
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = journal_entry_validate(c, jset, entry, write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
static int jset_validate(struct bch_fs *c,
			 struct bch_dev *ca,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
				  version < bcachefs_metadata_version_min) ||
				 version >= bcachefs_metadata_version_max, c,
			"%s sector %llu seq %llu: unknown journal entry version %u",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			version)) {
		/* don't try to continue: */
		return EINVAL;
	}

	if (bytes > (sectors_read << 9) &&
	    sectors_read < bucket_sectors_left)
		return JOURNAL_ENTRY_REREAD;

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"%s sector %llu seq %llu: journal entry too big (%zu bytes)",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq), bytes)) {
		ret = JOURNAL_ENTRY_BAD;
		le32_add_cpu(&jset->u64s,
			     -((bytes - (bucket_sectors_left << 9)) / 8));
	}

	if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"%s sector %llu seq %llu: journal entry with unknown csum type %llu",
			ca ? ca->name : c->name,
			sector, le64_to_cpu(jset->seq),
			JSET_CSUM_TYPE(jset))) {
		ret = JOURNAL_ENTRY_BAD;
		goto csum_done;
	}

	if (write)
		goto csum_done;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "%s sector %llu seq %llu: journal checksum bad",
				 ca ? ca->name : c->name,
				 sector, le64_to_cpu(jset->seq)))
		ret = JOURNAL_ENTRY_BAD;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);
csum_done:
	/* last_seq is ignored when JSET_NO_FLUSH is true */
	if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
				 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq (%llu > %llu)",
				 le64_to_cpu(jset->last_seq),
				 le64_to_cpu(jset->seq))) {
		jset->last_seq = jset->seq;
		return JOURNAL_ENTRY_BAD;
	}
fsck_err:
	return ret;
}
static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
{
	unsigned sectors = vstruct_sectors(jset, c->block_bits);

	return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
		jset_validate_entries(c, jset, WRITE);
}
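
/*
 * At write time we're validating the entry we're about to write: sector and
 * bucket bounds aren't meaningful yet, so the entry's own size is passed for
 * both limits, and checksum verification is skipped (the checksum hasn't been
 * computed at this point).
 */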
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read error: sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal")) {
				/*
				 * We don't error out of the recovery process
				 * here, since the relevant journal entry may be
				 * found on a different device, and missing or
				 * no journal entries will be handled later
				 */
				return 0;
			}

			j = buf->data;
		}

		ret = jset_validate(c, ca, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			sectors = vstruct_sectors(j, c->block_bits);
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			/*
			 * On checksum error we don't really trust the size
			 * field of the journal entry we read, so try reading
			 * again at next block boundary:
			 */
			sectors = c->opts.block_size;
			break;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, jlist, j, ret != 0);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
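
/*
 * Reading a bucket, in short: read up to buf->size bytes, walk the jsets one
 * vstruct at a time, reread with a bigger buffer if an entry is larger than
 * the buffer, skip to the next block boundary after a bad checksum, and stop
 * early once sequence numbers regress below what we've already seen for this
 * bucket - the tail is just stale data from an earlier pass through the
 * journal.
 */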
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret = 0;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
int bch2_journal_read(struct bch_fs *c, struct list_head *list,
		      u64 *blacklist_seq, u64 *start_seq)
{
	struct journal_list jlist;
	struct journal_replay *i, *t;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	u64 seq, last_seq = 0;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
		     ca->mi.state == BCH_MEMBER_STATE_RO) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_info(c, "journal read done, but no entries found");
		return 0;
	}

	i = list_last_entry(list, struct journal_replay, list);
	*start_seq = le64_to_cpu(i->j.seq) + 1;

	/*
	 * Find most recent flush entry, and ignore newer non flush entries -
	 * those entries will be blacklisted:
	 */
	list_for_each_entry_safe_reverse(i, t, list, list) {
		if (i->ignore)
			continue;

		if (!JSET_NO_FLUSH(&i->j)) {
			last_seq	= le64_to_cpu(i->j.last_seq);
			*blacklist_seq	= le64_to_cpu(i->j.seq) + 1;
			break;
		}

		journal_replay_free(c, i);
	}

	if (!last_seq) {
		fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
		return -1;
	}

	/* Drop blacklisted entries and entries older than last_seq: */
	list_for_each_entry_safe(i, t, list, list) {
		if (i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		if (seq < last_seq) {
			journal_replay_free(c, i);
			continue;
		}

		if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
			fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
				    "found blacklisted journal entry %llu", seq);

			journal_replay_free(c, i);
		}
	}

	/* Check for missing entries: */
	seq = last_seq;
	list_for_each_entry(i, list, list) {
		if (i->ignore)
			continue;

		BUG_ON(seq > le64_to_cpu(i->j.seq));

		while (seq < le64_to_cpu(i->j.seq)) {
			u64 missing_start, missing_end;

			while (seq < le64_to_cpu(i->j.seq) &&
			       bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			if (seq == le64_to_cpu(i->j.seq))
				break;

			missing_start = seq;

			while (seq < le64_to_cpu(i->j.seq) &&
			       !bch2_journal_seq_is_blacklisted(c, seq, false))
				seq++;

			missing_end = seq - 1;
			fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 missing_start, missing_end,
				 last_seq, *blacklist_seq - 1);
		}

		seq++;
	}

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas;
		char buf[80];

		if (i->ignore)
			continue;

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, i->devs);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, *start_seq);

	if (*start_seq != *blacklist_seq)
		bch_info(c, "dropped unflushed entries %llu-%llu",
			 *blacklist_seq, *start_seq - 1);
fsck_err:
	return ret;
}
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_RW ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
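
/*
 * Note on the offset computed above: it's the current write position within
 * the device's journal bucket - the bucket's start sector plus however much
 * of the bucket has already been consumed (bucket_size - sectors_free).
 */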
/*
 * journal_write_alloc - allocate journal space for a write, advancing devices
 * to their next journal bucket where necessary:
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
					  &c->rw_devs[BCH_DATA_journal]);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);
done:
	rcu_read_unlock();

	BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
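
/*
 * Example of a merge: two adjacent BCH_JSET_ENTRY_btree_keys entries for the
 * same btree and level become one entry whose u64s is the sum of the two,
 * with the second entry's keys copied down to follow the first's.
 */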
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);

	spin_lock(&j->lock);
	swap(buf->data,		new_buf);
	swap(buf->buf_size,	new_size);
	spin_unlock(&j->lock);

	kvpfree(new_buf, new_size);
}
static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
{
	return j->buf + j->reservations.unwritten_idx;
}
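
/*
 * j->buf is a small ring of journal buffers: reservations.idx is the buffer
 * currently open for new reservations, and unwritten_idx trails it, pointing
 * at the oldest buffer whose write hasn't completed yet.
 */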
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct bch_devs_list devs =
		bch2_bkey_devs(bkey_i_to_s_c(&w->key));
	struct bch_replicas_padded replicas;
	union journal_res_state old, new;
	u64 v, seq, last_seq;
	int err = 0;

	bch2_time_stats_update(j->write_time, j->write_start_time);

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		err = -EIO;
	} else {
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs);
		if (bch2_mark_replicas(c, &replicas.e))
			err = -EIO;
	}

	if (err)
		bch2_fatal_error(c);

	spin_lock(&j->lock);
	seq = le64_to_cpu(w->data->seq);
	last_seq = le64_to_cpu(w->data->last_seq);

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = devs;

	j->seq_ondisk = seq;
	if (err && (!j->err_seq || seq < j->err_seq))
		j->err_seq	= seq;

	if (!JSET_NO_FLUSH(w->data)) {
		j->flushed_seq_ondisk = seq;
		j->last_seq_ondisk = last_seq;
	}

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	journal_reclaim_kick(&c->journal);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;
		BUG_ON(new.idx == new.unwritten_idx);

		new.unwritten_idx++;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	bch2_journal_space_available(j);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);

	if (new.unwritten_idx != new.idx &&
	    !journal_state_count(new, new.unwritten_idx))
		closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write error: %s",
			       bch2_blk_status_to_str(bio->bi_status)) ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_last_unwritten_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_last_unwritten_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct bch_extent_ptr *ptr;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s;
	int ret;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	spin_lock(&j->lock);
	if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
	    !w->must_flush &&
	    (jiffies - j->last_flush_write) < msecs_to_jiffies(j->write_delay_ms) &&
	    test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
		w->noflush = true;
		SET_JSET_NO_FLUSH(jset, true);
		jset->last_seq = cpu_to_le64(j->last_seq_ondisk);

		j->nr_noflush_writes++;
	} else {
		j->last_flush_write = jiffies;
		j->nr_flush_writes++;
	}
	spin_unlock(&j->lock);

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	end	= bch2_journal_super_entries_add_common(c, end,
						le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	jset->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	jset->magic		= cpu_to_le64(jset_magic(c));

	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (journal_entry_empty(jset))
		j->last_empty_seq = le64_to_cpu(jset->seq);

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) <= bcachefs_metadata_version_inode_btree_change)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		    jset->encrypted_start,
		    vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_for_write(c, jset))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write");
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
		return;
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;
		if (!JSET_NO_FLUSH(jset))
			bio->bi_opf |= REQ_PREFLUSH|REQ_FUA;
		bch2_bio_map(bio, jset, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(jset->seq);
	}

	if (!JSET_NO_FLUSH(jset)) {
		for_each_rw_member(ca, c, i)
			if (journal_flushes_device(ca) &&
			    !bch2_bkey_has_device(bkey_i_to_s_c(&w->key), i)) {
				percpu_ref_get(&ca->io_ref);

				bio = ca->journal.bio;
				bio_reset(bio);
				bio_set_dev(bio, ca->disk_sb.bdev);
				bio->bi_opf		= REQ_OP_FLUSH;
				bio->bi_end_io		= journal_write_endio;
				bio->bi_private		= ca;
				closure_bio_submit(bio, cl);
			}
	}
no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, system_highpri_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}