// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_io.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
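
/*
 * These are positive codes rather than negative errnos:
 * JOURNAL_ENTRY_ADD_OUT_OF_RANGE means the entry was deliberately skipped,
 * not that something failed.
 */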
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_list *jlist, struct jset *j)
{
	struct journal_replay *i, *pos;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	__le64 last_seq;
	int ret;

	last_seq = !list_empty(jlist->head)
		? list_last_entry(jlist->head, struct journal_replay,
				  list)->j.last_seq
		: 0;

	if (!c->opts.read_entire_journal) {
		/* Is this entry older than the range we need? */
		if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
			ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
			goto out;
		}

		/* Drop entries we don't need anymore */
		list_for_each_entry_safe(i, pos, jlist->head, list) {
			if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
				break;
			list_del(&i->list);
			kvpfree(i, offsetof(struct journal_replay, j) +
				vstruct_bytes(&i->j));
		}
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		/* Duplicate? */
		if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
			fsck_err_on(bytes != vstruct_bytes(&i->j) ||
				    memcmp(j, &i->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			goto found;
		}

		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&i->list, where);
	i->devs.nr = 0;
	memcpy(&i->j, j, bytes);
found:
	if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
		bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
	else
		fsck_err_on(1, c, "duplicate journal entries on same device");
	ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
	return ret;
}
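
/*
 * The nonce used for journal checksums/encryption is derived from the
 * entry's sequence number, so every jset is checksummed/encrypted with a
 * unique nonce:
 */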
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7
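
/*
 * Return codes used internally by jset_validate()/journal_read_bucket():
 * REREAD means the entry is bigger than what's been read so far, NONE means
 * there's no journal entry at this offset (bad magic), BAD means the entry
 * failed validation.
 */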
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				unsigned level, enum btree_id btree_id,
				struct bkey_i *k,
				const char *type, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in journal: k->u64s 0", type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in journal: extends past end of journal entry",
			type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in journal: bad format %u",
			type, k->k.format)) {
		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (!write)
		bch2_bkey_compat(level, btree_id, version,
				 JSET_BIG_ENDIAN(jset), write,
				 NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
				    __btree_node_type(level, btree_id));
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
				 type, invalid, buf);

		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (write)
		bch2_bkey_compat(level, btree_id, version,
				 JSET_BIG_ENDIAN(jset), write,
				 NULL, bkey_to_packed(k));
fsck_err:
	return ret;
}
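
/*
 * The journal_entry_validate_*() functions below are dispatched by entry
 * type through bch2_jset_entry_ops; their names must match the
 * BCH_JSET_ENTRY_TYPES() x-macro list.
 */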
static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k;

	vstruct_for_each(entry, k) {
		int ret = journal_validate_key(c, jset, entry,
					       entry->level,
					       entry->btree_id, k,
					       "key", write);
		if (ret)
			return ret;
	}
	return 0;
}
static int journal_entry_validate_btree_root(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
				    "btree root", write);
fsck_err:
	return ret;
}
static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	/* obsolete, don't care: */
	return 0;
}
static int journal_entry_validate_blacklist(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}
static int journal_entry_validate_usage(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_data_usage(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}
fsck_err:
	return ret;
}
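
/*
 * Usage/data usage entries persist accounting information in the journal;
 * only their sizes are sanity checked here - their contents are consumed
 * during recovery.
 */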
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
				  struct jset_entry *entry, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset,
							    entry, write)
		: 0;
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = journal_entry_validate(c, jset, entry, write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
static int jset_validate(struct bch_fs *c,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if ((version != BCH_JSET_VERSION_OLD &&
	     version < bcachefs_metadata_version_min) ||
	    version >= bcachefs_metadata_version_max) {
		bch_err(c, "unknown journal entry version %u", version);
		return BCH_FSCK_UNKNOWN_VERSION;
	}

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
				 "journal entry too big (%zu bytes), sector %llu",
				 bytes, sector)) {
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	if (bytes > sectors_read << 9)
		return JOURNAL_ENTRY_REREAD;

	if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"journal entry with unknown csum type %llu sector %llu",
			JSET_CSUM_TYPE(jset), sector))
		return JOURNAL_ENTRY_BAD;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "journal checksum bad, sector %llu", sector)) {
		/* XXX: retry IO, when we start retrying checksum errors */
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq"))
		jset->last_seq = jset->seq;

	return 0;
fsck_err:
	return ret;
}
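
/*
 * The journal read path: buckets are read into a buffer that starts at
 * PAGE_SIZE and is grown (via JOURNAL_ENTRY_REREAD) whenever an entry turns
 * out to be bigger than what's been read so far:
 */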
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read from sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal"))
				return -EIO;

			j = buf->data;
		}

		ret = jset_validate(c, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			sectors = c->opts.block_size;
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}

		sectors = vstruct_sectors(j, c->block_bits);
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
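
/*
 * Each device's journal buckets are read by a separate closure, in
 * parallel; results are merged into the shared list under jlist->lock:
 */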
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
	struct journal_list jlist;
	struct journal_replay *i;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
		     ca->mi.state == BCH_MEMBER_STATE_RO) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas;
		char buf[80];

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	if (!list_empty(list)) {
		i = list_last_entry(list, struct journal_replay, list);

		bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
			 keys, entries, le64_to_cpu(i->j.seq));
	}
fsck_err:
	return ret;
}
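
/*
 * Journal write path: allocate on-disk space on enough devices to satisfy
 * c->opts.metadata_replicas, then issue the writes:
 */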
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_RW ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(c, ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
/*
 * journal_write_alloc - allocate space in the journal for the next write,
 * moving on to the next journal bucket if necessary
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
					  &c->rw_devs[BCH_DATA_JOURNAL]);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);
done:
	rcu_read_unlock();

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);
	kvpfree(buf->data, buf->buf_size);
	buf->data	= new_buf;
	buf->buf_size	= new_size;
}
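
/*
 * Called via closure when all replicas of a journal write have completed
 * (or failed): updates seq_ondisk/last_seq_ondisk and kicks journal
 * reclaim, which may now be able to discard more buckets:
 */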
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_prev_buf(j);
	struct bch_devs_list devs =
		bch2_bkey_devs(bkey_i_to_s_c(&w->key));
	struct bch_replicas_padded replicas;
	u64 seq = le64_to_cpu(w->data->seq);
	u64 last_seq = le64_to_cpu(w->data->last_seq);

	bch2_time_stats_update(j->write_time, j->write_start_time);

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		goto err;
	}

	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);

	if (bch2_mark_replicas(c, &replicas.e))
		goto err;

	spin_lock(&j->lock);
	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = devs;

	j->seq_ondisk		= seq;
	j->last_seq_ondisk	= last_seq;
	bch2_journal_space_available(j);

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	mod_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
out:
	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	BUG_ON(!j->reservations.prev_buf_unwritten);
	atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
		     &j->reservations.counter);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);
	return;
err:
	bch2_fatal_error(c);
	spin_lock(&j->lock);
	goto out;
}
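
/*
 * On write error, drop the failed device from the key so that
 * journal_write_done() only counts replicas that actually made it to disk:
 */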
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_prev_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_prev_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct bch_extent_ptr *ptr;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s;
	int ret;

	bch2_journal_pin_put(j, le64_to_cpu(w->data->seq));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	/*
	 * New btree roots are set by journalling them; when the journal entry
	 * gets written we have to propagate them to c->btree_roots
	 *
	 * But, every journal entry we write has to contain all the btree roots
	 * (at least for now); so after we copy btree roots to c->btree_roots we
	 * have to get any missing btree roots and add them to this journal
	 * entry:
	 */

	bch2_journal_entries_to_btree_roots(c, jset);

	start = end = vstruct_last(jset);

	end	= bch2_btree_roots_to_journal_entries(c, jset->start, end);

	end	= bch2_journal_super_entries_add_common(c, end,
						le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	jset->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	jset->magic		= cpu_to_le64(jset_magic(c));

	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_max)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

retry_alloc:
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	if (ret && j->can_discard) {
		spin_unlock(&j->lock);
		bch2_journal_do_discards(j);
		goto retry_alloc;
	}

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write");
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
		return;
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch2_bio_map(bio, jset, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(jset->seq);
	}

	for_each_rw_member(ca, c, i)
		if (journal_flushes_device(ca) &&
		    !bch2_bkey_has_device(bkey_i_to_s_c(&w->key), i)) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}

no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, system_highpri_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}