// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};
#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
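/*
 * Note: these are positive, in-band status codes returned by
 * journal_entry_add(), distinct from negative errnos such as -ENOMEM;
 * callers treat both of these as non-fatal and only bail on real errors.
 */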
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_list *jlist, struct jset *j)
{
	struct journal_replay *i, *pos;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	__le64 last_seq;
	int ret;

	last_seq = !list_empty(jlist->head)
		? list_last_entry(jlist->head, struct journal_replay,
				  list)->j.last_seq
		: 0;

	/* Is this entry older than the range we need? */
	if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	list_for_each_entry_safe(i, pos, jlist->head, list) {
		if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
			break;
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		/* Duplicate? */
		if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
			fsck_err_on(bytes != vstruct_bytes(&i->j) ||
				    memcmp(j, &i->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			goto found;
		}

		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&i->list, where);
	i->devs.nr = 0;
	memcpy(&i->j, j, bytes);
found:
	if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
		bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
	else
		fsck_err_on(1, c, "duplicate journal entries on same device");
	ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
	return ret;
}
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
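/*
 * Layout of the 16-byte nonce above: word 0 is zero, words 1-2 hold the
 * 64-bit journal sequence number, word 3 is the journal nonce type.
 * Since seq is unique per journal entry, every jset gets a distinct
 * nonce for encryption and checksumming.
 */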
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
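/*
 * Note: after each memset() the header's u64s field reads as zero, so
 * vstruct_next() advances by a single struct jset_entry (8 bytes); the
 * loop therefore turns the whole range into back-to-back empty entries,
 * which later validation passes (and journal_write_compact()) skip.
 */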
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7
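/*
 * Return codes for jset_validate(), consumed by journal_read_bucket():
 * REREAD means the entry is bigger than what has been read so far and
 * the caller should retry with a larger buffer, NONE means we hit
 * sectors that were never a journal entry (wrong magic), and BAD means
 * an entry that failed validation (e.g. bad checksum).
 */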
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
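/*
 * Note: these macros rely on 'write', 'ret' and an fsck_err label being
 * in scope at each call site. On READ, errors are repairable and go
 * through the normal fsck path; on WRITE, finding corrupt metadata we
 * ourselves are about to write is treated as a fatal inconsistency.
 */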
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				struct bkey_i *k, enum btree_node_type key_type,
				const char *type, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in journal: k->u64s 0", type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				(void *) vstruct_next(entry), c,
			"invalid %s in journal: extends past end of journal entry",
			type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in journal: bad format %u",
			type, k->k.format)) {
		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN) {
		bch2_bkey_swab_key(NULL, bkey_to_packed(k));
		bch2_bkey_swab_val(bkey_i_to_s(k));
	}

	if (!write &&
	    version < bcachefs_metadata_version_bkey_renumber)
		bch2_bkey_renumber(key_type, bkey_to_packed(k), write);

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), key_type);
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
				 type, invalid, buf);

		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (write &&
	    version < bcachefs_metadata_version_bkey_renumber)
		bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
fsck_err:
	return ret;
}
static int journal_entry_validate_btree_keys(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	struct bkey_i *k;

	vstruct_for_each(entry, k) {
		int ret = journal_validate_key(c, jset, entry, k,
				__btree_node_type(entry->level,
						  entry->btree_id),
				"key", write);
		if (ret)
			return ret;
	}

	return 0;
}
static int journal_entry_validate_btree_root(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
			"invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
				    "btree root", write);
fsck_err:
	return ret;
}
static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	/* obsolete, don't care: */
	return 0;
}
static int journal_entry_validate_blacklist(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
			"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
			"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
			"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}
static int journal_entry_validate_usage(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}
static int journal_entry_validate_data_usage(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry, int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
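/*
 * The x() macro above is expanded once per entry type by
 * BCH_JSET_ENTRY_TYPES(), so each BCH_JSET_ENTRY_* constant indexes its
 * matching handler; e.g. x(btree_keys, 0) wires
 * BCH_JSET_ENTRY_btree_keys to journal_entry_validate_btree_keys().
 */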
static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
				  struct jset_entry *entry, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset,
							    entry, write)
		: 0;
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = journal_entry_validate(c, jset, entry, write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
static int jset_validate(struct bch_fs *c,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if ((version != BCH_JSET_VERSION_OLD &&
	     version < bcachefs_metadata_version_min) ||
	    version >= bcachefs_metadata_version_max) {
		bch_err(c, "unknown journal entry version %u", version);
		return BCH_FSCK_UNKNOWN_VERSION;
	}

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
				 "journal entry too big (%zu bytes), sector %llu",
				 bytes, sector)) {
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	if (bytes > sectors_read << 9)
		return JOURNAL_ENTRY_REREAD;

	if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"journal entry with unknown csum type %llu sector %llu",
			JSET_CSUM_TYPE(jset), sector))
		return JOURNAL_ENTRY_BAD;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "journal checksum bad, sector %llu", sector)) {
		/* XXX: retry IO, when we start retrying checksum errors */
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq"))
		jset->last_seq = jset->seq;

	return 0;
fsck_err:
	return ret;
}
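/*
 * Note on the failure modes above: JOURNAL_ENTRY_NONE (bad magic) vs
 * JOURNAL_ENTRY_BAD (failed validation) matters to the caller -
 * journal_read_bucket() stops scanning a bucket on NONE unless it has
 * already seen a bad entry, but keeps scanning past BAD entries in case
 * good entries follow.
 */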
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data, sectors_read << 9);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read from sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal"))
				return -EIO;

			j = buf->data;
		}

		ret = jset_validate(c, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			sectors = c->opts.block_size;
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}

		sectors = vstruct_sectors(j, c->block_bits);
next_block:
		pr_debug("next");
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
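/*
 * Reading a bucket proceeds entry by entry: each validated jset tells
 * us its own length (vstruct_sectors()), which is where the next entry
 * starts. On a bad entry we step forward one block at a time (saw_bad)
 * rather than giving up, since later entries in the bucket may still be
 * good.
 */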
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set dirty_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->discard_idx = ja->dirty_idx_ondisk =
		ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
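/*
 * cur_idx ends up pointing at the bucket holding the newest journal
 * entry, so new journal writes resume in the bucket after it. Treating
 * everything else as dirty at startup is the conservative choice:
 * reclaim will free whatever turns out not to be pinned.
 */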
int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
	struct journal_list jlist;
	struct journal_replay *i;
	struct bch_dev *ca;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
		     ca->mi.state == BCH_MEMBER_STATE_RO) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		struct bch_replicas_padded replicas;
		char buf[80];

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */

		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);

		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	if (!list_empty(list)) {
		i = list_last_entry(list, struct journal_replay, list);

		bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
			 keys, entries, le64_to_cpu(i->j.seq));
	}
fsck_err:
	return ret;
}
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_RW ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(c, ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
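/*
 * Note: replicas are counted in units of durability, not devices - a
 * device with durability 2 satisfies two of the wanted replicas. The
 * pointer offset is computed from how much of the current bucket has
 * already been consumed (bucket_size - sectors_free).
 */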
/*
 * journal_write_alloc - allocate space for a journal write, moving on to
 * the next journal bucket on each device when possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
					  &c->rw_devs[BCH_DATA_JOURNAL]);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja,
					journal_space_discarded)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;

			/*
			 * ja->bucket_seq[ja->cur_idx] must always have
			 * something sensible:
			 */
			ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);
done:
	rcu_read_unlock();

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
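/*
 * Illustration: given entries [btree_keys A][empty][btree_keys A] (same
 * btree_id/level), compaction drops the empty entry and merges the
 * third into the first by appending just its keys (i->_data, no header)
 * at vstruct_next(prev), yielding a single larger btree_keys entry.
 */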
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);
	kvpfree(buf->data, buf->buf_size);
	buf->data	= new_buf;
	buf->buf_size	= new_size;
}
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_prev_buf(j);
	struct bch_devs_list devs =
		bch2_bkey_devs(bkey_i_to_s_c(&w->key));
	struct bch_replicas_padded replicas;
	u64 seq = le64_to_cpu(w->data->seq);
	u64 last_seq = le64_to_cpu(w->data->last_seq);

	bch2_time_stats_update(j->write_time, j->write_start_time);

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		goto err;
	}

	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);

	if (bch2_mark_replicas(c, &replicas.e))
		goto err;
out:
	spin_lock(&j->lock);
	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = devs;

	j->seq_ondisk		= seq;
	j->last_seq_ondisk	= last_seq;
	bch2_journal_space_available(j);

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signalling write completion, for
	 * bch2_fs_journal_stop():
	 */
	mod_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	BUG_ON(!j->reservations.prev_buf_unwritten);
	atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
		     &j->reservations.counter);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);
	return;
err:
	bch2_fatal_error(c);
	goto out;
}
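/*
 * Note the ordering constraints above: journal reclaim is kicked and
 * the closure destroyed *before* prev_buf_unwritten is cleared and
 * waiters are woken, because once completion is signalled the
 * journal_buf may immediately be reused for the next write.
 */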
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_prev_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_prev_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct bch_extent_ptr *ptr;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s;
	int ret;

	bch2_journal_pin_put(j, le64_to_cpu(w->data->seq));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	start	= vstruct_last(jset);
	end	= bch2_journal_super_entries_add_common(c, start,
						le64_to_cpu(jset->seq));
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);
	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	jset->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	jset->magic		= cpu_to_le64(jset_magic(c));

	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) <
	    bcachefs_metadata_version_bkey_renumber)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
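	/*
	 * Why validate_before_checksum: if the jset will be encrypted, or
	 * if validation will renumber keys back to the old on-disk
	 * numbering (WRITE path of journal_validate_key() on
	 * pre-bkey_renumber versions), the buffer is still mutated or
	 * scrambled after validation - so in those cases it had to run
	 * before the checksum was computed, above.
	 */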
	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch_err(c, "Unable to allocate journal write");
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
		return;
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;
	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch2_bio_map(bio, jset, sectors << 9);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(jset->seq);
	}
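	/*
	 * Each journal write goes out REQ_PREFLUSH|REQ_FUA: the preflush
	 * makes previously completed writes durable before the journal
	 * entry that references them, and FUA ensures the entry itself is
	 * on stable media before completion is signalled.
	 */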
	for_each_rw_member(ca, c, i)
		if (journal_flushes_device(ca) &&
		    !bch2_bkey_has_device(bkey_i_to_s_c(&w->key), i)) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}
no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, system_highpri_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}