#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "checksum.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "replicas.h"

#include <trace/events/bcachefs.h>
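
/*
 * Entries read from each device are collected on jlist->head, under
 * jlist->lock; jlist->cl is the parent closure the per-device read closures
 * hang off of, and jlist->ret reports the first read error, if any:
 */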
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};
#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_list *jlist, struct jset *j)
{
	struct journal_replay *i, *pos;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	__le64 last_seq;
	int ret;

	last_seq = !list_empty(jlist->head)
		? list_last_entry(jlist->head, struct journal_replay,
				  list)->j.last_seq
		: 0;

	/* Is this entry older than the range we need? */
	if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	list_for_each_entry_safe(i, pos, jlist->head, list) {
		if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
			break;
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		/* Duplicate? */
		if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
			fsck_err_on(bytes != vstruct_bytes(&i->j) ||
				    memcmp(j, &i->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			goto found;
		}

		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&i->list, where);
	i->devs.nr = 0;
	memcpy(&i->j, j, bytes);
found:
	if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
		bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
	else
		fsck_err_on(1, c, "duplicate journal entries on same device");

	ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
	return ret;
}
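
/*
 * The encryption nonce for a jset is derived from its sequence number;
 * since seq is never reused, every journal entry gets a distinct nonce:
 */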
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
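
/*
 * jset_validate() return codes, consumed by journal_read_bucket():
 * REREAD means the entry didn't fit in what we've read so far (retry,
 * possibly with a bigger buffer), NONE means no journal entry at this
 * offset, BAD means a checksum or validation failure:
 */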
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7
#define journal_entry_err(c, msg, ...)					\
({									\
	switch (write) {						\
	case READ:							\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
		break;							\
	case WRITE:							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
		break;							\
	}								\
	true;								\
})
#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
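
/*
 * Key validation repairs in place instead of failing the read: a bad key is
 * cut out of its entry (shrinking entry->u64s) and the freed space is
 * cleared with journal_entry_null_range():
 */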
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				struct bkey_i *k, enum btree_node_type key_type,
				const char *type, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	unsigned version = le32_to_cpu(jset->version);
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in journal: k->u64s 0", type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				(void *) vstruct_next(entry), c,
			"invalid %s in journal: extends past end of journal entry",
			type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in journal: bad format %u",
			type, k->k.format)) {
		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN)
		bch2_bkey_swab(NULL, bkey_to_packed(k));

	if (!write &&
	    version < bcachefs_metadata_version_bkey_renumber)
		bch2_bkey_renumber(key_type, bkey_to_packed(k), write);

	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), key_type);
	if (invalid) {
		char buf[160];

		bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
				 type, invalid, buf);

		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (write &&
	    version < bcachefs_metadata_version_bkey_renumber)
		bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
fsck_err:
	return ret;
}
static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k;

	vstruct_for_each(entry, k) {
		int ret = journal_validate_key(c, jset, entry, k,
				__btree_node_type(entry->level,
						  entry->btree_id),
				"key", write);
		if (ret)
			return ret;
	}

	return 0;
}
static int journal_entry_validate_btree_root(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
				 "invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root:
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
				    "btree root", write);
fsck_err:
	return ret;
}
static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	/* obsolete, don't care: */
	return 0;
}
static int journal_entry_validate_blacklist(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
		"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
		"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}
static int journal_entry_validate_usage(struct bch_fs *c,
					struct jset *jset,
					struct jset_entry *entry,
					int write)
{
	struct jset_entry_usage *u =
		container_of(entry, struct jset_entry_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u),
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}
static int journal_entry_validate_data_usage(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct jset_entry_data_usage *u =
		container_of(entry, struct jset_entry_data_usage, entry);
	unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
	int ret = 0;

	if (journal_entry_err_on(bytes < sizeof(*u) ||
				 bytes < sizeof(*u) + u->r.nr_devs,
				 c,
				 "invalid journal entry usage: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		return ret;
	}

fsck_err:
	return ret;
}
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, int);
};

static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)						\
	[BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) {	\
		.validate	= journal_entry_validate_##f,	\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
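
/*
 * BCH_JSET_ENTRY_TYPES() is an x-macro over the jset entry types, so the
 * table above expands to initializers like (illustrative):
 *
 *	[BCH_JSET_ENTRY_btree_keys] = (struct jset_entry_ops) {
 *		.validate	= journal_entry_validate_btree_keys,
 *	},
 */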
static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
				  struct jset_entry *entry, int write)
{
	return entry->type < BCH_JSET_ENTRY_NR
		? bch2_jset_entry_ops[entry->type].validate(c, jset,
							    entry, write)
		: 0;
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = journal_entry_validate(c, jset, entry, write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
static int jset_validate(struct bch_fs *c,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	unsigned version;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	version = le32_to_cpu(jset->version);
	if ((version != BCH_JSET_VERSION_OLD &&
	     version < bcachefs_metadata_version_min) ||
	    version >= bcachefs_metadata_version_max) {
		bch_err(c, "unknown journal entry version %u", version);
		return BCH_FSCK_UNKNOWN_VERSION;
	}

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
				 "journal entry too big (%zu bytes), sector %llu",
				 bytes, sector)) {
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	if (bytes > sectors_read << 9)
		return JOURNAL_ENTRY_REREAD;

	if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"journal entry with unknown csum type %llu sector %llu",
			JSET_CSUM_TYPE(jset), sector))
		return JOURNAL_ENTRY_BAD;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "journal checksum bad, sector %llu", sector)) {
		/* XXX: retry IO, when we start retrying checksum errors */
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq"))
		jset->last_seq = jset->seq;

	return 0;
fsck_err:
	return ret;
}
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
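
/*
 * Read a single journal bucket, walking the jsets it contains in order and
 * adding the valid ones to jlist:
 */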
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
			struct bio *bio;
reread:
			sectors_read = min_t(unsigned,
				end - offset, buf->size >> 9);

			bio = bio_kmalloc(GFP_KERNEL,
					  buf_pages(buf->data,
						    sectors_read << 9));
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio->bi_iter.bi_size	= sectors_read << 9;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data);

			ret = submit_bio_wait(bio);
			bio_put(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read from sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal"))
				return -EIO;

			j = buf->data;
		}

		ret = jset_validate(c, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			sectors = c->opts.block_size;
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}

		sectors = vstruct_sectors(j, c->block_bits);
next_block:
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
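
/*
 * Closure callback: reads every journal bucket on one device, then figures
 * out where that device's journal should resume writing (ja->cur_idx):
 */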
static void bch2_journal_read_device(struct closure *cl)
{
	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct journal_read_buf buf = { NULL, 0 };
	u64 min_seq = U64_MAX;
	unsigned i;
	int ret;

	if (!ja->nr)
		goto out;

	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);

	for (i = 0; i < ja->nr; i++) {
		ret = journal_read_bucket(ca, &buf, jlist, i);
		if (ret)
			goto err;
	}

	/* Find the journal bucket with the highest sequence number: */
	for (i = 0; i < ja->nr; i++) {
		if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx])
			ja->cur_idx = i;

		min_seq = min(ja->bucket_seq[i], min_seq);
	}

	/*
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	while (ja->bucket_seq[ja->cur_idx] > min_seq &&
	       ja->bucket_seq[ja->cur_idx] >
	       ja->bucket_seq[(ja->cur_idx + 1) % ja->nr])
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

	ja->sectors_free = 0;

	/*
	 * Set last_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->last_idx = (ja->cur_idx + 1) % ja->nr;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
}
void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}
int bch2_journal_set_seq(struct bch_fs *c, u64 last_seq, u64 end_seq)
{
	struct journal *j = &c->journal;
	struct journal_entry_pin_list *p;
	u64 seq, nr = end_seq - last_seq + 1;

	if (nr > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	atomic64_set(&j->seq, end_seq);
	j->last_seq_ondisk = last_seq;

	j->pin.front	= last_seq;
	j->pin.back	= end_seq + 1;

	fifo_for_each_entry_ptr(p, &j->pin, seq) {
		INIT_LIST_HEAD(&p->list);
		INIT_LIST_HEAD(&p->flushed);
		atomic_set(&p->count, 0);
		p->devs.nr = 0;
	}

	return 0;
}
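
/*
 * bch2_journal_read() drives recovery: kick off a read closure per device,
 * validate what came back, make sure the replicas entries are marked, set up
 * the journal pin fifo, then check the sequence numbers we read for
 * blacklisted or missing entries:
 */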
int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
	struct journal *j = &c->journal;
	struct journal_list jlist;
	struct journal_replay *i;
	struct journal_entry_pin_list *p;
	struct bch_dev *ca;
	u64 cur_seq, end_seq;
	unsigned iter;
	size_t keys = 0, entries = 0;
	bool degraded = false;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		    !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
		     ca->mi.state == BCH_MEMBER_STATE_RO) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_err(c, "no journal entries found");
		return BCH_FSCK_REPAIR_IMPOSSIBLE;
	}

	list_for_each_entry(i, list, list) {
		struct bch_replicas_padded replicas;
		char buf[80];

		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);

		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */
		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
				 "superblock not marked as containing replicas %s",
				 (bch2_replicas_entry_to_text(&PBUF(buf),
							      &replicas.e), buf)))) {
			ret = bch2_mark_replicas(c, &replicas.e);
			if (ret)
				return ret;
		}
	}

	i = list_last_entry(list, struct journal_replay, list);

	ret = bch2_journal_set_seq(c,
				   le64_to_cpu(i->j.last_seq),
				   le64_to_cpu(i->j.seq));
	if (ret)
		return ret;

	mutex_lock(&j->blacklist_lock);

	list_for_each_entry(i, list, list) {
		p = journal_seq_pin(j, le64_to_cpu(i->j.seq));

		atomic_set(&p->count, 1);
		p->devs = i->devs;

		if (bch2_journal_seq_blacklist_read(j, i)) {
			mutex_unlock(&j->blacklist_lock);
			return -ENOMEM;
		}
	}

	mutex_unlock(&j->blacklist_lock);

	cur_seq = journal_last_seq(j);
	end_seq = le64_to_cpu(list_last_entry(list,
				struct journal_replay, list)->j.seq);

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		bool blacklisted;

		mutex_lock(&j->blacklist_lock);
		while (cur_seq < le64_to_cpu(i->j.seq) &&
		       bch2_journal_seq_blacklist_find(j, cur_seq))
			cur_seq++;

		blacklisted = bch2_journal_seq_blacklist_find(j,
							le64_to_cpu(i->j.seq));
		mutex_unlock(&j->blacklist_lock);

		fsck_err_on(blacklisted, c,
			    "found blacklisted journal entry %llu",
			    le64_to_cpu(i->j.seq));

		fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
			"journal entries %llu-%llu missing! (replaying %llu-%llu)",
			cur_seq, le64_to_cpu(i->j.seq) - 1,
			journal_last_seq(j), end_seq);

		cur_seq = le64_to_cpu(i->j.seq) + 1;

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
		 keys, entries, journal_cur_seq(j));
fsck_err:
	return ret;
}
/* journal replay: */

int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
{
	struct journal *j = &c->journal;
	struct bkey_i *k, *_n;
	struct jset_entry *entry;
	struct journal_replay *i, *n;
	int ret = 0;

	list_for_each_entry_safe(i, n, list, list) {
		j->replay_journal_seq = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j) {
			if (entry->btree_id == BTREE_ID_ALLOC) {
				/*
				 * allocation code handles replay for
				 * BTREE_ID_ALLOC keys:
				 */
				ret = bch2_alloc_replay_key(c, k);
			} else {
				/*
				 * We might cause compressed extents to be
				 * split, so we need to pass in a
				 * disk_reservation:
				 */
				struct disk_reservation disk_res =
					bch2_disk_reservation_init(c, 0);

				ret = bch2_btree_insert(c, entry->btree_id, k,
						&disk_res, NULL,
						BTREE_INSERT_NOFAIL|
						BTREE_INSERT_JOURNAL_REPLAY|
						BTREE_INSERT_NOMARK);
			}

			if (ret) {
				bch_err(c, "journal replay: error %d while replaying key",
					ret);
				goto err;
			}

			cond_resched();
		}

		bch2_journal_pin_put(j, j->replay_journal_seq);
	}

	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	ret = bch2_journal_error(j);
err:
	bch2_journal_entries_free(list);
	return ret;
}
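
/* journal write: */

/*
 * Add pointers to the journal write for up to replicas_want replicas, using
 * only devices in devs_sorted that still have room in their current journal
 * bucket (ja->sectors_free); *replicas is incremented by the durability of
 * each device added:
 */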
static void __journal_write_alloc(struct journal *j,
				  struct journal_buf *w,
				  struct dev_alloc_list *devs_sorted,
				  unsigned sectors,
				  unsigned *replicas,
				  unsigned replicas_want)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	unsigned i;

	if (*replicas >= replicas_want)
		return;

	for (i = 0; i < devs_sorted->nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (!ca->mi.durability ||
		    ca->mi.state != BCH_MEMBER_STATE_RW ||
		    !ja->nr ||
		    bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
					 ca->dev_idx) ||
		    sectors > ja->sectors_free)
			continue;

		bch2_dev_stripe_increment(c, ca, &j->wp.stripe);

		bch2_bkey_append_ptr(&w->key,
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]) +
					ca->mi.bucket_size -
					ja->sectors_free,
				  .dev = ca->dev_idx,
		});

		ja->sectors_free -= sectors;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		*replicas += ca->mi.durability;

		if (*replicas >= replicas_want)
			break;
	}
}
/**
 * journal_write_alloc - allocate space for the journal write, moving on to
 * the next journal bucket on each device if possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned i, replicas = 0, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	rcu_read_lock();

	devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
					  &c->rw_devs[BCH_DATA_JOURNAL]);

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);

	if (replicas >= replicas_want)
		goto done;

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;

		if (sectors > ja->sectors_free &&
		    sectors <= ca->mi.bucket_size &&
		    bch2_journal_dev_buckets_available(j, ja)) {
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
			ja->sectors_free = ca->mi.bucket_size;
		}
	}

	__journal_write_alloc(j, w, &devs_sorted,
			      sectors, &replicas, replicas_want);
done:
	rcu_read_unlock();

	return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
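
/*
 * Grow the journal buffer to j->buf_size_want if it increased: GFP_NOIO
 * since we're in the journal write path, and __GFP_NOWARN because failure
 * is harmless - we just keep using the old buffer:
 */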
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->buf_size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->buf_size);
	kvpfree(buf->data, buf->buf_size);
	buf->data	= new_buf;
	buf->buf_size	= new_size;
}
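
/*
 * Called via the journal io closure once every bio for this write has
 * completed: updates the on-disk sequence numbers and wakes up anyone
 * waiting on the write:
 */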
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_prev_buf(j);
	struct bch_devs_list devs =
		bch2_bkey_devs(bkey_i_to_s_c(&w->key));
	struct bch_replicas_padded replicas;
	u64 seq = le64_to_cpu(w->data->seq);
	u64 last_seq = le64_to_cpu(w->data->last_seq);

	bch2_time_stats_update(j->write_time, j->write_start_time);

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		goto err;
	}

	bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);

	if (bch2_mark_replicas(c, &replicas.e))
		goto err;

	spin_lock(&j->lock);
	j->seq_ondisk		= seq;
	j->last_seq_ondisk	= last_seq;

	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = devs;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	mod_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
out:
	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	BUG_ON(!j->reservations.prev_buf_unwritten);
	atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
		     &j->reservations.counter);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);
	return;
err:
	bch2_fatal_error(c);
	bch2_journal_halt(j);
	spin_lock(&j->lock);
	goto out;
}
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_prev_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_bkey_drop_device(bkey_i_to_s(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
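
/*
 * Main journal write path: finish filling out the jset (superblock entries,
 * clocks, version), compact it, checksum/encrypt, allocate space on each
 * device, then submit one bio per journal replica plus flush-only bios to
 * the remaining rw devices:
 */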
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_prev_buf(j);
	struct jset_entry *start, *end;
	struct jset *jset;
	struct bio *bio;
	struct bch_extent_ptr *ptr;
	bool validate_before_checksum = false;
	unsigned i, sectors, bytes, u64s;
	int ret;

	bch2_journal_pin_put(j, le64_to_cpu(w->data->seq));

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	start	= vstruct_last(jset);
	end	= bch2_journal_super_entries_add_common(c, start);
	u64s	= (u64 *) end - (u64 *) start;
	BUG_ON(u64s > j->entry_u64s_reserved);

	le32_add_cpu(&jset->u64s, u64s);
	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);

	journal_write_compact(jset);

	jset->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	jset->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	jset->magic		= cpu_to_le64(jset_magic(c));

	jset->version		= c->sb.version < bcachefs_metadata_version_new_versioning
		? cpu_to_le32(BCH_JSET_VERSION_OLD)
		: cpu_to_le32(c->sb.version);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
		validate_before_checksum = true;

	if (le32_to_cpu(jset->version) <
	    bcachefs_metadata_version_bkey_renumber)
		validate_before_checksum = true;

	if (validate_before_checksum &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!validate_before_checksum &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > w->sectors);

	bytes = vstruct_bytes(jset);
	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);

	spin_lock(&j->lock);
	ret = journal_write_alloc(j, w, sectors);

	/*
	 * write is allocated, no longer need to account for it in
	 * bch2_journal_space_available():
	 */
	w->sectors = 0;

	/*
	 * journal entry has been compacted and allocated, recalculate space
	 * available:
	 */
	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	if (ret) {
		bch2_journal_halt(j);
		bch_err(c, "Unable to allocate journal write");
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
		return;
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_iter.bi_size	= sectors << 9;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch2_bio_map(bio, jset);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(jset->seq);
	}

	for_each_rw_member(ca, c, i)
		if (journal_flushes_device(ca) &&
		    !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}

no_io:
	bch2_bucket_seq_cleanup(c);

	continue_at(cl, journal_write_done, system_highpri_wq);
	return;
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}