#include "btree_update.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"

#include <trace/events/bcachefs.h>
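/*
 * Returns the first jset entry of the given type that refers to the given
 * btree, or NULL if there isn't one:
 */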
static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type,
						  enum btree_id id)
{
	struct jset_entry *entry;

	for_each_jset_entry_type(entry, j, type)
		if (entry->btree_id == id)
			return entry;

	return NULL;
}
struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j,
					    enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry =
		bch2_journal_find_entry(j, BCH_JSET_ENTRY_btree_root, id);

	if (!entry)
		return NULL;

	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_list *jlist, struct jset *j)
{
	struct journal_replay *i, *pos;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	__le64 last_seq;
	int ret;

	last_seq = !list_empty(jlist->head)
		? list_last_entry(jlist->head, struct journal_replay,
				  list)->j.last_seq
		: 0;

	/* Is this entry older than the range we need? */
	if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	list_for_each_entry_safe(i, pos, jlist->head, list) {
		if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
			break;
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
	list_for_each_entry_reverse(i, jlist->head, list) {
		/* Duplicate? */
		if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
			fsck_err_on(bytes != vstruct_bytes(&i->j) ||
				    memcmp(j, &i->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			goto found;
		}

		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&i->list, where);
	i->devs.nr = 0;
	memcpy(&i->j, j, bytes);
found:
	if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
		bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
	else
		fsck_err_on(1, c, "duplicate journal entries on same device");

	ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
	return ret;
}
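/*
 * The jset checksum/encryption nonce is derived from the entry's sequence
 * number, so identical contents at different seqs still encrypt differently:
 */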
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}

#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7
#define journal_entry_err(c, msg, ...)					\
({									\
	if (write == READ) {						\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
	} else {							\
		bch_err(c, "corrupt metadata before write:\n"		\
			msg, ##__VA_ARGS__);				\
		if (bch2_fs_inconsistent(c)) {				\
			ret = BCH_FSCK_ERRORS_NOT_FIXED;		\
			goto fsck_err;					\
		}							\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
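/*
 * When a key fails validation we repair in place: the bad key is dropped by
 * shrinking entry->u64s, and the freed tail of the entry is filled with
 * empty jset_entries so the jset still walks cleanly:
 */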
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				struct bkey_i *k, enum bkey_type key_type,
				const char *type, int write)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	char buf[160];
	int ret = 0;

	if (journal_entry_err_on(!k->k.u64s, c,
			"invalid %s in journal: k->u64s 0", type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on((void *) bkey_next(k) >
				 (void *) vstruct_next(entry), c,
			"invalid %s in journal: extends past end of journal entry",
			type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in journal: bad format %u",
			type, k->k.format)) {
		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN)
		bch2_bkey_swab(key_type, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k));
	if (invalid) {
		bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf),
				      bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
				 type, invalid, buf);

		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_btree_keys(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k;

	vstruct_for_each(entry, k) {
		int ret = journal_validate_key(c, jset, entry, k,
					       bkey_type(entry->level,
							 entry->btree_id),
					       "key", write);
		if (ret)
			return ret;
	}

	return 0;
}
static int journal_entry_validate_btree_root(struct bch_fs *c,
					     struct jset *jset,
					     struct jset_entry *entry,
					     int write)
{
	struct bkey_i *k = entry->start;
	int ret = 0;

	if (journal_entry_err_on(!entry->u64s ||
				 le16_to_cpu(entry->u64s) != k->k.u64s, c,
			"invalid btree root journal entry: wrong number of keys")) {
		void *next = vstruct_next(entry);
		/*
		 * we don't want to null out this jset_entry,
		 * just the contents, so that later we can tell
		 * we were _supposed_ to have a btree root
		 */
		entry->u64s = 0;
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
				    "btree root", write);
fsck_err:
	return ret;
}
static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	/* obsolete, don't care: */
	return 0;
}
static int journal_entry_validate_blacklist(struct bch_fs *c,
					    struct jset *jset,
					    struct jset_entry *entry,
					    int write)
{
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
			"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
fsck_err:
	return ret;
}
static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
					       struct jset *jset,
					       struct jset_entry *entry,
					       int write)
{
	struct jset_entry_blacklist_v2 *bl_entry;
	int ret = 0;

	if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
			"invalid journal seq blacklist entry: bad size")) {
		journal_entry_null_range(entry, vstruct_next(entry));
		goto out;
	}

	bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);

	if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
				 le64_to_cpu(bl_entry->end), c,
			"invalid journal seq blacklist entry: start > end")) {
		journal_entry_null_range(entry, vstruct_next(entry));
	}
out:
fsck_err:
	return ret;
}
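/*
 * Per-entry-type validate hooks, indexed by jset entry type and dispatched
 * from journal_entry_validate() below:
 */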
struct jset_entry_ops {
	int (*validate)(struct bch_fs *, struct jset *,
			struct jset_entry *, int);
};

const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr)							\
	[BCH_JSET_ENTRY_##f]	= (struct jset_entry_ops) {		\
		.validate	= journal_entry_validate_##f,		\
	},
	BCH_JSET_ENTRY_TYPES()
#undef x
};
static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
				  struct jset_entry *entry, int write)
{
	int ret = 0;

	if (entry->type >= BCH_JSET_ENTRY_NR) {
		journal_entry_err(c, "invalid journal entry type %u",
				  entry->type);
		journal_entry_null_range(entry, vstruct_next(entry));
		return 0;
	}

	ret = bch2_jset_entry_ops[entry->type].validate(c, jset, entry, write);
fsck_err:
	return ret;
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
				 int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		ret = journal_entry_validate(c, jset, entry, write);
		if (ret)
			break;
	}
fsck_err:
	return ret;
}
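/*
 * Validate a single on-disk jset: check the magic and version, that it fits
 * in the bucket and in what we've read so far, then verify the checksum and
 * decrypt before the contents are used:
 */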
static int jset_validate(struct bch_fs *c,
			 struct jset *jset, u64 sector,
			 unsigned bucket_sectors_left,
			 unsigned sectors_read,
			 int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	if (le32_to_cpu(jset->version) != BCACHE_JSET_VERSION) {
		bch_err(c, "unknown journal entry version %u",
			le32_to_cpu(jset->version));
		return BCH_FSCK_UNKNOWN_VERSION;
	}

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"journal entry too big (%zu bytes), sector %llu",
			bytes, sector)) {
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	if (bytes > sectors_read << 9)
		return JOURNAL_ENTRY_REREAD;

	if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"journal entry with unknown csum type %llu sector %llu",
			JSET_CSUM_TYPE(jset), sector))
		return JOURNAL_ENTRY_BAD;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
				 "journal checksum bad, sector %llu", sector)) {
		/* XXX: retry IO, when we start retrying checksum errors */
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
				 "invalid journal entry: last_seq > seq"))
		jset->last_seq = jset->seq;

	return 0;
fsck_err:
	return ret;
}
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data	= n;
	b->size	= new_size;
	return 0;
}
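/*
 * Read a single journal bucket, validating each jset as we go and adding the
 * good ones to jlist; rereads with a bigger buffer when an entry turns out to
 * be larger than what we've buffered so far:
 */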
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket, u64 *seq, bool *entries_found)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bio *bio = ja->bio;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
reread:			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);

			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio->bi_iter.bi_size	= sectors_read << 9;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data);

			ret = submit_bio_wait(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read from sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal"))
				return -EIO;

			j = buf->data;
		}

		ret = jset_validate(c, j, offset,
				    end - offset, sectors_read,
				    READ);
		switch (ret) {
		case BCH_FSCK_OK:
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			sectors = c->opts.block_size;
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			*entries_found = true;
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}

		if (le64_to_cpu(j->seq) > *seq)
			*seq = le64_to_cpu(j->seq);

		sectors = vstruct_sectors(j, c->block_bits);
next_block:
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
static void bch2_journal_read_device(struct closure *cl)
{
#define read_bucket(b)							\
	({								\
		bool entries_found = false;				\
		ret = journal_read_bucket(ca, &buf, jlist, b, &seq,	\
					  &entries_found);		\
		if (ret)						\
			goto err;					\
		__set_bit(b, bitmap);					\
		entries_found;						\
	 })

	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev);
	struct journal_read_buf buf = { NULL, 0 };

	DECLARE_BITMAP(bitmap, ja->nr);
	unsigned i, l, r;
	u64 seq = 0;
	int ret;

	if (!ja->nr)
		goto out;
	bitmap_zero(bitmap, ja->nr);
	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);
	/*
	 * If the device supports discard but not secure discard, we can't do
	 * the fancy fibonacci hash/binary search because the live journal
	 * entries might not form a contiguous range:
	 */
	for (i = 0; i < ja->nr; i++)
		read_bucket(i);
	goto search_done;
#if 0
	if (!blk_queue_nonrot(q))
		goto linear_scan;

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
	for (i = 0; i < ja->nr; i++) {
		l = (i * 2654435769U) % ja->nr;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

	/*
	 * If that fails, check all the buckets we haven't checked
	 * already
	 */
	pr_debug("falling back to linear search");
linear_scan:
	for (l = find_first_zero_bit(bitmap, ja->nr);
	     l < ja->nr;
	     l = find_next_zero_bit(bitmap, ja->nr, l + 1))
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ja->nr)
		goto out;
bsearch:
	/* binary search */
	r = find_next_bit(bitmap, ja->nr, l + 1);
	pr_debug("starting binary search, l %u r %u", l, r);

	while (l + 1 < r) {
		unsigned m = (l + r) >> 1;
		u64 cur_seq = seq;

		read_bucket(m);

		if (cur_seq != seq)
			l = m;
		else
			r = m;
	}
#endif
search_done:
	/*
	 * Find the journal bucket with the highest sequence number:
	 *
	 * If there are duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	seq = 0;

	for (i = 0; i < ja->nr; i++)
		if (ja->bucket_seq[i] >= seq &&
		    ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) {
			/*
			 * When journal_next_bucket() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx:
			 */
			ja->cur_idx = i;
			seq = ja->bucket_seq[i];
		}

	/*
	 * Set last_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->last_idx = (ja->cur_idx + 1) % ja->nr;
	/*
	 * Read buckets in reverse order until we stop finding more journal
	 * entries:
	 */
	for (i = (ja->cur_idx + ja->nr - 1) % ja->nr;
	     i != ja->cur_idx;
	     i = (i + ja->nr - 1) % ja->nr)
		if (!test_bit(i, bitmap) &&
		    !read_bucket(i))
			break;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
#undef read_bucket
}
void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);

		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}
static inline bool journal_has_keys(struct list_head *list)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;

	list_for_each_entry(i, list, list)
		for_each_jset_key(k, _n, entry, &i->j)
			return true;

	return false;
}
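/*
 * Entry point for reading the journal at mount time: kicks off a read on
 * every device that holds journal data, waits for them all, then validates
 * the combined list and sets up journal pins for the sequence range that
 * will be replayed:
 */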
int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
	struct journal *j = &c->journal;
	struct journal_list jlist;
	struct journal_replay *i;
	struct journal_entry_pin_list *p;
	struct bch_dev *ca;
	u64 cur_seq, end_seq, seq;
	unsigned iter, keys = 0, entries = 0;
	size_t nr;
	bool degraded = false;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
		     ca->mi.state == BCH_MEMBER_STATE_RO) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;
	if (list_empty(list)) {
		bch_err(c, "no journal entries found");
		return BCH_FSCK_REPAIR_IMPOSSIBLE;
	}

	fsck_err_on(c->sb.clean && journal_has_keys(list), c,
		    "filesystem marked clean but journal has keys to replay");
	list_for_each_entry(i, list, list) {
		ret = jset_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */
		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, BCH_DATA_JOURNAL,
						       i->devs), c,
				 "superblock not marked as containing replicas (type %u)",
				 BCH_DATA_JOURNAL))) {
			ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, i->devs);
			if (ret)
				return ret;
		}
	}
	i = list_last_entry(list, struct journal_replay, list);

	nr = le64_to_cpu(i->j.seq) - le64_to_cpu(i->j.last_seq) + 1;

	if (nr > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%zu open entries)", nr);
			return -ENOMEM;
		}
	}

	atomic64_set(&j->seq, le64_to_cpu(i->j.seq));
	j->last_seq_ondisk = le64_to_cpu(i->j.last_seq);

	j->pin.front	= le64_to_cpu(i->j.last_seq);
	j->pin.back	= le64_to_cpu(i->j.seq) + 1;
	fifo_for_each_entry_ptr(p, &j->pin, seq) {
		INIT_LIST_HEAD(&p->list);
		INIT_LIST_HEAD(&p->flushed);
		atomic_set(&p->count, 0);
		p->devs.nr = 0;
	}

	mutex_lock(&j->blacklist_lock);

	list_for_each_entry(i, list, list) {
		p = journal_seq_pin(j, le64_to_cpu(i->j.seq));

		atomic_set(&p->count, 1);
		p->devs = i->devs;

		if (bch2_journal_seq_blacklist_read(j, i)) {
			mutex_unlock(&j->blacklist_lock);
			return -ENOMEM;
		}
	}

	mutex_unlock(&j->blacklist_lock);
	cur_seq = journal_last_seq(j);
	end_seq = le64_to_cpu(list_last_entry(list,
				struct journal_replay, list)->j.seq);

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		bool blacklisted;

		mutex_lock(&j->blacklist_lock);
		while (cur_seq < le64_to_cpu(i->j.seq) &&
		       bch2_journal_seq_blacklist_find(j, cur_seq))
			cur_seq++;

		blacklisted = bch2_journal_seq_blacklist_find(j,
						le64_to_cpu(i->j.seq));
		mutex_unlock(&j->blacklist_lock);

		fsck_err_on(blacklisted, c,
			    "found blacklisted journal entry %llu",
			    le64_to_cpu(i->j.seq));

		fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
			"journal entries %llu-%llu missing! (replaying %llu-%llu)",
			cur_seq, le64_to_cpu(i->j.seq) - 1,
			journal_last_seq(j), end_seq);

		cur_seq = le64_to_cpu(i->j.seq) + 1;

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %i keys in %i entries, seq %llu",
		 keys, entries, journal_cur_seq(j));
fsck_err:
	return ret;
}
/* journal replay: */
int bch2_journal_mark(struct bch_fs *c, struct list_head *list)
{
	struct bkey_i *k, *n;
	struct jset_entry *j;
	struct journal_replay *r;
	int ret;

	list_for_each_entry(r, list, list)
		for_each_jset_key(k, n, j, &r->j) {
			enum bkey_type type = bkey_type(j->level, j->btree_id);
			struct bkey_s_c k_s_c = bkey_i_to_s_c(k);

			if (btree_type_has_ptrs(type)) {
				ret = bch2_btree_mark_key_initial(c, type, k_s_c);
				if (ret)
					return ret;
			}
		}

	return 0;
}
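/*
 * Replay keys from the journal into the btree: BTREE_ID_ALLOC keys go through
 * the allocator, everything else is inserted with BTREE_INSERT_JOURNAL_REPLAY
 * so the insert doesn't get journalled again:
 */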
int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
{
	struct journal *j = &c->journal;
	struct journal_entry_pin_list *pin_list;
	struct bkey_i *k, *_n;
	struct jset_entry *entry;
	struct journal_replay *i, *n;
	int ret = 0;

	list_for_each_entry_safe(i, n, list, list) {
		j->replay_journal_seq = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j) {
			if (entry->btree_id == BTREE_ID_ALLOC) {
				/*
				 * allocation code handles replay for
				 * BTREE_ID_ALLOC keys:
				 */
				ret = bch2_alloc_replay_key(c, k->k.p);
			} else {
				/*
				 * We might cause compressed extents to be
				 * split, so we need to pass in a
				 * disk_reservation:
				 */
				struct disk_reservation disk_res =
					bch2_disk_reservation_init(c, 0);

				ret = bch2_btree_insert(c, entry->btree_id, k,
							&disk_res, NULL, NULL,
							BTREE_INSERT_NOFAIL|
							BTREE_INSERT_JOURNAL_REPLAY);
			}

			if (ret) {
				bch_err(c, "journal replay: error %d while replaying key",
					ret);
				goto err;
			}

			cond_resched();
		}

		pin_list = journal_seq_pin(j, j->replay_journal_seq);

		if (atomic_dec_and_test(&pin_list->count))
			journal_wake(j);
	}

	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	ret = bch2_journal_flush_all_pins(j);
err:
	bch2_journal_entries_free(list);
	return ret;
}
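/* journal write: */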
static void bch2_journal_add_btree_root(struct journal_buf *buf,
					enum btree_id id, struct bkey_i *k,
					unsigned level)
{
	struct jset_entry *entry;

	entry = bch2_journal_add_entry_noreservation(buf, k->k.u64s);
	entry->type	= BCH_JSET_ENTRY_btree_root;
	entry->btree_id	= id;
	entry->level	= level;
	memcpy_u64s(entry->_data, k, k->k.u64s);
}
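/*
 * How many journal buckets on @ca are still available to allocate for
 * writes, leaving slack so replay and advancing last_seq can't deadlock:
 */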
static unsigned journal_dev_buckets_available(struct journal *j,
					      struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;
	unsigned next = (ja->cur_idx + 1) % ja->nr;
	unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;

	/*
	 * Hack to avoid a deadlock during journal replay:
	 * journal replay might require setting a new btree
	 * root, which requires writing another journal entry -
	 * thus, if the journal is full (and this happens when
	 * replaying the first journal bucket's entries) we're
	 * screwed.
	 *
	 * So don't let the journal fill up unless we're in
	 * replay:
	 */
	if (test_bit(JOURNAL_REPLAY_DONE, &j->flags))
		available = max((int) available - 2, 0);

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (ja->bucket_seq[ja->last_idx] >= journal_last_seq(j))
		available = max((int) available - 1, 0);

	return available;
}
/* returns number of sectors available for next journal entry: */
int bch2_journal_entry_sectors(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
	unsigned sectors_available = UINT_MAX;
	unsigned i, nr_online = 0, nr_devs = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_JOURNAL]) {
		struct journal_device *ja = &ca->journal;
		unsigned buckets_required = 0;

		if (!ja->nr)
			continue;

		sectors_available = min_t(unsigned, sectors_available,
					  ca->mi.bucket_size);

		/*
		 * Note that we don't allocate the space for a journal entry
		 * until we write it out - thus, if we haven't started the write
		 * for the previous entry we have to make sure we have space for
		 * it too:
		 */
		if (bch2_extent_has_device(e.c, ca->dev_idx)) {
			if (j->prev_buf_sectors > ja->sectors_free)
				buckets_required++;

			if (j->prev_buf_sectors + sectors_available >
			    ja->sectors_free + ca->mi.bucket_size)
				buckets_required++;
		} else {
			if (j->prev_buf_sectors + sectors_available >
			    ca->mi.bucket_size)
				buckets_required++;

			buckets_required++;
		}

		if (journal_dev_buckets_available(j, ca) >= buckets_required)
			nr_devs++;
		nr_online++;
	}
	rcu_read_unlock();

	if (nr_online < c->opts.metadata_replicas_required)
		return -EROFS;

	if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas))
		return 0;

	return sectors_available;
}
/*
 * journal_write_alloc - pick the journal buckets this entry will be written
 * to, moving on to each device's next journal bucket where possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned i, replicas, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	spin_lock(&j->lock);
	e = bkey_i_to_s_extent(&j->key);

	/*
	 * Drop any pointers to devices that have been removed, are no longer
	 * empty, or filled up their current journal bucket:
	 *
	 * Note that a device may have had a small amount of free space (perhaps
	 * one sector) that wasn't enough for the smallest possible journal
	 * entry - that's why we drop pointers to devices <= current free space,
	 * i.e. whichever device was limiting the current journal entry size.
	 */
	extent_for_each_ptr_backwards(e, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.state != BCH_MEMBER_STATE_RW ||
		    ca->journal.sectors_free <= sectors)
			__bch2_extent_drop_ptr(e, ptr);
		else
			ca->journal.sectors_free -= sectors;
	}

	replicas = bch2_extent_nr_ptrs(e.c);

	rcu_read_lock();
	devs_sorted = bch2_wp_alloc_list(c, &j->wp,
					 &c->rw_devs[BCH_DATA_JOURNAL]);

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		if (!ca->mi.durability)
			continue;

		ja = &ca->journal;
		if (!ja->nr)
			continue;

		if (replicas >= replicas_want)
			break;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (bch2_extent_has_device(e.c, ca->dev_idx) ||
		    !journal_dev_buckets_available(j, ca) ||
		    sectors > ca->mi.bucket_size)
			continue;

		j->wp.next_alloc[ca->dev_idx] += U32_MAX;
		bch2_wp_rescale(c, ca, &j->wp);

		ja->sectors_free = ca->mi.bucket_size - sectors;
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		extent_ptr_append(bkey_i_to_extent(&j->key),
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]),
				  .dev = ca->dev_idx,
			});

		replicas += ca->mi.durability;
	}
	rcu_read_unlock();

	j->prev_buf_sectors = 0;

	bkey_copy(&w->key, &j->key);
	spin_unlock(&j->lock);

	if (replicas < c->opts.metadata_replicas_required)
		return -EROFS;

	return 0;
}
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== BCH_JSET_ENTRY_btree_keys &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->size);
	kvpfree(buf->data, buf->size);
	buf->data	= new_buf;
	buf->size	= new_size;
}
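/*
 * Write completion: update last_seq_ondisk and the per-seq device list, kick
 * journal reclaim, and signal completion - on error we halt the journal, but
 * completion is still signalled so waiters aren't left hanging:
 */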
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_prev_buf(j);
	struct bch_devs_list devs =
		bch2_extent_devs(bkey_i_to_s_c_extent(&w->key));
	u64 seq = le64_to_cpu(w->data->seq);

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		goto err;
	}

	if (bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs))
		goto err;
out:
	bch2_time_stats_update(j->write_time, j->write_start_time);

	spin_lock(&j->lock);
	j->last_seq_ondisk = seq;
	if (seq >= j->pin.front)
		journal_seq_pin(j, seq)->devs = devs;

	/*
	 * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
	 * more buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	BUG_ON(!j->reservations.prev_buf_unwritten);
	atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
		     &j->reservations.counter);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);
	return;
err:
	bch2_fatal_error(c);
	bch2_journal_halt(j);
	goto out;
}
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_prev_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_extent_drop_device(bkey_i_to_s_extent(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
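/*
 * Write out the current journal entry: add dirty btree roots, compact the
 * jset, checksum/encrypt it, pick devices to write to, then submit one bio
 * per replica, plus flushes to devices that aren't getting a copy:
 */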
void bch2_journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_prev_buf(j);
	struct jset *jset;
	struct bio *bio;
	struct bch_extent_ptr *ptr;
	unsigned i, sectors, bytes;

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();

	mutex_lock(&c->btree_root_lock);
	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (r->alive)
			bch2_journal_add_btree_root(w, i, &r->key, r->level);
	}
	c->btree_roots_dirty = false;
	mutex_unlock(&c->btree_root_lock);

	journal_write_compact(jset);

	jset->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
	jset->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= cpu_to_le32(BCACHE_JSET_VERSION);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
	    jset_validate_entries(c, jset, WRITE))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > j->prev_buf_sectors);

	bytes = vstruct_bytes(w->data);
	memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);

	if (journal_write_alloc(j, w, sectors)) {
		bch2_journal_halt(j);
		bch_err(c, "Unable to allocate journal write");
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
		return;
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;
	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_iter.bi_size	= sectors << 9;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch2_bio_map(bio, jset);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
	}
	for_each_rw_member(ca, c, i)
		if (journal_flushes_device(ca) &&
		    !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf	= REQ_OP_FLUSH;
			bio->bi_end_io	= journal_write_endio;
			bio->bi_private	= ca;
			closure_bio_submit(bio, cl);
		}

no_io:
	extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
		ptr->offset += sectors;

	continue_at(cl, journal_write_done, system_highpri_wq);
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}