2 * bcachefs journalling code, for btree insertions
4 * Copyright 2012 Google, Inc.
9 #include "bkey_methods.h"
12 #include "btree_update.h"
13 #include "btree_update_interior.h"
25 #include <trace/events/bcachefs.h>
27 static void journal_write(struct closure *);
28 static void journal_reclaim_fast(struct journal *);
29 static void journal_pin_add_entry(struct journal *,
30 struct journal_entry_pin_list *,
31 struct journal_entry_pin *,
32 journal_pin_flush_fn);
34 static inline struct journal_buf *journal_cur_buf(struct journal *j)
36 return j->buf + j->reservations.idx;
39 static inline struct journal_buf *journal_prev_buf(struct journal *j)
41 return j->buf + !j->reservations.idx;
44 /* Sequence number of oldest dirty journal entry */
46 static inline u64 last_seq(struct journal *j)
48 return atomic64_read(&j->seq) - fifo_used(&j->pin) + 1;
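/* e.g. with j->seq == 10 and 3 entries in use in the pin fifo, the oldest dirty entry is seq 8 */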
51 static inline u64 journal_pin_seq(struct journal *j,
52 struct journal_entry_pin_list *pin_list)
54 return last_seq(j) + fifo_entry_idx(&j->pin, pin_list);
57 static inline void bch2_journal_add_entry_noreservation(struct journal_buf *buf,
58 unsigned type, enum btree_id id,
60 const void *data, size_t u64s)
62 struct jset *jset = buf->data;
64 bch2_journal_add_entry_at(buf, le32_to_cpu(jset->u64s),
65 type, id, level, data, u64s);
66 le32_add_cpu(&jset->u64s, jset_u64s(u64s));
69 static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type,
72 struct jset_entry *entry;
74 for_each_jset_entry_type(entry, j, type)
75 if (entry->btree_id == id)
81 struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j,
82 enum btree_id id, unsigned *level)
85 struct jset_entry *entry =
86 bch2_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id);
92 *level = entry->level;
97 static void bch2_journal_add_btree_root(struct journal_buf *buf,
98 enum btree_id id, struct bkey_i *k,
101 bch2_journal_add_entry_noreservation(buf,
102 JOURNAL_ENTRY_BTREE_ROOT, id, level,
106 static void journal_seq_blacklist_flush(struct journal *j,
107 struct journal_entry_pin *pin, u64 seq)
110 container_of(j, struct bch_fs, journal);
111 struct journal_seq_blacklist *bl =
112 container_of(pin, struct journal_seq_blacklist, pin);
113 struct blacklisted_node n;
118 closure_init_stack(&cl);
121 struct btree_iter iter;
124 mutex_lock(&j->blacklist_lock);
125 if (i >= bl->nr_entries) {
126 mutex_unlock(&j->blacklist_lock);
130 mutex_unlock(&j->blacklist_lock);
132 __bch2_btree_iter_init(&iter, c, n.btree_id, n.pos, 0, 0, 0);
134 b = bch2_btree_iter_peek_node(&iter);
136 /* The node might have already been rewritten: */
138 if (b->data->keys.seq == n.seq) {
139 ret = bch2_btree_node_rewrite(c, &iter, n.seq, 0);
141 bch2_btree_iter_unlock(&iter);
142 bch2_fs_fatal_error(c,
143 "error %i rewriting btree node with blacklisted journal seq",
145 bch2_journal_halt(j);
150 bch2_btree_iter_unlock(&iter);
154 struct btree_update *as;
155 struct pending_btree_node_free *d;
157 mutex_lock(&j->blacklist_lock);
158 if (i >= bl->nr_entries) {
159 mutex_unlock(&j->blacklist_lock);
163 mutex_unlock(&j->blacklist_lock);
165 mutex_lock(&c->btree_interior_update_lock);
168 * Is the node on the list of pending interior node updates -
169 * being freed? If so, wait for that to finish:
171 for_each_pending_btree_node_free(c, as, d)
172 if (n.seq == d->seq &&
173 n.btree_id == d->btree_id &&
175 !bkey_cmp(n.pos, d->key.k.p)) {
176 closure_wait(&as->wait, &cl);
177 mutex_unlock(&c->btree_interior_update_lock);
182 mutex_unlock(&c->btree_interior_update_lock);
185 mutex_lock(&j->blacklist_lock);
187 bch2_journal_pin_drop(j, &bl->pin);
192 mutex_unlock(&j->blacklist_lock);
195 static struct journal_seq_blacklist *
196 journal_seq_blacklist_find(struct journal *j, u64 seq)
198 struct journal_seq_blacklist *bl;
200 lockdep_assert_held(&j->blacklist_lock);
202 list_for_each_entry(bl, &j->seq_blacklist, list)
209 static struct journal_seq_blacklist *
210 bch2_journal_seq_blacklisted_new(struct journal *j, u64 seq)
212 struct journal_seq_blacklist *bl;
214 lockdep_assert_held(&j->blacklist_lock);
217 * When we start the journal, bch2_journal_start() will skip over @seq:
220 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
225 list_add_tail(&bl->list, &j->seq_blacklist);
230 * Returns true if @seq is newer than the most recent journal entry that got
231 * written, and data corresponding to @seq should be ignored - also marks @seq
232 * as blacklisted so that on future restarts the corresponding data will still
235 int bch2_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
237 struct journal *j = &c->journal;
238 struct journal_seq_blacklist *bl = NULL;
239 struct blacklisted_node *n;
246 journal_seq = atomic64_read(&j->seq);
248 /* Interior updates aren't journalled: */
250 BUG_ON(seq > journal_seq && test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags));
253 * Decrease this back to j->seq + 2 when we next rev the on disk format:
254 * increasing it temporarily to work around a bug in old kernels
256 bch2_fs_inconsistent_on(seq > journal_seq + 4, c,
257 "bset journal seq too far in the future: %llu > %llu",
260 if (seq <= journal_seq &&
261 list_empty_careful(&j->seq_blacklist))
264 mutex_lock(&j->blacklist_lock);
266 if (seq <= journal_seq) {
267 bl = journal_seq_blacklist_find(j, seq);
271 bch_verbose(c, "btree node %u:%llu:%llu has future journal sequence number %llu, blacklisting",
272 b->btree_id, b->key.k.p.inode, b->key.k.p.offset, seq);
274 for (i = journal_seq + 1; i <= seq; i++) {
275 bl = journal_seq_blacklist_find(j, i) ?:
276 bch2_journal_seq_blacklisted_new(j, i);
284 for (n = bl->entries; n < bl->entries + bl->nr_entries; n++)
285 if (b->data->keys.seq == n->seq &&
286 b->btree_id == n->btree_id &&
287 !bkey_cmp(b->key.k.p, n->pos))
290 if (!bl->nr_entries ||
291 is_power_of_2(bl->nr_entries)) {
292 n = krealloc(bl->entries,
293 max(bl->nr_entries * 2, 8UL) * sizeof(*n),
302 bl->entries[bl->nr_entries++] = (struct blacklisted_node) {
303 .seq = b->data->keys.seq,
304 .btree_id = b->btree_id,
310 mutex_unlock(&j->blacklist_lock);
315 * Journal replay/recovery:
317 * This code is all driven from bch2_fs_start(); we first read the journal
318 * entries, do some other stuff, then we mark all the keys in the journal
319 * entries (same as garbage collection would), then we replay them - reinserting
320 * them into the cache in precisely the same order as they appear in the
323 * We only journal keys that go in leaf nodes, which simplifies things quite a
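 *
 * Roughly: bch2_fs_start() calls bch2_journal_read() to build the list of
 * entries, bch2_journal_mark() to mark the keys they contain, and then
 * bch2_journal_replay() to reinsert them in order.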
327 struct journal_list {
330 struct list_head *head;
334 #define JOURNAL_ENTRY_ADD_OK 0
335 #define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
338 * Given a journal entry we just read, add it to the list of journal entries to
341 static int journal_entry_add(struct bch_fs *c, struct journal_list *jlist,
344 struct journal_replay *i, *pos;
345 struct list_head *where;
346 size_t bytes = vstruct_bytes(j);
350 mutex_lock(&jlist->lock);
352 last_seq = !list_empty(jlist->head)
353 ? list_last_entry(jlist->head, struct journal_replay,
357 /* Is this entry older than the range we need? */
358 if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
359 ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
363 /* Drop entries we don't need anymore */
364 list_for_each_entry_safe(i, pos, jlist->head, list) {
365 if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
368 kvpfree(i, offsetof(struct journal_replay, j) +
369 vstruct_bytes(&i->j));
372 list_for_each_entry_reverse(i, jlist->head, list) {
374 if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
375 fsck_err_on(bytes != vstruct_bytes(&i->j) ||
376 memcmp(j, &i->j, bytes), c,
377 "found duplicate but non identical journal entries (seq %llu)",
378 le64_to_cpu(j->seq));
380 ret = JOURNAL_ENTRY_ADD_OK;
384 if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
392 i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
398 memcpy(&i->j, j, bytes);
399 list_add(&i->list, where);
400 ret = JOURNAL_ENTRY_ADD_OK;
403 mutex_unlock(&jlist->lock);
407 static struct nonce journal_nonce(const struct jset *jset)
409 return (struct nonce) {{
411 [1] = ((__le32 *) &jset->seq)[0],
412 [2] = ((__le32 *) &jset->seq)[1],
413 [3] = BCH_NONCE_JOURNAL,
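/* seq is unique per journal entry, so each entry gets its own nonce */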
417 static void journal_entry_null_range(void *start, void *end)
419 struct jset_entry *entry;
421 for (entry = start; entry != end; entry = vstruct_next(entry))
422 memset(entry, 0, sizeof(*entry));
425 static int journal_validate_key(struct bch_fs *c, struct jset *j,
426 struct jset_entry *entry,
427 struct bkey_i *k, enum bkey_type key_type,
430 void *next = vstruct_next(entry);
435 if (mustfix_fsck_err_on(!k->k.u64s, c,
436 "invalid %s in journal: k->u64s 0", type)) {
437 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
438 journal_entry_null_range(vstruct_next(entry), next);
442 if (mustfix_fsck_err_on((void *) bkey_next(k) >
443 (void *) vstruct_next(entry), c,
444 "invalid %s in journal: extends past end of journal entry",
446 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
447 journal_entry_null_range(vstruct_next(entry), next);
451 if (mustfix_fsck_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
452 "invalid %s in journal: bad format %u",
453 type, k->k.format)) {
454 le16_add_cpu(&entry->u64s, -k->k.u64s);
455 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
456 journal_entry_null_range(vstruct_next(entry), next);
460 if (JSET_BIG_ENDIAN(j) != CPU_BIG_ENDIAN)
461 bch2_bkey_swab(key_type, NULL, bkey_to_packed(k));
463 invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k));
465 bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf),
467 mustfix_fsck_err(c, "invalid %s in journal: %s", type, buf);
469 le16_add_cpu(&entry->u64s, -k->k.u64s);
470 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
471 journal_entry_null_range(vstruct_next(entry), next);
478 #define JOURNAL_ENTRY_REREAD 5
479 #define JOURNAL_ENTRY_NONE 6
480 #define JOURNAL_ENTRY_BAD 7
482 #define journal_entry_err(c, msg, ...) \
484 if (write == READ) { \
485 mustfix_fsck_err(c, msg, ##__VA_ARGS__); \
487 bch_err(c, "detected corrupt metadata before write:\n" \
488 msg, ##__VA_ARGS__); \
489 ret = BCH_FSCK_ERRORS_NOT_FIXED; \
495 #define journal_entry_err_on(cond, c, msg, ...) \
496 ((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
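/*
 * When validating an entry we just read (write == READ), errors go through
 * fsck and may be repaired; when validating an entry we're about to write,
 * they're fatal (BCH_FSCK_ERRORS_NOT_FIXED), since we'd be writing out corrupt
 * metadata.
 */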
498 static int __journal_entry_validate(struct bch_fs *c, struct jset *j,
501 struct jset_entry *entry;
504 vstruct_for_each(j, entry) {
507 if (journal_entry_err_on(vstruct_next(entry) >
509 "journal entry extends past end of jset")) {
510 j->u64s = cpu_to_le32((u64 *) entry - j->_data);
514 switch (entry->type) {
515 case JOURNAL_ENTRY_BTREE_KEYS:
516 vstruct_for_each(entry, k) {
517 ret = journal_validate_key(c, j, entry, k,
518 bkey_type(entry->level,
526 case JOURNAL_ENTRY_BTREE_ROOT:
529 if (journal_entry_err_on(!entry->u64s ||
530 le16_to_cpu(entry->u64s) != k->k.u64s, c,
531 "invalid btree root journal entry: wrong number of keys")) {
532 journal_entry_null_range(entry,
533 vstruct_next(entry));
537 ret = journal_validate_key(c, j, entry, k,
538 BKEY_TYPE_BTREE, "btree root");
543 case JOURNAL_ENTRY_PRIO_PTRS:
546 case JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED:
547 if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
548 "invalid journal seq blacklist entry: bad size")) {
549 journal_entry_null_range(entry,
550 vstruct_next(entry));
555 journal_entry_err(c, "invalid journal entry type %u",
557 journal_entry_null_range(entry, vstruct_next(entry));
566 static int journal_entry_validate(struct bch_fs *c,
567 struct jset *j, u64 sector,
568 unsigned bucket_sectors_left,
569 unsigned sectors_read,
572 size_t bytes = vstruct_bytes(j);
573 struct bch_csum csum;
576 if (le64_to_cpu(j->magic) != jset_magic(c))
577 return JOURNAL_ENTRY_NONE;
579 if (le32_to_cpu(j->version) != BCACHE_JSET_VERSION) {
580 bch_err(c, "unknown journal entry version %u",
581 le32_to_cpu(j->version));
582 return BCH_FSCK_UNKNOWN_VERSION;
585 if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
586 "journal entry too big (%zu bytes), sector %llu",
588 /* XXX: note we might have missing journal entries */
589 return JOURNAL_ENTRY_BAD;
592 if (bytes > sectors_read << 9)
593 return JOURNAL_ENTRY_REREAD;
595 if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c,
596 "journal entry with unknown csum type %llu sector %llu",
597 JSET_CSUM_TYPE(j), sector))
598 return JOURNAL_ENTRY_BAD;
600 csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
601 if (journal_entry_err_on(bch2_crc_cmp(csum, j->csum), c,
602 "journal checksum bad, sector %llu", sector)) {
603 /* XXX: retry IO, when we start retrying checksum errors */
604 /* XXX: note we might have missing journal entries */
605 return JOURNAL_ENTRY_BAD;
608 bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
610 vstruct_end(j) - (void *) j->encrypted_start);
612 if (journal_entry_err_on(le64_to_cpu(j->last_seq) > le64_to_cpu(j->seq), c,
613 "invalid journal entry: last_seq > seq"))
614 j->last_seq = j->seq;
616 return __journal_entry_validate(c, j, write);
621 struct journal_read_buf {
626 static int journal_read_buf_realloc(struct journal_read_buf *b,
631 /* the bios are sized for this many pages, max: */
632 if (new_size > JOURNAL_ENTRY_SIZE_MAX)
635 new_size = roundup_pow_of_two(new_size);
636 n = kvpmalloc(new_size, GFP_KERNEL);
640 kvpfree(b->data, b->size);
646 static int journal_read_bucket(struct bch_dev *ca,
647 struct journal_read_buf *buf,
648 struct journal_list *jlist,
649 unsigned bucket, u64 *seq, bool *entries_found)
651 struct bch_fs *c = ca->fs;
652 struct journal_device *ja = &ca->journal;
653 struct bio *bio = ja->bio;
654 struct jset *j = NULL;
655 unsigned sectors, sectors_read = 0;
656 u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
657 end = offset + ca->mi.bucket_size;
658 bool saw_bad = false;
661 pr_debug("reading %u", bucket);
663 while (offset < end) {
665 reread: sectors_read = min_t(unsigned,
666 end - offset, buf->size >> 9);
669 bio->bi_bdev = ca->disk_sb.bdev;
670 bio->bi_iter.bi_sector = offset;
671 bio->bi_iter.bi_size = sectors_read << 9;
672 bio_set_op_attrs(bio, REQ_OP_READ, 0);
673 bch2_bio_map(bio, buf->data);
675 ret = submit_bio_wait(bio);
677 if (bch2_dev_io_err_on(ret, ca,
678 "journal read from sector %llu",
680 bch2_meta_read_fault("journal"))
686 ret = journal_entry_validate(c, j, offset,
687 end - offset, sectors_read,
692 case JOURNAL_ENTRY_REREAD:
693 if (vstruct_bytes(j) > buf->size) {
694 ret = journal_read_buf_realloc(buf,
700 case JOURNAL_ENTRY_NONE:
703 sectors = c->sb.block_size;
705 case JOURNAL_ENTRY_BAD:
707 sectors = c->sb.block_size;
714 * This happens sometimes if we don't have discards on -
715 * when we've partially overwritten a bucket with new
716 * journal entries. We don't need the rest of the
719 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
722 ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
724 ret = journal_entry_add(c, jlist, j);
726 case JOURNAL_ENTRY_ADD_OK:
727 *entries_found = true;
729 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
735 if (le64_to_cpu(j->seq) > *seq)
736 *seq = le64_to_cpu(j->seq);
738 sectors = vstruct_sectors(j, c->block_bits);
742 sectors_read -= sectors;
743 j = ((void *) j) + (sectors << 9);
749 static void bch2_journal_read_device(struct closure *cl)
751 #define read_bucket(b) \
753 bool entries_found = false; \
754 ret = journal_read_bucket(ca, &buf, jlist, b, &seq, \
758 __set_bit(b, bitmap); \
762 struct journal_device *ja =
763 container_of(cl, struct journal_device, read);
764 struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
765 struct journal_list *jlist =
766 container_of(cl->parent, struct journal_list, cl);
767 struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev);
768 struct journal_read_buf buf = { NULL, 0 };
770 DECLARE_BITMAP(bitmap, ja->nr);
778 bitmap_zero(bitmap, ja->nr);
779 ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
783 pr_debug("%u journal buckets", ja->nr);
786 * If the device supports discard but not secure discard, we can't do
787 * the fancy fibonacci hash/binary search because the live journal
788 * entries might not form a contiguous range:
790 for (i = 0; i < ja->nr; i++)
794 if (!blk_queue_nonrot(q))
798 * Read journal buckets ordered by golden ratio hash to quickly
799 * find a sequence of buckets with valid journal entries
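 *
 * (2654435769 is 2^32 divided by the golden ratio - Fibonacci hashing - so
 * successive probes are spread evenly across the buckets.)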
801 for (i = 0; i < ja->nr; i++) {
802 l = (i * 2654435769U) % ja->nr;
804 if (test_bit(l, bitmap))
812 * If that fails, check all the buckets we haven't checked
815 pr_debug("falling back to linear search");
817 for (l = find_first_zero_bit(bitmap, ja->nr);
819 l = find_next_zero_bit(bitmap, ja->nr, l + 1))
823 /* no journal entries on this device? */
828 r = find_next_bit(bitmap, ja->nr, l + 1);
829 pr_debug("starting binary search, l %u r %u", l, r);
832 unsigned m = (l + r) >> 1;
845 * Find the journal bucket with the highest sequence number:
847 * If there's duplicate journal entries in multiple buckets (which
848 * definitely isn't supposed to happen, but...) - make sure to start
849 * cur_idx at the last of those buckets, so we don't deadlock trying to
854 for (i = 0; i < ja->nr; i++)
855 if (ja->bucket_seq[i] >= seq &&
856 ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) {
858 * When journal_next_bucket() goes to allocate for
859 * the first time, it'll use the bucket after
863 seq = ja->bucket_seq[i];
867 * Set last_idx to indicate the entire journal is full and needs to be
868 * reclaimed - journal reclaim will immediately reclaim whatever isn't
869 * pinned when it first runs:
871 ja->last_idx = (ja->cur_idx + 1) % ja->nr;
874 * Read buckets in reverse order until we stop finding more journal
877 for (i = (ja->cur_idx + ja->nr - 1) % ja->nr;
879 i = (i + ja->nr - 1) % ja->nr)
880 if (!test_bit(i, bitmap) &&
884 kvpfree(buf.data, buf.size);
885 percpu_ref_put(&ca->io_ref);
888 mutex_lock(&jlist->lock);
890 mutex_unlock(&jlist->lock);
895 void bch2_journal_entries_free(struct list_head *list)
898 while (!list_empty(list)) {
899 struct journal_replay *i =
900 list_first_entry(list, struct journal_replay, list);
902 kvpfree(i, offsetof(struct journal_replay, j) +
903 vstruct_bytes(&i->j));
907 static int journal_seq_blacklist_read(struct journal *j,
908 struct journal_replay *i,
909 struct journal_entry_pin_list *p)
911 struct bch_fs *c = container_of(j, struct bch_fs, journal);
912 struct jset_entry *entry;
913 struct journal_seq_blacklist *bl;
916 for_each_jset_entry_type(entry, &i->j,
917 JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED) {
918 seq = le64_to_cpu(entry->_data[0]);
920 bch_verbose(c, "blacklisting existing journal seq %llu", seq);
922 bl = bch2_journal_seq_blacklisted_new(j, seq);
926 journal_pin_add_entry(j, p, &bl->pin,
927 journal_seq_blacklist_flush);
934 static inline bool journal_has_keys(struct list_head *list)
936 struct journal_replay *i;
937 struct jset_entry *entry;
938 struct bkey_i *k, *_n;
940 list_for_each_entry(i, list, list)
941 for_each_jset_key(k, _n, entry, &i->j)
947 int bch2_journal_read(struct bch_fs *c, struct list_head *list)
949 struct journal *j = &c->journal;
950 struct journal_list jlist;
951 struct journal_replay *i;
952 struct journal_entry_pin_list *p;
954 u64 cur_seq, end_seq;
955 unsigned iter, keys = 0, entries = 0;
958 closure_init_stack(&jlist.cl);
959 mutex_init(&jlist.lock);
963 for_each_readable_member(ca, c, iter) {
964 percpu_ref_get(&ca->io_ref);
965 closure_call(&ca->journal.read,
966 bch2_journal_read_device,
971 closure_sync(&jlist.cl);
976 if (list_empty(list)) {
977 bch_err(c, "no journal entries found");
978 return BCH_FSCK_REPAIR_IMPOSSIBLE;
981 fsck_err_on(c->sb.clean && journal_has_keys(list), c,
982 "filesystem marked clean but journal has keys to replay");
984 i = list_last_entry(list, struct journal_replay, list);
986 unfixable_fsck_err_on(le64_to_cpu(i->j.seq) -
987 le64_to_cpu(i->j.last_seq) + 1 > j->pin.size, c,
988 "too many journal entries open for refcount fifo");
990 atomic64_set(&j->seq, le64_to_cpu(i->j.seq));
991 j->last_seq_ondisk = le64_to_cpu(i->j.last_seq);
993 j->pin.front = le64_to_cpu(i->j.last_seq);
994 j->pin.back = le64_to_cpu(i->j.seq) + 1;
996 BUG_ON(last_seq(j) != le64_to_cpu(i->j.last_seq));
997 BUG_ON(journal_seq_pin(j, atomic64_read(&j->seq)) !=
998 &fifo_peek_back(&j->pin));
1000 fifo_for_each_entry_ptr(p, &j->pin, iter) {
1001 INIT_LIST_HEAD(&p->list);
1002 INIT_LIST_HEAD(&p->flushed);
1003 atomic_set(&p->count, 0);
1006 mutex_lock(&j->blacklist_lock);
1008 list_for_each_entry(i, list, list) {
1009 p = journal_seq_pin(j, le64_to_cpu(i->j.seq));
1011 atomic_set(&p->count, 1);
1013 if (journal_seq_blacklist_read(j, i, p)) {
1014 mutex_unlock(&j->blacklist_lock);
1019 mutex_unlock(&j->blacklist_lock);
1021 cur_seq = last_seq(j);
1022 end_seq = le64_to_cpu(list_last_entry(list,
1023 struct journal_replay, list)->j.seq);
1025 list_for_each_entry(i, list, list) {
1026 struct jset_entry *entry;
1027 struct bkey_i *k, *_n;
1030 mutex_lock(&j->blacklist_lock);
1031 while (cur_seq < le64_to_cpu(i->j.seq) &&
1032 journal_seq_blacklist_find(j, cur_seq))
1035 blacklisted = journal_seq_blacklist_find(j,
1036 le64_to_cpu(i->j.seq));
1037 mutex_unlock(&j->blacklist_lock);
1039 fsck_err_on(blacklisted, c,
1040 "found blacklisted journal entry %llu",
1041 le64_to_cpu(i->j.seq));
1043 fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
1044 "journal entries %llu-%llu missing! (replaying %llu-%llu)",
1045 cur_seq, le64_to_cpu(i->j.seq) - 1,
1046 last_seq(j), end_seq);
1048 cur_seq = le64_to_cpu(i->j.seq) + 1;
1050 for_each_jset_key(k, _n, entry, &i->j)
1055 bch_info(c, "journal read done, %i keys in %i entries, seq %llu",
1056 keys, entries, (u64) atomic64_read(&j->seq));
1061 int bch2_journal_mark(struct bch_fs *c, struct list_head *list)
1063 struct bkey_i *k, *n;
1064 struct jset_entry *j;
1065 struct journal_replay *r;
1068 list_for_each_entry(r, list, list)
1069 for_each_jset_key(k, n, j, &r->j) {
1070 enum bkey_type type = bkey_type(j->level, j->btree_id);
1071 struct bkey_s_c k_s_c = bkey_i_to_s_c(k);
1073 if (btree_type_has_ptrs(type)) {
1074 ret = bch2_btree_mark_key_initial(c, type, k_s_c);
1083 static bool journal_entry_is_open(struct journal *j)
1085 return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
1088 void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
1090 struct journal_buf *w = journal_prev_buf(j);
1092 atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);
1094 if (!need_write_just_set &&
1095 test_bit(JOURNAL_NEED_WRITE, &j->flags))
1096 __bch2_time_stats_update(j->delay_time,
1097 j->need_write_time);
1099 closure_call(&j->io, journal_write, NULL, NULL);
1101 /* Shut sparse up: */
1102 closure_init(&j->io, NULL);
1103 set_closure_fn(&j->io, journal_write, NULL);
1104 journal_write(&j->io);
1108 static void __journal_entry_new(struct journal *j, int count)
1110 struct journal_entry_pin_list *p = fifo_push_ref(&j->pin);
1113 * The fifo_push() needs to happen at the same time as j->seq is
1114 * incremented for last_seq() to be calculated correctly
1116 atomic64_inc(&j->seq);
1118 BUG_ON(journal_seq_pin(j, atomic64_read(&j->seq)) !=
1119 &fifo_peek_back(&j->pin));
1121 INIT_LIST_HEAD(&p->list);
1122 INIT_LIST_HEAD(&p->flushed);
1123 atomic_set(&p->count, count);
1126 static void __bch2_journal_next_entry(struct journal *j)
1128 struct journal_buf *buf;
1130 __journal_entry_new(j, 1);
1132 buf = journal_cur_buf(j);
1133 memset(buf->has_inode, 0, sizeof(buf->has_inode));
1135 memset(buf->data, 0, sizeof(*buf->data));
1136 buf->data->seq = cpu_to_le64(atomic64_read(&j->seq));
1137 buf->data->u64s = 0;
1140 static inline size_t journal_entry_u64s_reserve(struct journal_buf *buf)
1142 return BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
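/*
 * (This reserve covers the btree roots that journal_write() appends right
 * before the entry goes to disk - one key per btree - so adding them never
 * requires a journal reservation of its own.)
 */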
1146 JOURNAL_ENTRY_ERROR,
1147 JOURNAL_ENTRY_INUSE,
1148 JOURNAL_ENTRY_CLOSED,
1150 } journal_buf_switch(struct journal *j, bool need_write_just_set)
1152 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1153 struct journal_buf *buf;
1154 union journal_res_state old, new;
1155 u64 v = atomic64_read(&j->reservations.counter);
1157 lockdep_assert_held(&j->lock);
1161 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
1162 return JOURNAL_ENTRY_CLOSED;
1164 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1165 return JOURNAL_ENTRY_ERROR;
1167 if (new.prev_buf_unwritten)
1168 return JOURNAL_ENTRY_INUSE;
1171 * avoid race between setting buf->data->u64s and
1172 * journal_res_put starting write:
1174 journal_state_inc(&new);
1176 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
1178 new.prev_buf_unwritten = 1;
1180 BUG_ON(journal_state_count(new, new.idx));
1181 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1182 old.v, new.v)) != old.v);
1184 journal_reclaim_fast(j);
1186 clear_bit(JOURNAL_NEED_WRITE, &j->flags);
1188 buf = &j->buf[old.idx];
1189 buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
1190 buf->data->last_seq = cpu_to_le64(last_seq(j));
1192 j->prev_buf_sectors =
1193 vstruct_blocks_plus(buf->data, c->block_bits,
1194 journal_entry_u64s_reserve(buf)) *
1197 BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);
1199 __bch2_journal_next_entry(j);
1201 cancel_delayed_work(&j->write_work);
1202 spin_unlock(&j->lock);
1204 if (c->bucket_journal_seq > 1 << 14) {
1205 c->bucket_journal_seq = 0;
1206 bch2_bucket_seq_cleanup(c);
1209 /* ugh - might be called from __journal_res_get() under wait_event() */
1210 __set_current_state(TASK_RUNNING);
1211 bch2_journal_buf_put(j, old.idx, need_write_just_set);
1213 return JOURNAL_UNLOCKED;
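/*
 * Note: JOURNAL_UNLOCKED tells the caller that j->lock was dropped above;
 * callers only unlock themselves for the other return values.
 */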
1216 void bch2_journal_halt(struct journal *j)
1218 union journal_res_state old, new;
1219 u64 v = atomic64_read(&j->reservations.counter);
1223 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1226 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
1227 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1228 old.v, new.v)) != old.v);
1231 closure_wake_up(&journal_cur_buf(j)->wait);
1232 closure_wake_up(&journal_prev_buf(j)->wait);
1235 static unsigned journal_dev_buckets_available(struct journal *j,
1238 struct journal_device *ja = &ca->journal;
1239 unsigned next = (ja->cur_idx + 1) % ja->nr;
1240 unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
1243 * Hack to avoid a deadlock during journal replay:
1244 * journal replay might require setting a new btree
1245 * root, which requires writing another journal entry -
1246 * thus, if the journal is full (and this happens when
1247 * replaying the first journal bucket's entries) we're
1250 * So don't let the journal fill up unless we're in
1253 if (test_bit(JOURNAL_REPLAY_DONE, &j->flags))
1254 available = max((int) available - 2, 0);
1257 * Don't use the last bucket unless writing the new last_seq
1258 * will make another bucket available:
1260 if (ja->bucket_seq[ja->last_idx] >= last_seq(j))
1261 available = max((int) available - 1, 0);
1266 /* returns number of sectors available for next journal entry: */
1267 static int journal_entry_sectors(struct journal *j)
1269 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1271 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
1272 unsigned sectors_available = UINT_MAX;
1273 unsigned i, nr_online = 0, nr_devs = 0;
1275 lockdep_assert_held(&j->lock);
1278 for_each_member_device_rcu(ca, c, i,
1279 &c->rw_devs[BCH_DATA_JOURNAL]) {
1280 struct journal_device *ja = &ca->journal;
1281 unsigned buckets_required = 0;
1286 sectors_available = min_t(unsigned, sectors_available,
1287 ca->mi.bucket_size);
1290 * Note that we don't allocate the space for a journal entry
1291 * until we write it out - thus, if we haven't started the write
1292 * for the previous entry we have to make sure we have space for
1295 if (bch2_extent_has_device(e.c, ca->dev_idx)) {
1296 if (j->prev_buf_sectors > ja->sectors_free)
1299 if (j->prev_buf_sectors + sectors_available >
1303 if (j->prev_buf_sectors + sectors_available >
1310 if (journal_dev_buckets_available(j, ca) >= buckets_required)
1316 if (nr_online < c->opts.metadata_replicas_required)
1319 if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas))
1322 return sectors_available;
1326 * should _only_ be called from journal_res_get() - when we actually want a
1327 * journal reservation - journal entry is open means journal is dirty:
1329 static int journal_entry_open(struct journal *j)
1331 struct journal_buf *buf = journal_cur_buf(j);
1333 int ret = 0, sectors;
1335 lockdep_assert_held(&j->lock);
1336 BUG_ON(journal_entry_is_open(j));
1338 if (!fifo_free(&j->pin))
1341 sectors = journal_entry_sectors(j);
1345 buf->disk_sectors = sectors;
1347 sectors = min_t(unsigned, sectors, buf->size >> 9);
1348 j->cur_buf_sectors = sectors;
1350 u64s = (sectors << 9) / sizeof(u64);
1352 /* Subtract the journal header */
1353 u64s -= sizeof(struct jset) / sizeof(u64);
1355 * Btree roots, prio pointers don't get added until right before we do
1358 u64s -= journal_entry_u64s_reserve(buf);
1359 u64s = max_t(ssize_t, 0L, u64s);
1361 BUG_ON(u64s >= JOURNAL_ENTRY_CLOSED_VAL);
1363 if (u64s > le32_to_cpu(buf->data->u64s)) {
1364 union journal_res_state old, new;
1365 u64 v = atomic64_read(&j->reservations.counter);
1368 * Must be set before marking the journal entry as open:
1370 j->cur_entry_u64s = u64s;
1375 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1378 /* Handle any already added entries */
1379 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
1380 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1381 old.v, new.v)) != old.v);
1386 if (j->res_get_blocked_start) {
1387 __bch2_time_stats_update(j->blocked_time,
1388 j->res_get_blocked_start);
1389 j->res_get_blocked_start = 0;
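/*
 * Arm the delayed write: if nothing forces a write sooner,
 * journal_write_work() will close and submit this entry after write_delay_ms.
 */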
1392 mod_delayed_work(system_freezable_wq,
1394 msecs_to_jiffies(j->write_delay_ms));
1400 void bch2_journal_start(struct bch_fs *c)
1402 struct journal *j = &c->journal;
1403 struct journal_seq_blacklist *bl;
1406 list_for_each_entry(bl, &j->seq_blacklist, list)
1407 new_seq = max(new_seq, bl->seq);
1409 spin_lock(&j->lock);
1411 set_bit(JOURNAL_STARTED, &j->flags);
1413 while (atomic64_read(&j->seq) < new_seq)
1414 __journal_entry_new(j, 0);
1417 * journal_buf_switch() only inits the next journal entry when it
1418 * closes an open journal entry - the very first journal entry gets
1421 __bch2_journal_next_entry(j);
1424 * Adding entries to the next journal entry before allocating space on
1425 * disk for the next journal entry - this is ok, because these entries
1426 * only have to go down with the next journal entry we write:
1428 list_for_each_entry(bl, &j->seq_blacklist, list)
1430 bch2_journal_add_entry_noreservation(journal_cur_buf(j),
1431 JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED,
1434 journal_pin_add_entry(j,
1435 &fifo_peek_back(&j->pin),
1437 journal_seq_blacklist_flush);
1441 spin_unlock(&j->lock);
1443 queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
1446 int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
1448 struct journal *j = &c->journal;
1449 struct bkey_i *k, *_n;
1450 struct jset_entry *entry;
1451 struct journal_replay *i, *n;
1452 int ret = 0, did_replay = 0;
1454 list_for_each_entry_safe(i, n, list, list) {
1455 j->replay_pin_list =
1456 journal_seq_pin(j, le64_to_cpu(i->j.seq));
1458 for_each_jset_key(k, _n, entry, &i->j) {
1459 struct disk_reservation disk_res;
1461 if (entry->btree_id == BTREE_ID_ALLOC) {
1463 * allocation code handles replay for
1464 * BTREE_ID_ALLOC keys:
1466 ret = bch2_alloc_replay_key(c, k->k.p);
1470 * We might cause compressed extents to be
1471 * split, so we need to pass in a
1474 BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
1476 ret = bch2_btree_insert(c, entry->btree_id, k,
1477 &disk_res, NULL, NULL,
1478 BTREE_INSERT_NOFAIL|
1479 BTREE_INSERT_JOURNAL_REPLAY);
1480 bch2_disk_reservation_put(c, &disk_res);
1484 bch_err(c, "journal replay: error %d while replaying key",
1493 if (atomic_dec_and_test(&j->replay_pin_list->count))
1497 j->replay_pin_list = NULL;
1500 bch2_journal_flush_pins(&c->journal, U64_MAX);
1503 * Write a new journal entry _before_ we start journalling new data -
1504 * otherwise, we could end up with btree node bsets with journal seqs
1505 * arbitrarily far in the future vs. the most recently written journal
1506 * entry on disk, if we crash before writing the next journal entry:
1508 ret = bch2_journal_meta(j);
1510 bch_err(c, "journal replay: error %d flushing journal", ret);
1515 bch2_journal_set_replay_done(j);
1517 bch2_journal_entries_free(list);
1522 * Allocate more journal space at runtime - not currently making use of it, but
1525 static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
1528 struct journal *j = &c->journal;
1529 struct journal_device *ja = &ca->journal;
1530 struct bch_sb_field_journal *journal_buckets;
1531 struct disk_reservation disk_res = { 0, 0 };
1533 u64 *new_bucket_seq = NULL, *new_buckets = NULL;
1536 closure_init_stack(&cl);
1538 /* don't handle reducing nr of buckets yet: */
1543 * note: journal buckets aren't really counted as _sectors_ used yet, so
1544 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
1545 * when space used goes up without a reservation - but we do need the
1546 * reservation to ensure we'll actually be able to allocate:
1549 if (bch2_disk_reservation_get(c, &disk_res,
1550 bucket_to_sector(ca, nr - ja->nr), 0))
1553 mutex_lock(&c->sb_lock);
1556 new_buckets = kzalloc(nr * sizeof(u64), GFP_KERNEL);
1557 new_bucket_seq = kzalloc(nr * sizeof(u64), GFP_KERNEL);
1558 if (!new_buckets || !new_bucket_seq)
1561 journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
1562 nr + sizeof(*journal_buckets) / sizeof(u64));
1563 if (!journal_buckets)
1566 spin_lock(&j->lock);
1567 memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
1568 memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
1569 swap(new_buckets, ja->buckets);
1570 swap(new_bucket_seq, ja->bucket_seq);
1572 while (ja->nr < nr) {
1573 /* must happen under journal lock, to avoid racing with gc: */
1574 long b = bch2_bucket_alloc(c, ca, RESERVE_ALLOC);
1576 if (!closure_wait(&c->freelist_wait, &cl)) {
1577 spin_unlock(&j->lock);
1579 spin_lock(&j->lock);
1584 bch2_mark_metadata_bucket(ca, &ca->buckets[b],
1585 BUCKET_JOURNAL, false);
1586 bch2_mark_alloc_bucket(ca, &ca->buckets[b], false);
1588 memmove(ja->buckets + ja->last_idx + 1,
1589 ja->buckets + ja->last_idx,
1590 (ja->nr - ja->last_idx) * sizeof(u64));
1591 memmove(ja->bucket_seq + ja->last_idx + 1,
1592 ja->bucket_seq + ja->last_idx,
1593 (ja->nr - ja->last_idx) * sizeof(u64));
1594 memmove(journal_buckets->buckets + ja->last_idx + 1,
1595 journal_buckets->buckets + ja->last_idx,
1596 (ja->nr - ja->last_idx) * sizeof(u64));
1598 ja->buckets[ja->last_idx] = b;
1599 journal_buckets->buckets[ja->last_idx] = cpu_to_le64(b);
1601 if (ja->last_idx < ja->nr) {
1602 if (ja->cur_idx >= ja->last_idx)
1609 spin_unlock(&j->lock);
1611 BUG_ON(bch2_sb_validate_journal(ca->disk_sb.sb, ca->mi));
1613 bch2_write_super(c);
1617 mutex_unlock(&c->sb_lock);
1619 kfree(new_bucket_seq);
1621 bch2_disk_reservation_put(c, &disk_res);
1624 bch2_dev_allocator_add(c, ca);
1629 int bch2_dev_journal_alloc(struct bch_dev *ca)
1633 if (dynamic_fault("bcachefs:add:journal_alloc"))
1637 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
1640 nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
1641 BCH_JOURNAL_BUCKETS_MIN,
1643 (1 << 20) / ca->mi.bucket_size));
1645 return bch2_set_nr_journal_buckets(ca->fs, ca, nr);
1651 * journal_reclaim_fast - do the fast part of journal reclaim
1653 * Called from IO submission context, does not block. Cleans up after btree
1654 * write completions by advancing the journal pin and each cache's last_idx,
1655 * kicking off discards and background reclaim as necessary.
1657 static void journal_reclaim_fast(struct journal *j)
1659 struct journal_entry_pin_list temp;
1660 bool popped = false;
1662 lockdep_assert_held(&j->lock);
1665 * Unpin journal entries whose reference counts reached zero, meaning
1666 * all btree nodes got written out
1668 while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
1669 BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
1670 BUG_ON(!fifo_pop(&j->pin, temp));
1679 * Journal entry pinning - machinery for holding a reference on a given journal
1680 * entry, marking it as dirty:
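 *
 * Typical lifecycle: when keys from journal entry N are inserted into a btree
 * node, the node takes a pin on entry N; once that node has been written out
 * the pin is dropped, and when the entry's count reaches zero
 * journal_reclaim_fast() can retire it.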
1683 static inline void __journal_pin_add(struct journal *j,
1684 struct journal_entry_pin_list *pin_list,
1685 struct journal_entry_pin *pin,
1686 journal_pin_flush_fn flush_fn)
1688 BUG_ON(journal_pin_active(pin));
1690 atomic_inc(&pin_list->count);
1691 pin->pin_list = pin_list;
1692 pin->flush = flush_fn;
1695 list_add(&pin->list, &pin_list->list);
1697 INIT_LIST_HEAD(&pin->list);
1700 static void journal_pin_add_entry(struct journal *j,
1701 struct journal_entry_pin_list *pin_list,
1702 struct journal_entry_pin *pin,
1703 journal_pin_flush_fn flush_fn)
1705 spin_lock_irq(&j->pin_lock);
1706 __journal_pin_add(j, pin_list, pin, flush_fn);
1707 spin_unlock_irq(&j->pin_lock);
1710 void bch2_journal_pin_add(struct journal *j,
1711 struct journal_res *res,
1712 struct journal_entry_pin *pin,
1713 journal_pin_flush_fn flush_fn)
1715 struct journal_entry_pin_list *pin_list = res->ref
1716 ? journal_seq_pin(j, res->seq)
1717 : j->replay_pin_list;
1719 spin_lock_irq(&j->pin_lock);
1720 __journal_pin_add(j, pin_list, pin, flush_fn);
1721 spin_unlock_irq(&j->pin_lock);
1724 static inline bool __journal_pin_drop(struct journal *j,
1725 struct journal_entry_pin *pin)
1727 struct journal_entry_pin_list *pin_list = pin->pin_list;
1729 pin->pin_list = NULL;
1731 /* journal_reclaim_work() might have already taken us off the list */
1732 if (!list_empty_careful(&pin->list))
1733 list_del_init(&pin->list);
1735 return atomic_dec_and_test(&pin_list->count);
1738 void bch2_journal_pin_drop(struct journal *j,
1739 struct journal_entry_pin *pin)
1741 unsigned long flags;
1742 bool wakeup = false;
1744 spin_lock_irqsave(&j->pin_lock, flags);
1745 if (journal_pin_active(pin))
1746 wakeup = __journal_pin_drop(j, pin);
1747 spin_unlock_irqrestore(&j->pin_lock, flags);
1750 * Unpinning a journal entry may make journal_next_bucket() succeed, if
1751 * writing a new last_seq will now make another bucket available:
1753 * Nested irqsave is expensive, don't do the wakeup with lock held:
1759 void bch2_journal_pin_add_if_older(struct journal *j,
1760 struct journal_entry_pin *src_pin,
1761 struct journal_entry_pin *pin,
1762 journal_pin_flush_fn flush_fn)
1764 spin_lock_irq(&j->pin_lock);
1766 if (journal_pin_active(src_pin) &&
1767 (!journal_pin_active(pin) ||
1768 fifo_entry_idx(&j->pin, src_pin->pin_list) <
1769 fifo_entry_idx(&j->pin, pin->pin_list))) {
1770 if (journal_pin_active(pin))
1771 __journal_pin_drop(j, pin);
1772 __journal_pin_add(j, src_pin->pin_list, pin, flush_fn);
1775 spin_unlock_irq(&j->pin_lock);
1778 static struct journal_entry_pin *
1779 journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
1781 struct journal_entry_pin_list *pin_list;
1782 struct journal_entry_pin *ret = NULL;
1785 /* so we don't iterate over empty fifo entries below: */
1786 if (!atomic_read(&fifo_peek_front(&j->pin).count)) {
1787 spin_lock(&j->lock);
1788 journal_reclaim_fast(j);
1789 spin_unlock(&j->lock);
1792 spin_lock_irq(&j->pin_lock);
1793 fifo_for_each_entry_ptr(pin_list, &j->pin, iter) {
1794 if (journal_pin_seq(j, pin_list) > seq_to_flush)
1797 ret = list_first_entry_or_null(&pin_list->list,
1798 struct journal_entry_pin, list);
1800 /* must be list_del_init(), see bch2_journal_pin_drop() */
1801 list_move(&ret->list, &pin_list->flushed);
1802 *seq = journal_pin_seq(j, pin_list);
1806 spin_unlock_irq(&j->pin_lock);
1811 static bool journal_flush_done(struct journal *j, u64 seq_to_flush)
1815 spin_lock(&j->lock);
1816 journal_reclaim_fast(j);
1818 ret = (fifo_used(&j->pin) == 1 &&
1819 atomic_read(&fifo_peek_front(&j->pin).count) == 1) ||
1820 last_seq(j) > seq_to_flush;
1821 spin_unlock(&j->lock);
1826 void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
1828 struct journal_entry_pin *pin;
1831 if (!test_bit(JOURNAL_STARTED, &j->flags))
1834 while ((pin = journal_get_next_pin(j, seq_to_flush, &pin_seq)))
1835 pin->flush(j, pin, pin_seq);
1838 journal_flush_done(j, seq_to_flush) ||
1839 bch2_journal_error(j));
1842 static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
1846 spin_lock(&j->lock);
1848 (ja->last_idx != ja->cur_idx &&
1849 ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
1850 spin_unlock(&j->lock);
1856 * journal_reclaim_work - free up journal buckets
1858 * Background journal reclaim writes out btree nodes. It should be run
1859 * early enough so that we never completely run out of journal buckets.
1861 * High watermarks for triggering background reclaim:
1862 * - FIFO has fewer than 512 entries left
1863 * - fewer than 25% journal buckets free
1865 * Background reclaim runs until low watermarks are reached:
1866 * - FIFO has more than 1024 entries left
1867 * - more than 50% journal buckets free
1869 * As long as a reclaim can complete in the time it takes to fill up
1870 * 512 journal entries or 25% of all journal buckets, then
1871 * journal_next_bucket() should not stall.
1873 static void journal_reclaim_work(struct work_struct *work)
1875 struct bch_fs *c = container_of(to_delayed_work(work),
1876 struct bch_fs, journal.reclaim_work);
1877 struct journal *j = &c->journal;
1879 struct journal_entry_pin *pin;
1880 u64 seq, seq_to_flush = 0;
1881 unsigned iter, bucket_to_flush;
1882 unsigned long next_flush;
1883 bool reclaim_lock_held = false, need_flush;
1886 * Advance last_idx to point to the oldest journal entry containing
1887 * btree node updates that have not yet been written out
1889 for_each_rw_member(ca, c, iter) {
1890 struct journal_device *ja = &ca->journal;
1895 while (should_discard_bucket(j, ja)) {
1896 if (!reclaim_lock_held) {
1899 * might be called from __journal_res_get()
1900 * under wait_event() - have to go back to
1901 * TASK_RUNNING before doing something that
1902 * would block, but only if we're doing work:
1904 __set_current_state(TASK_RUNNING);
1906 mutex_lock(&j->reclaim_lock);
1907 reclaim_lock_held = true;
1908 /* recheck under reclaim_lock: */
1912 if (ca->mi.discard &&
1913 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1914 blkdev_issue_discard(ca->disk_sb.bdev,
1915 bucket_to_sector(ca,
1916 ja->buckets[ja->last_idx]),
1917 ca->mi.bucket_size, GFP_NOIO, 0);
1919 spin_lock(&j->lock);
1920 ja->last_idx = (ja->last_idx + 1) % ja->nr;
1921 spin_unlock(&j->lock);
1927 * Write out enough btree nodes to free up 50% journal
1930 spin_lock(&j->lock);
1931 bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
1932 seq_to_flush = max_t(u64, seq_to_flush,
1933 ja->bucket_seq[bucket_to_flush]);
1934 spin_unlock(&j->lock);
1937 if (reclaim_lock_held)
1938 mutex_unlock(&j->reclaim_lock);
1940 /* Also flush if the pin fifo is more than half full */
1941 seq_to_flush = max_t(s64, seq_to_flush,
1942 (s64) atomic64_read(&j->seq) -
1943 (j->pin.size >> 1));
1946 * If it's been longer than j->reclaim_delay_ms since we last flushed,
1947 * make sure to flush at least one journal pin:
1949 next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
1950 need_flush = time_after(jiffies, next_flush);
1952 while ((pin = journal_get_next_pin(j, need_flush
1954 : seq_to_flush, &seq))) {
1955 __set_current_state(TASK_RUNNING);
1956 pin->flush(j, pin, seq);
1959 j->last_flushed = jiffies;
1962 if (!test_bit(BCH_FS_RO, &c->flags))
1963 queue_delayed_work(system_freezable_wq, &j->reclaim_work,
1964 msecs_to_jiffies(j->reclaim_delay_ms));
1968 * journal_next_bucket - move on to the next journal bucket if possible
1970 static int journal_write_alloc(struct journal *j, unsigned sectors)
1972 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1973 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
1974 struct bch_extent_ptr *ptr;
1975 struct journal_device *ja;
1977 struct dev_alloc_list devs_sorted;
1978 unsigned i, replicas, replicas_want =
1979 READ_ONCE(c->opts.metadata_replicas);
1981 spin_lock(&j->lock);
1984 * Drop any pointers to devices that have been removed, are no longer
1985 * empty, or filled up their current journal bucket:
1987 * Note that a device may have had a small amount of free space (perhaps
1988 * one sector) that wasn't enough for the smallest possible journal
1989 * entry - that's why we drop pointers to devices <= current free space,
1990 * i.e. whichever device was limiting the current journal entry size.
1992 extent_for_each_ptr_backwards(e, ptr) {
1993 ca = c->devs[ptr->dev];
1995 if (ca->mi.state != BCH_MEMBER_STATE_RW ||
1996 ca->journal.sectors_free <= sectors)
1997 __bch2_extent_drop_ptr(e, ptr);
1999 ca->journal.sectors_free -= sectors;
2002 replicas = bch2_extent_nr_ptrs(e.c);
2005 devs_sorted = bch2_wp_alloc_list(c, &j->wp,
2006 &c->rw_devs[BCH_DATA_JOURNAL]);
2008 for (i = 0; i < devs_sorted.nr; i++) {
2009 ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
2017 if (replicas >= replicas_want)
2021 * Check that we can use this device, and aren't already using
2024 if (bch2_extent_has_device(e.c, ca->dev_idx) ||
2025 !journal_dev_buckets_available(j, ca) ||
2026 sectors > ca->mi.bucket_size)
2029 j->wp.next_alloc[ca->dev_idx] += U32_MAX;
2030 bch2_wp_rescale(c, ca, &j->wp);
2032 ja->sectors_free = ca->mi.bucket_size - sectors;
2033 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
2034 ja->bucket_seq[ja->cur_idx] = atomic64_read(&j->seq);
2036 extent_ptr_append(bkey_i_to_extent(&j->key),
2037 (struct bch_extent_ptr) {
2038 .offset = bucket_to_sector(ca,
2039 ja->buckets[ja->cur_idx]),
2046 j->prev_buf_sectors = 0;
2047 spin_unlock(&j->lock);
2049 if (replicas < c->opts.metadata_replicas_required)
2057 static void journal_write_compact(struct jset *jset)
2059 struct jset_entry *i, *next, *prev = NULL;
2062 * Simple compaction, dropping empty jset_entries (from journal
2063 * reservations that weren't fully used) and merging jset_entries that
2066 * If we wanted to be really fancy here, we could sort all the keys in
2067 * the jset and drop keys that were overwritten - probably not worth it:
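 *
 * (Two adjacent entries are merged when both are JOURNAL_ENTRY_BTREE_KEYS for
 * the same btree_id and level and the combined size still fits in a u16.)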
2069 vstruct_for_each_safe(jset, i, next) {
2070 unsigned u64s = le16_to_cpu(i->u64s);
2076 /* Can we merge with previous entry? */
2078 i->btree_id == prev->btree_id &&
2079 i->level == prev->level &&
2080 i->type == prev->type &&
2081 i->type == JOURNAL_ENTRY_BTREE_KEYS &&
2082 le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
2083 memmove_u64s_down(vstruct_next(prev),
2086 le16_add_cpu(&prev->u64s, u64s);
2090 /* Couldn't merge, move i into new position (after prev): */
2091 prev = prev ? vstruct_next(prev) : jset->start;
2093 memmove_u64s_down(prev, i, jset_u64s(u64s));
2096 prev = prev ? vstruct_next(prev) : jset->start;
2097 jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
2100 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
2102 /* we aren't holding j->lock: */
2103 unsigned new_size = READ_ONCE(j->buf_size_want);
2106 if (buf->size >= new_size)
2109 new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
2113 memcpy(new_buf, buf->data, buf->size);
2114 kvpfree(buf->data, buf->size);
2115 buf->data = new_buf;
2116 buf->size = new_size;
2119 static void journal_write_done(struct closure *cl)
2121 struct journal *j = container_of(cl, struct journal, io);
2122 struct journal_buf *w = journal_prev_buf(j);
2124 __bch2_time_stats_update(j->write_time, j->write_start_time);
2126 spin_lock(&j->lock);
2127 j->last_seq_ondisk = le64_to_cpu(w->data->last_seq);
2130 * Updating last_seq_ondisk may let journal_reclaim_work() discard more
2133 * Must come before signaling write completion, for
2134 * bch2_fs_journal_stop():
2136 mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
2138 /* also must come before signalling write completion: */
2139 closure_debug_destroy(cl);
2141 BUG_ON(!j->reservations.prev_buf_unwritten);
2142 atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
2143 &j->reservations.counter);
2145 closure_wake_up(&w->wait);
2148 if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
2149 mod_delayed_work(system_freezable_wq, &j->write_work, 0);
2150 spin_unlock(&j->lock);
2153 static void journal_write_error(struct closure *cl)
2155 struct journal *j = container_of(cl, struct journal, io);
2156 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2157 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
2159 while (j->replicas_failed) {
2160 unsigned idx = __fls(j->replicas_failed);
2162 bch2_extent_drop_ptr_idx(e, idx);
2163 j->replicas_failed ^= 1 << idx;
2166 if (!bch2_extent_nr_ptrs(e.c)) {
2167 bch_err(c, "unable to write journal to sufficient devices");
2171 if (bch2_check_mark_super(c, e.c, BCH_DATA_JOURNAL))
2175 journal_write_done(cl);
2178 bch2_fatal_error(c);
2179 bch2_journal_halt(j);
2183 static void journal_write_endio(struct bio *bio)
2185 struct bch_dev *ca = bio->bi_private;
2186 struct journal *j = &ca->fs->journal;
2188 if (bch2_dev_io_err_on(bio->bi_error, ca, "journal write") ||
2189 bch2_meta_write_fault("journal")) {
2190 /* Was this a flush or an actual journal write? */
2191 if (ca->journal.ptr_idx != U8_MAX) {
2192 set_bit(ca->journal.ptr_idx, &j->replicas_failed);
2193 set_closure_fn(&j->io, journal_write_error,
2198 closure_put(&j->io);
2199 percpu_ref_put(&ca->io_ref);
2202 static void journal_write(struct closure *cl)
2204 struct journal *j = container_of(cl, struct journal, io);
2205 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2207 struct journal_buf *w = journal_prev_buf(j);
2210 struct bch_extent_ptr *ptr;
2211 unsigned i, sectors, bytes, ptr_idx = 0;
2213 journal_buf_realloc(j, w);
2216 j->write_start_time = local_clock();
2217 mutex_lock(&c->btree_root_lock);
2218 for (i = 0; i < BTREE_ID_NR; i++) {
2219 struct btree_root *r = &c->btree_roots[i];
2222 bch2_journal_add_btree_root(w, i, &r->key, r->level);
2224 mutex_unlock(&c->btree_root_lock);
2226 journal_write_compact(jset);
2228 jset->read_clock = cpu_to_le16(c->prio_clock[READ].hand);
2229 jset->write_clock = cpu_to_le16(c->prio_clock[WRITE].hand);
2230 jset->magic = cpu_to_le64(jset_magic(c));
2231 jset->version = cpu_to_le32(BCACHE_JSET_VERSION);
2233 SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
2234 SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
2236 if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
2237 __journal_entry_validate(c, jset, WRITE))
2240 bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
2241 jset->encrypted_start,
2242 vstruct_end(jset) - (void *) jset->encrypted_start);
2244 jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
2245 journal_nonce(jset), jset);
2247 if (!bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
2248 __journal_entry_validate(c, jset, WRITE))
2251 sectors = vstruct_sectors(jset, c->block_bits);
2252 BUG_ON(sectors > j->prev_buf_sectors);
2254 bytes = vstruct_bytes(w->data);
2255 memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
2257 if (journal_write_alloc(j, sectors)) {
2258 bch2_journal_halt(j);
2259 bch_err(c, "Unable to allocate journal write");
2260 bch2_fatal_error(c);
2261 continue_at(cl, journal_write_done, system_highpri_wq);
2264 if (bch2_check_mark_super(c, bkey_i_to_s_c_extent(&j->key),
2269 * XXX: we really should just disable the entire journal in nochanges
2272 if (c->opts.nochanges)
2275 extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr) {
2276 ca = c->devs[ptr->dev];
2277 if (!percpu_ref_tryget(&ca->io_ref)) {
2279 bch_err(c, "missing device for journal write\n");
2283 this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
2286 ca->journal.ptr_idx = ptr_idx++;
2287 bio = ca->journal.bio;
2289 bio->bi_iter.bi_sector = ptr->offset;
2290 bio->bi_bdev = ca->disk_sb.bdev;
2291 bio->bi_iter.bi_size = sectors << 9;
2292 bio->bi_end_io = journal_write_endio;
2293 bio->bi_private = ca;
2294 bio_set_op_attrs(bio, REQ_OP_WRITE,
2295 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
2296 bch2_bio_map(bio, jset);
2298 trace_journal_write(bio);
2299 closure_bio_submit(bio, cl);
2301 ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
2304 for_each_rw_member(ca, c, i)
2305 if (journal_flushes_device(ca) &&
2306 !bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) {
2307 percpu_ref_get(&ca->io_ref);
2309 ca->journal.ptr_idx = U8_MAX;
2310 bio = ca->journal.bio;
2312 bio->bi_bdev = ca->disk_sb.bdev;
2313 bio->bi_opf = REQ_OP_FLUSH;
2314 bio->bi_end_io = journal_write_endio;
2315 bio->bi_private = ca;
2316 closure_bio_submit(bio, cl);
2320 extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
2321 ptr->offset += sectors;
2323 continue_at(cl, journal_write_done, system_highpri_wq);
2325 bch2_inconsistent_error(c);
2326 continue_at(cl, journal_write_done, system_highpri_wq);
2329 static void journal_write_work(struct work_struct *work)
2331 struct journal *j = container_of(to_delayed_work(work),
2332 struct journal, write_work);
2333 spin_lock(&j->lock);
2334 if (!journal_entry_is_open(j)) {
2335 spin_unlock(&j->lock);
2339 set_bit(JOURNAL_NEED_WRITE, &j->flags);
2340 if (journal_buf_switch(j, false) != JOURNAL_UNLOCKED)
2341 spin_unlock(&j->lock);
2345 * Given an inode number, if that inode number has data in the journal that
2346 * hasn't yet been flushed, return the journal sequence number that needs to be
2349 u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
2351 size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
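/*
 * has_inode is a small bloom-filter style bitmap of inodes with dirty keys in
 * each buffer; a hash collision can only cause a spurious flush, never a
 * missed one.
 */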
2354 if (!test_bit(h, j->buf[0].has_inode) &&
2355 !test_bit(h, j->buf[1].has_inode))
2358 spin_lock(&j->lock);
2359 if (test_bit(h, journal_cur_buf(j)->has_inode))
2360 seq = atomic64_read(&j->seq);
2361 else if (test_bit(h, journal_prev_buf(j)->has_inode))
2362 seq = atomic64_read(&j->seq) - 1;
2363 spin_unlock(&j->lock);
2368 static int __journal_res_get(struct journal *j, struct journal_res *res,
2369 unsigned u64s_min, unsigned u64s_max)
2371 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2372 struct journal_buf *buf;
2375 ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
2379 spin_lock(&j->lock);
2381 * Recheck after taking the lock, so we don't race with another thread
2382 * that just did journal_entry_open() and call journal_entry_close()
2385 ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
2387 spin_unlock(&j->lock);
2392 * If we couldn't get a reservation because the current buf filled up,
2393 * and we had room for a bigger entry on disk, signal that we want to
2394 * realloc the journal bufs:
2396 buf = journal_cur_buf(j);
2397 if (journal_entry_is_open(j) &&
2398 buf->size >> 9 < buf->disk_sectors &&
2399 buf->size < JOURNAL_ENTRY_SIZE_MAX)
2400 j->buf_size_want = max(j->buf_size_want, buf->size << 1);
2403 * Close the current journal entry if necessary, then try to start a new
2406 switch (journal_buf_switch(j, false)) {
2407 case JOURNAL_ENTRY_ERROR:
2408 spin_unlock(&j->lock);
2410 case JOURNAL_ENTRY_INUSE:
2411 /* haven't finished writing out the previous one: */
2412 spin_unlock(&j->lock);
2413 trace_journal_entry_full(c);
2415 case JOURNAL_ENTRY_CLOSED:
2417 case JOURNAL_UNLOCKED:
2421 /* We now have a new, closed journal buf - see if we can open it: */
2422 ret = journal_entry_open(j);
2423 spin_unlock(&j->lock);
2430 /* Journal's full, we have to wait */
2433 * Direct reclaim - can't rely on reclaim from work item
2436 journal_reclaim_work(&j->reclaim_work.work);
2438 trace_journal_full(c);
2440 if (!j->res_get_blocked_start)
2441 j->res_get_blocked_start = local_clock() ?: 1;
2446 * Essentially the entry function to the journaling code. When bcachefs is doing
2447 * a btree insert, it calls this function to get the current journal write.
2448 * Journal write is the structure used to set up journal writes. The calling
2449 * function will then add its keys to the structure, queuing them for the next
2452 * To ensure forward progress, the current task must not be holding any
2453 * btree node write locks.
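 *
 * A rough sketch of the usage pattern (bch2_journal_meta() below is a concrete
 * example):
 *
 *	struct journal_res res = { 0 };
 *
 *	bch2_journal_res_get(j, &res, u64s, u64s);
 *	... add keys to the open journal entry ...
 *	bch2_journal_res_put(j, &res);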
2455 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
2456 unsigned u64s_min, unsigned u64s_max)
2461 (ret = __journal_res_get(j, res, u64s_min,
2463 return ret < 0 ? ret : 0;
2466 void bch2_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
2468 spin_lock(&j->lock);
2470 BUG_ON(seq > atomic64_read(&j->seq));
2472 if (bch2_journal_error(j)) {
2473 spin_unlock(&j->lock);
2477 if (seq == atomic64_read(&j->seq)) {
2478 if (!closure_wait(&journal_cur_buf(j)->wait, parent))
2480 } else if (seq + 1 == atomic64_read(&j->seq) &&
2481 j->reservations.prev_buf_unwritten) {
2482 if (!closure_wait(&journal_prev_buf(j)->wait, parent))
2487 /* check if raced with write completion (or failure) */
2488 if (!j->reservations.prev_buf_unwritten ||
2489 bch2_journal_error(j))
2490 closure_wake_up(&journal_prev_buf(j)->wait);
2493 spin_unlock(&j->lock);
2496 void bch2_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent)
2498 struct journal_buf *buf;
2500 spin_lock(&j->lock);
2502 BUG_ON(seq > atomic64_read(&j->seq));
2504 if (bch2_journal_error(j)) {
2505 spin_unlock(&j->lock);
2509 if (seq == atomic64_read(&j->seq)) {
2510 bool set_need_write = false;
2512 buf = journal_cur_buf(j);
2514 if (parent && !closure_wait(&buf->wait, parent))
2517 if (!test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags)) {
2518 j->need_write_time = local_clock();
2519 set_need_write = true;
2522 switch (journal_buf_switch(j, set_need_write)) {
2523 case JOURNAL_ENTRY_ERROR:
2525 closure_wake_up(&buf->wait);
2527 case JOURNAL_ENTRY_CLOSED:
2529 * Journal entry hasn't been opened yet, but caller
2530 * claims it has something (seq == j->seq):
2533 case JOURNAL_ENTRY_INUSE:
2535 case JOURNAL_UNLOCKED:
2538 } else if (parent &&
2539 seq + 1 == atomic64_read(&j->seq) &&
2540 j->reservations.prev_buf_unwritten) {
2541 buf = journal_prev_buf(j);
2543 if (!closure_wait(&buf->wait, parent))
2548 /* check if raced with write completion (or failure) */
2549 if (!j->reservations.prev_buf_unwritten ||
2550 bch2_journal_error(j))
2551 closure_wake_up(&buf->wait);
2554 spin_unlock(&j->lock);
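/*
 * Illustrative sketch: a caller that needs to block until a given sequence
 * number is on disk can pass a stack closure as the parent and wait on it
 * (bch2_journal_flush_seq() below achieves the same thing with a wait_event
 * instead of a closure):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 */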
2557 static int journal_seq_flushed(struct journal *j, u64 seq)
2559 struct journal_buf *buf;
2562 spin_lock(&j->lock);
2563 BUG_ON(seq > atomic64_read(&j->seq));
2565 if (seq == atomic64_read(&j->seq)) {
2566 bool set_need_write = false;
2570 buf = journal_cur_buf(j);
2572 if (!test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags)) {
2573 j->need_write_time = local_clock();
2574 set_need_write = true;
2577 switch (journal_buf_switch(j, set_need_write)) {
2578 case JOURNAL_ENTRY_ERROR:
2581 case JOURNAL_ENTRY_CLOSED:
2583 * Journal entry hasn't been opened yet, but caller
2584 * claims it has something (seq == j->seq):
2587 case JOURNAL_ENTRY_INUSE:
2589 case JOURNAL_UNLOCKED:
2592 } else if (seq + 1 == atomic64_read(&j->seq) &&
2593 j->reservations.prev_buf_unwritten) {
2594 ret = bch2_journal_error(j);
2597 spin_unlock(&j->lock);
2602 int bch2_journal_flush_seq(struct journal *j, u64 seq)
2604 u64 start_time = local_clock();
2607 ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));
2609 bch2_time_stats_update(j->flush_seq_time, start_time);
2611 return ret ?: ret2 < 0 ? ret2 : 0;
2614 void bch2_journal_meta_async(struct journal *j, struct closure *parent)
2616 struct journal_res res;
2617 unsigned u64s = jset_u64s(0);
2619 memset(&res, 0, sizeof(res));
2621 bch2_journal_res_get(j, &res, u64s, u64s);
2622 bch2_journal_res_put(j, &res);
2624 bch2_journal_flush_seq_async(j, res.seq, parent);
2627 int bch2_journal_meta(struct journal *j)
2629 struct journal_res res;
2630 unsigned u64s = jset_u64s(0);
2633 memset(&res, 0, sizeof(res));
2635 ret = bch2_journal_res_get(j, &res, u64s, u64s);
2639 bch2_journal_res_put(j, &res);
2641 return bch2_journal_flush_seq(j, res.seq);
2644 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
2646 u64 seq, journal_seq;
2648 spin_lock(&j->lock);
2649 journal_seq = atomic64_read(&j->seq);
2651 if (journal_entry_is_open(j)) {
2653 } else if (journal_seq) {
2654 seq = journal_seq - 1;
2656 spin_unlock(&j->lock);
2659 spin_unlock(&j->lock);
2661 bch2_journal_flush_seq_async(j, seq, parent);
2664 int bch2_journal_flush(struct journal *j)
2666 u64 seq, journal_seq;
2668 spin_lock(&j->lock);
2669 journal_seq = atomic64_read(&j->seq);
2671 if (journal_entry_is_open(j)) {
2673 } else if (journal_seq) {
2674 seq = journal_seq - 1;
2676 spin_unlock(&j->lock);
2679 spin_unlock(&j->lock);
2681 return bch2_journal_flush_seq(j, seq);
2684 ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
2686 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2687 union journal_res_state *s = &j->reservations;
2693 spin_lock(&j->lock);
2695 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2696 "active journal entries:\t%zu\n"
2698 "last_seq:\t\t%llu\n"
2699 "last_seq_ondisk:\t%llu\n"
2700 "reservation count:\t%u\n"
2701 "reservation offset:\t%u\n"
2702 "current entry u64s:\t%u\n"
2703 "io in flight:\t\t%i\n"
2704 "need write:\t\t%i\n"
2706 "replay done:\t\t%i\n",
2708 (u64) atomic64_read(&j->seq),
2711 journal_state_count(*s, s->idx),
2712 s->cur_entry_offset,
2714 s->prev_buf_unwritten,
2715 test_bit(JOURNAL_NEED_WRITE, &j->flags),
2716 journal_entry_is_open(j),
2717 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
2719 for_each_member_device_rcu(ca, c, iter,
2720 &c->rw_devs[BCH_DATA_JOURNAL]) {
2721 struct journal_device *ja = &ca->journal;
2726 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2729 "\tcur_idx\t\t%u (seq %llu)\n"
2730 "\tlast_idx\t%u (seq %llu)\n",
2732 ja->cur_idx, ja->bucket_seq[ja->cur_idx],
2733 ja->last_idx, ja->bucket_seq[ja->last_idx]);
2736 spin_unlock(&j->lock);
2742 ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
2744 struct journal_entry_pin_list *pin_list;
2745 struct journal_entry_pin *pin;
2749 spin_lock_irq(&j->pin_lock);
2750 fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
2751 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2753 journal_pin_seq(j, pin_list),
2754 atomic_read(&pin_list->count));
2756 list_for_each_entry(pin, &pin_list->list, list)
2757 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2761 if (!list_empty(&pin_list->flushed))
2762 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2765 list_for_each_entry(pin, &pin_list->flushed, list)
2766 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2770 spin_unlock_irq(&j->pin_lock);
2775 static bool bch2_journal_writing_to_device(struct bch_dev *ca)
2777 struct journal *j = &ca->fs->journal;
2780 spin_lock(&j->lock);
2781 ret = bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key),
2783 spin_unlock(&j->lock);
2789 * This assumes that ca has already been marked read-only so that
2790 * journal_next_bucket won't pick buckets out of ca any more.
2791 * Hence, if the journal is not currently pointing to ca, there
2792 * will be no new writes to journal entries in ca after all the
2793 * pending ones have been flushed to disk.
2795 * If the journal is being written to ca, write a new record, and
2796 * journal_next_bucket will notice that the device is no longer
2797 * writeable and pick a new set of devices to write to.
2800 int bch2_journal_move(struct bch_dev *ca)
2802 struct journal_device *ja = &ca->journal;
2803 struct journal *j = &ca->fs->journal;
2804 u64 seq_to_flush = 0;
2808 if (bch2_journal_writing_to_device(ca)) {
2810 * bch2_journal_meta() will write a record and we'll wait
2811 * for the write to complete.
2812 * Actually writing the journal (journal_write_locked)
2813 * will call journal_next_bucket which notices that the
2814 * device is no longer writeable, and picks a new one.
2816 bch2_journal_meta(j);
2817 BUG_ON(bch2_journal_writing_to_device(ca));
2820 for (i = 0; i < ja->nr; i++)
2821 seq_to_flush = max(seq_to_flush, ja->bucket_seq[i]);
2823 bch2_journal_flush_pins(j, seq_to_flush);
2826 * Force a meta-data journal entry to be written so that
2827 * we have newer journal entries in devices other than ca,
2828 * and wait for the meta data write to complete.
2830 bch2_journal_meta(j);
2833 * Verify that we no longer need any of the journal entries in
2836 spin_lock(&j->lock);
2837 ret = j->last_seq_ondisk > seq_to_flush ? 0 : -EIO;
2838 spin_unlock(&j->lock);
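/*
 * Illustrative call sequence (assumptions noted): during device evacuation
 * the caller is expected to have already switched ca to read-only through
 * the usual member-state machinery (outside this file), after which
 * something like the following migrates the journal off the device:
 *
 *	ret = bch2_journal_move(ca);
 *	if (ret)
 *		return ret;	// journal entries on ca are still needed
 *
 *	// from here on, no new journal writes will target ca's buckets
 */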
2843 void bch2_fs_journal_stop(struct journal *j)
2845 if (!test_bit(JOURNAL_STARTED, &j->flags))
2849 * Empty out the journal by first flushing everything pinning existing
2850 * journal entries, then force a brand new empty journal entry to be
2853 bch2_journal_flush_pins(j, U64_MAX);
2854 bch2_journal_flush_async(j, NULL);
2855 bch2_journal_meta(j);
2857 cancel_delayed_work_sync(&j->write_work);
2858 cancel_delayed_work_sync(&j->reclaim_work);
2861 void bch2_dev_journal_exit(struct bch_dev *ca)
2863 kfree(ca->journal.bio);
2864 kfree(ca->journal.buckets);
2865 kfree(ca->journal.bucket_seq);
2867 ca->journal.bio = NULL;
2868 ca->journal.buckets = NULL;
2869 ca->journal.bucket_seq = NULL;
2872 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
2874 struct journal_device *ja = &ca->journal;
2875 struct bch_sb_field_journal *journal_buckets =
2876 bch2_sb_get_journal(sb);
2879 ja->nr = bch2_nr_journal_buckets(journal_buckets);
2881 ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
2882 if (!ja->bucket_seq)
2885 ca->journal.bio = bio_kmalloc(GFP_KERNEL,
2886 DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
2887 if (!ca->journal.bio)
2890 ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
2894 for (i = 0; i < ja->nr; i++)
2895 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
2900 void bch2_fs_journal_exit(struct journal *j)
2902 kvpfree(j->buf[1].data, j->buf[1].size);
2903 kvpfree(j->buf[0].data, j->buf[0].size);
2907 int bch2_fs_journal_init(struct journal *j)
2909 static struct lock_class_key res_key;
2911 spin_lock_init(&j->lock);
2912 spin_lock_init(&j->pin_lock);
2913 init_waitqueue_head(&j->wait);
2914 INIT_DELAYED_WORK(&j->write_work, journal_write_work);
2915 INIT_DELAYED_WORK(&j->reclaim_work, journal_reclaim_work);
2916 mutex_init(&j->blacklist_lock);
2917 INIT_LIST_HEAD(&j->seq_blacklist);
2918 mutex_init(&j->reclaim_lock);
2920 lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
2922 j->buf[0].size = JOURNAL_ENTRY_SIZE_MIN;
2923 j->buf[1].size = JOURNAL_ENTRY_SIZE_MIN;
2924 j->write_delay_ms = 100;
2925 j->reclaim_delay_ms = 100;
2927 bkey_extent_init(&j->key);
2929 atomic64_set(&j->reservations.counter,
2930 ((union journal_res_state)
2931 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
2933 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
2934 !(j->buf[0].data = kvpmalloc(j->buf[0].size, GFP_KERNEL)) ||
2935 !(j->buf[1].data = kvpmalloc(j->buf[1].size, GFP_KERNEL)))
2938 j->pin.front = j->pin.back = 1;
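/*
 * Setup/teardown ordering sketch (as assumed here, not verified against the
 * callers in super.c): filesystem bringup calls bch2_fs_journal_init() once
 * per filesystem and bch2_dev_journal_init() once per member device before
 * journal replay; shutdown runs roughly the reverse:
 *
 *	bch2_fs_journal_stop(&c->journal);
 *	bch2_dev_journal_exit(ca);		// for each member device
 *	bch2_fs_journal_exit(&c->journal);
 */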