2 * bcachefs journalling code, for btree insertions
4 * Copyright 2012 Google, Inc.
9 #include "bkey_methods.h"
12 #include "btree_update.h"
24 #include <trace/events/bcachefs.h>
26 static void journal_write(struct closure *);
27 static void journal_reclaim_fast(struct journal *);
28 static void journal_pin_add_entry(struct journal *,
29 struct journal_entry_pin_list *,
30 struct journal_entry_pin *,
31 journal_pin_flush_fn);
33 static inline struct journal_buf *journal_cur_buf(struct journal *j)
35 return j->buf + j->reservations.idx;
38 static inline struct journal_buf *journal_prev_buf(struct journal *j)
40 return j->buf + !j->reservations.idx;
43 /* Sequence number of oldest dirty journal entry */
45 static inline u64 last_seq(struct journal *j)
47 return atomic64_read(&j->seq) - fifo_used(&j->pin) + 1;
50 static inline u64 journal_pin_seq(struct journal *j,
51 struct journal_entry_pin_list *pin_list)
53 return last_seq(j) + fifo_entry_idx(&j->pin, pin_list);
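/*
 * Illustrative sketch (not part of the original file): the pin fifo is kept
 * in lockstep with the sequence number counter, so a pin list's sequence
 * number can be recovered purely from its position in the fifo. E.g.,
 * assuming j->seq == 100 and fifo_used(&j->pin) == 4:
 *
 *	last_seq(j)                              == 100 - 4 + 1 == 97
 *	journal_pin_seq(j, entry at fifo idx 2)  == 97 + 2      == 99
 */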
56 static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
57 struct jset_entry *entry, unsigned type)
59 while (entry < vstruct_last(jset)) {
60 if (JOURNAL_ENTRY_TYPE(entry) == type)
63 entry = vstruct_next(entry);
69 #define for_each_jset_entry_type(entry, jset, type) \
70 for (entry = (jset)->start; \
71 (entry = __jset_entry_type_next(jset, entry, type)); \
72 entry = vstruct_next(entry))
74 #define for_each_jset_key(k, _n, entry, jset) \
75 for_each_jset_entry_type(entry, jset, JOURNAL_ENTRY_BTREE_KEYS) \
76 vstruct_for_each_safe(entry, k, _n)
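/*
 * Typical usage of these iterators (a sketch mirroring journal_has_keys() and
 * bch2_journal_mark() later in this file, not a new interface):
 *
 *	struct jset_entry *entry;
 *	struct bkey_i *k, *_n;
 *
 *	for_each_jset_key(k, _n, entry, jset)
 *		... process each journalled key k, with entry->btree_id and
 *		    entry->level saying which btree it belongs to ...
 */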
78 static inline void bch2_journal_add_entry(struct journal_buf *buf,
79 const void *data, size_t u64s,
80 unsigned type, enum btree_id id,
83 struct jset *jset = buf->data;
85 bch2_journal_add_entry_at(buf, data, u64s, type, id, level,
86 le32_to_cpu(jset->u64s));
87 le32_add_cpu(&jset->u64s, jset_u64s(u64s));
90 static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type,
93 struct jset_entry *entry;
95 for_each_jset_entry_type(entry, j, type)
96 if (entry->btree_id == id)
102 struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j,
103 enum btree_id id, unsigned *level)
106 struct jset_entry *entry =
107 bch2_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id);
113 *level = entry->level;
118 static void bch2_journal_add_btree_root(struct journal_buf *buf,
119 enum btree_id id, struct bkey_i *k,
122 bch2_journal_add_entry(buf, k, k->k.u64s,
123 JOURNAL_ENTRY_BTREE_ROOT, id, level);
126 static inline void bch2_journal_add_prios(struct journal *j,
127 struct journal_buf *buf)
130 * no prio bucket ptrs yet... XXX should change the allocator so this
133 if (!buf->nr_prio_buckets)
136 bch2_journal_add_entry(buf, j->prio_buckets, buf->nr_prio_buckets,
137 JOURNAL_ENTRY_PRIO_PTRS, 0, 0);
140 static void journal_seq_blacklist_flush(struct journal *j,
141 struct journal_entry_pin *pin, u64 seq)
144 container_of(j, struct bch_fs, journal);
145 struct journal_seq_blacklist *bl =
146 container_of(pin, struct journal_seq_blacklist, pin);
147 struct blacklisted_node n;
152 closure_init_stack(&cl);
155 struct btree_iter iter;
158 mutex_lock(&j->blacklist_lock);
159 if (i >= bl->nr_entries) {
160 mutex_unlock(&j->blacklist_lock);
164 mutex_unlock(&j->blacklist_lock);
166 bch2_btree_iter_init(&iter, c, n.btree_id, n.pos);
167 iter.is_extents = false;
169 b = bch2_btree_iter_peek_node(&iter);
171 /* The node might have already been rewritten: */
173 if (b->data->keys.seq == n.seq) {
174 ret = bch2_btree_node_rewrite(&iter, b, &cl);
176 bch2_btree_iter_unlock(&iter);
179 if (ret == -EAGAIN ||
183 bch2_fs_fatal_error(c,
184 "error %i rewriting btree node with blacklisted journal seq",
186 bch2_journal_halt(j);
191 bch2_btree_iter_unlock(&iter);
197 struct btree_interior_update *as;
198 struct pending_btree_node_free *d;
200 mutex_lock(&j->blacklist_lock);
201 if (i >= bl->nr_entries) {
202 mutex_unlock(&j->blacklist_lock);
206 mutex_unlock(&j->blacklist_lock);
208 mutex_lock(&c->btree_interior_update_lock);
211 * Is the node on the list of pending interior node updates -
212 * being freed? If so, wait for that to finish:
214 for_each_pending_btree_node_free(c, as, d)
215 if (n.seq == d->seq &&
216 n.btree_id == d->btree_id &&
218 !bkey_cmp(n.pos, d->key.k.p)) {
219 closure_wait(&as->wait, &cl);
220 mutex_unlock(&c->btree_interior_update_lock);
225 mutex_unlock(&c->btree_interior_update_lock);
228 mutex_lock(&j->blacklist_lock);
230 bch2_journal_pin_drop(j, &bl->pin);
235 mutex_unlock(&j->blacklist_lock);
238 static struct journal_seq_blacklist *
239 journal_seq_blacklist_find(struct journal *j, u64 seq)
241 struct journal_seq_blacklist *bl;
243 lockdep_assert_held(&j->blacklist_lock);
245 list_for_each_entry(bl, &j->seq_blacklist, list)
252 static struct journal_seq_blacklist *
253 bch2_journal_seq_blacklisted_new(struct journal *j, u64 seq)
255 struct journal_seq_blacklist *bl;
257 lockdep_assert_held(&j->blacklist_lock);
260 * When we start the journal, bch2_journal_start() will skip over @seq:
263 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
268 list_add_tail(&bl->list, &j->seq_blacklist);
273 * Returns true if @seq is newer than the most recent journal entry that got
274 * written, and data corresponding to @seq should be ignored - also marks @seq
275 * as blacklisted so that on future restarts the corresponding data will still
278 int bch2_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
280 struct journal *j = &c->journal;
281 struct journal_seq_blacklist *bl = NULL;
282 struct blacklisted_node *n;
289 journal_seq = atomic64_read(&j->seq);
291 /* Interior updates aren't journalled: */
293 BUG_ON(seq > journal_seq && test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags));
296 * Decrease this back to j->seq + 2 when we next rev the on disk format:
297 * increasing it temporarily to work around a bug in old kernels
299 bch2_fs_inconsistent_on(seq > journal_seq + 4, c,
300 "bset journal seq too far in the future: %llu > %llu",
303 if (seq <= journal_seq &&
304 list_empty_careful(&j->seq_blacklist))
307 mutex_lock(&j->blacklist_lock);
309 if (seq <= journal_seq) {
310 bl = journal_seq_blacklist_find(j, seq);
314 bch_verbose(c, "btree node %u:%llu:%llu has future journal sequence number %llu, blacklisting",
315 b->btree_id, b->key.k.p.inode, b->key.k.p.offset, seq);
317 for (i = journal_seq + 1; i <= seq; i++) {
318 bl = journal_seq_blacklist_find(j, i) ?:
319 bch2_journal_seq_blacklisted_new(j, i);
327 for (n = bl->entries; n < bl->entries + bl->nr_entries; n++)
328 if (b->data->keys.seq == n->seq &&
329 b->btree_id == n->btree_id &&
330 !bkey_cmp(b->key.k.p, n->pos))
333 if (!bl->nr_entries ||
334 is_power_of_2(bl->nr_entries)) {
335 n = krealloc(bl->entries,
336 max(bl->nr_entries * 2, 8UL) * sizeof(*n),
345 bl->entries[bl->nr_entries++] = (struct blacklisted_node) {
346 .seq = b->data->keys.seq,
347 .btree_id = b->btree_id,
353 mutex_unlock(&j->blacklist_lock);
358 * Journal replay/recovery:
360 * This code is all driven from bch2_fs_start(); we first read the journal
361 * entries, do some other stuff, then we mark all the keys in the journal
362 * entries (same as garbage collection would), then we replay them - reinserting
363 * them into the cache in precisely the same order as they appear in the
366 * We only journal keys that go in leaf nodes, which simplifies things quite a
370 struct journal_list {
373 struct list_head *head;
377 #define JOURNAL_ENTRY_ADD_OK 0
378 #define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
381 * Given a journal entry we just read, add it to the list of journal entries to
384 static int journal_entry_add(struct bch_fs *c, struct journal_list *jlist,
387 struct journal_replay *i, *pos;
388 struct list_head *where;
389 size_t bytes = vstruct_bytes(j);
393 mutex_lock(&jlist->lock);
395 last_seq = !list_empty(jlist->head)
396 ? list_last_entry(jlist->head, struct journal_replay,
400 /* Is this entry older than the range we need? */
401 if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
402 ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
406 /* Drop entries we don't need anymore */
407 list_for_each_entry_safe(i, pos, jlist->head, list) {
408 if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
411 kvpfree(i, offsetof(struct journal_replay, j) +
412 vstruct_bytes(&i->j));
415 list_for_each_entry_reverse(i, jlist->head, list) {
417 if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
418 fsck_err_on(bytes != vstruct_bytes(&i->j) ||
419 memcmp(j, &i->j, bytes), c,
420 "found duplicate but non identical journal entries (seq %llu)",
421 le64_to_cpu(j->seq));
423 ret = JOURNAL_ENTRY_ADD_OK;
427 if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
435 i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
441 memcpy(&i->j, j, bytes);
442 list_add(&i->list, where);
443 ret = JOURNAL_ENTRY_ADD_OK;
446 mutex_unlock(&jlist->lock);
450 static struct nonce journal_nonce(const struct jset *jset)
452 return (struct nonce) {{
454 [1] = ((__le32 *) &jset->seq)[0],
455 [2] = ((__le32 *) &jset->seq)[1],
456 [3] = BCH_NONCE_JOURNAL,
460 static void journal_entry_null_range(void *start, void *end)
462 struct jset_entry *entry;
464 for (entry = start; entry != end; entry = vstruct_next(entry)) {
469 SET_JOURNAL_ENTRY_TYPE(entry, 0);
473 static int journal_validate_key(struct bch_fs *c, struct jset *j,
474 struct jset_entry *entry,
475 struct bkey_i *k, enum bkey_type key_type,
478 void *next = vstruct_next(entry);
483 if (mustfix_fsck_err_on(!k->k.u64s, c,
484 "invalid %s in journal: k->u64s 0", type)) {
485 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
486 journal_entry_null_range(vstruct_next(entry), next);
490 if (mustfix_fsck_err_on((void *) bkey_next(k) >
491 (void *) vstruct_next(entry), c,
492 "invalid %s in journal: extends past end of journal entry",
494 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
495 journal_entry_null_range(vstruct_next(entry), next);
499 if (mustfix_fsck_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
500 "invalid %s in journal: bad format %u",
501 type, k->k.format)) {
502 le16_add_cpu(&entry->u64s, -k->k.u64s);
503 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
504 journal_entry_null_range(vstruct_next(entry), next);
508 if (JSET_BIG_ENDIAN(j) != CPU_BIG_ENDIAN)
509 bch2_bkey_swab(key_type, NULL, bkey_to_packed(k));
511 invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k));
513 bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf),
515 mustfix_fsck_err(c, "invalid %s in journal: %s", type, buf);
517 le16_add_cpu(&entry->u64s, -k->k.u64s);
518 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
519 journal_entry_null_range(vstruct_next(entry), next);
526 #define JOURNAL_ENTRY_REREAD 5
527 #define JOURNAL_ENTRY_NONE 6
528 #define JOURNAL_ENTRY_BAD 7
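/*
 * Summary of how journal_read_bucket() (below) reacts to these return codes -
 * an explanatory note derived from that code, not authoritative documentation:
 *
 *	JOURNAL_ENTRY_REREAD	entry is larger than the read buffer; grow the
 *				buffer and reread from the same offset
 *	JOURNAL_ENTRY_NONE	nothing here; skip ahead one block and keep
 *				scanning the bucket
 *	JOURNAL_ENTRY_BAD	entry failed validation; also skip one block,
 *				possibly leaving missing journal entries
 */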
530 static int journal_entry_validate(struct bch_fs *c,
531 struct jset *j, u64 sector,
532 unsigned bucket_sectors_left,
533 unsigned sectors_read)
535 struct jset_entry *entry;
536 size_t bytes = vstruct_bytes(j);
537 struct bch_csum csum;
540 if (le64_to_cpu(j->magic) != jset_magic(c))
541 return JOURNAL_ENTRY_NONE;
543 if (le32_to_cpu(j->version) != BCACHE_JSET_VERSION) {
544 bch_err(c, "unknown journal entry version %u",
545 le32_to_cpu(j->version));
546 return BCH_FSCK_UNKNOWN_VERSION;
549 if (mustfix_fsck_err_on(bytes > bucket_sectors_left << 9, c,
550 "journal entry too big (%zu bytes), sector %lluu",
552 /* XXX: note we might have missing journal entries */
553 return JOURNAL_ENTRY_BAD;
556 if (bytes > sectors_read << 9)
557 return JOURNAL_ENTRY_REREAD;
559 if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c,
560 "journal entry with unknown csum type %llu sector %lluu",
561 JSET_CSUM_TYPE(j), sector))
562 return JOURNAL_ENTRY_BAD;
564 csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
565 if (mustfix_fsck_err_on(bch2_crc_cmp(csum, j->csum), c,
566 "journal checksum bad, sector %llu", sector)) {
567 /* XXX: retry IO, when we start retrying checksum errors */
568 /* XXX: note we might have missing journal entries */
569 return JOURNAL_ENTRY_BAD;
572 bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
574 vstruct_end(j) - (void *) j->encrypted_start);
576 if (mustfix_fsck_err_on(le64_to_cpu(j->last_seq) > le64_to_cpu(j->seq), c,
577 "invalid journal entry: last_seq > seq"))
578 j->last_seq = j->seq;
580 vstruct_for_each(j, entry) {
583 if (mustfix_fsck_err_on(vstruct_next(entry) >
585 "journal entry extents past end of jset")) {
586 j->u64s = cpu_to_le32((u64 *) entry - j->_data);
590 switch (JOURNAL_ENTRY_TYPE(entry)) {
591 case JOURNAL_ENTRY_BTREE_KEYS:
592 vstruct_for_each(entry, k) {
593 ret = journal_validate_key(c, j, entry, k,
594 bkey_type(entry->level,
602 case JOURNAL_ENTRY_BTREE_ROOT:
605 if (mustfix_fsck_err_on(!entry->u64s ||
606 le16_to_cpu(entry->u64s) != k->k.u64s, c,
607 "invalid btree root journal entry: wrong number of keys")) {
608 journal_entry_null_range(entry,
609 vstruct_next(entry));
613 ret = journal_validate_key(c, j, entry, k,
614 BKEY_TYPE_BTREE, "btree root");
619 case JOURNAL_ENTRY_PRIO_PTRS:
622 case JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED:
623 if (mustfix_fsck_err_on(le16_to_cpu(entry->u64s) != 1, c,
624 "invalid journal seq blacklist entry: bad size")) {
625 journal_entry_null_range(entry,
626 vstruct_next(entry));
631 mustfix_fsck_err(c, "invalid journal entry type %llu",
632 JOURNAL_ENTRY_TYPE(entry));
633 journal_entry_null_range(entry, vstruct_next(entry));
642 struct journal_read_buf {
647 static int journal_read_buf_realloc(struct journal_read_buf *b,
652 /* the bios are sized for this many pages, max: */
653 if (new_size > JOURNAL_ENTRY_SIZE_MAX)
656 new_size = roundup_pow_of_two(new_size);
657 n = kvpmalloc(new_size, GFP_KERNEL);
661 kvpfree(b->data, b->size);
667 static int journal_read_bucket(struct bch_dev *ca,
668 struct journal_read_buf *buf,
669 struct journal_list *jlist,
670 unsigned bucket, u64 *seq, bool *entries_found)
672 struct bch_fs *c = ca->fs;
673 struct journal_device *ja = &ca->journal;
674 struct bio *bio = ja->bio;
675 struct jset *j = NULL;
676 unsigned sectors, sectors_read = 0;
677 u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
678 end = offset + ca->mi.bucket_size;
679 bool saw_bad = false;
682 pr_debug("reading %u", bucket);
684 while (offset < end) {
686 reread: sectors_read = min_t(unsigned,
687 end - offset, buf->size >> 9);
690 bio->bi_bdev = ca->disk_sb.bdev;
691 bio->bi_iter.bi_sector = offset;
692 bio->bi_iter.bi_size = sectors_read << 9;
693 bio_set_op_attrs(bio, REQ_OP_READ, 0);
694 bch2_bio_map(bio, buf->data);
696 ret = submit_bio_wait(bio);
698 if (bch2_dev_fatal_io_err_on(ret, ca,
699 "journal read from sector %llu",
701 bch2_meta_read_fault("journal"))
707 ret = journal_entry_validate(c, j, offset,
708 end - offset, sectors_read);
712 case JOURNAL_ENTRY_REREAD:
713 if (vstruct_bytes(j) > buf->size) {
714 ret = journal_read_buf_realloc(buf,
720 case JOURNAL_ENTRY_NONE:
723 sectors = c->sb.block_size;
725 case JOURNAL_ENTRY_BAD:
727 sectors = c->sb.block_size;
734 * This happens sometimes if we don't have discards on -
735 * when we've partially overwritten a bucket with new
736 * journal entries. We don't need the rest of the
739 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
742 ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
744 ret = journal_entry_add(c, jlist, j);
746 case JOURNAL_ENTRY_ADD_OK:
747 *entries_found = true;
749 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
755 if (le64_to_cpu(j->seq) > *seq)
756 *seq = le64_to_cpu(j->seq);
758 sectors = vstruct_sectors(j, c->block_bits);
762 sectors_read -= sectors;
763 j = ((void *) j) + (sectors << 9);
769 static void bch2_journal_read_device(struct closure *cl)
771 #define read_bucket(b) \
773 bool entries_found = false; \
774 ret = journal_read_bucket(ca, &buf, jlist, b, &seq, \
778 __set_bit(b, bitmap); \
782 struct journal_device *ja =
783 container_of(cl, struct journal_device, read);
784 struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
785 struct journal_list *jlist =
786 container_of(cl->parent, struct journal_list, cl);
787 struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev);
788 struct journal_read_buf buf = { NULL, 0 };
790 DECLARE_BITMAP(bitmap, ja->nr);
798 bitmap_zero(bitmap, ja->nr);
799 ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
803 pr_debug("%u journal buckets", ja->nr);
806 * If the device supports discard but not secure discard, we can't do
807 * the fancy fibonacci hash/binary search because the live journal
808 * entries might not form a contiguous range:
810 for (i = 0; i < ja->nr; i++)
814 if (!blk_queue_nonrot(q))
818 * Read journal buckets ordered by golden ratio hash to quickly
819 * find a sequence of buckets with valid journal entries
821 for (i = 0; i < ja->nr; i++) {
822 l = (i * 2654435769U) % ja->nr;
824 if (test_bit(l, bitmap))
832 * If that fails, check all the buckets we haven't checked
835 pr_debug("falling back to linear search");
837 for (l = find_first_zero_bit(bitmap, ja->nr);
839 l = find_next_zero_bit(bitmap, ja->nr, l + 1))
843 /* no journal entries on this device? */
848 r = find_next_bit(bitmap, ja->nr, l + 1);
849 pr_debug("starting binary search, l %u r %u", l, r);
852 unsigned m = (l + r) >> 1;
865 * Find the journal bucket with the highest sequence number:
867 * If there are duplicate journal entries in multiple buckets (which
868 * definitely isn't supposed to happen, but...) - make sure to start
869 * cur_idx at the last of those buckets, so we don't deadlock trying to
874 for (i = 0; i < ja->nr; i++)
875 if (ja->bucket_seq[i] >= seq &&
876 ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) {
878 * When journal_next_bucket() goes to allocate for
879 * the first time, it'll use the bucket after
883 seq = ja->bucket_seq[i];
887 * Set last_idx to indicate the entire journal is full and needs to be
888 * reclaimed - journal reclaim will immediately reclaim whatever isn't
889 * pinned when it first runs:
891 ja->last_idx = (ja->cur_idx + 1) % ja->nr;
894 * Read buckets in reverse order until we stop finding more journal
897 for (i = (ja->cur_idx + ja->nr - 1) % ja->nr;
899 i = (i + ja->nr - 1) % ja->nr)
900 if (!test_bit(i, bitmap) &&
904 kvpfree(buf.data, buf.size);
905 percpu_ref_put(&ca->io_ref);
908 mutex_lock(&jlist->lock);
910 mutex_unlock(&jlist->lock);
915 void bch2_journal_entries_free(struct list_head *list)
918 while (!list_empty(list)) {
919 struct journal_replay *i =
920 list_first_entry(list, struct journal_replay, list);
922 kvpfree(i, offsetof(struct journal_replay, j) +
923 vstruct_bytes(&i->j));
927 static int journal_seq_blacklist_read(struct journal *j,
928 struct journal_replay *i,
929 struct journal_entry_pin_list *p)
931 struct bch_fs *c = container_of(j, struct bch_fs, journal);
932 struct jset_entry *entry;
933 struct journal_seq_blacklist *bl;
936 for_each_jset_entry_type(entry, &i->j,
937 JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED) {
938 seq = le64_to_cpu(entry->_data[0]);
940 bch_verbose(c, "blacklisting existing journal seq %llu", seq);
942 bl = bch2_journal_seq_blacklisted_new(j, seq);
946 journal_pin_add_entry(j, p, &bl->pin,
947 journal_seq_blacklist_flush);
954 static inline bool journal_has_keys(struct list_head *list)
956 struct journal_replay *i;
957 struct jset_entry *entry;
958 struct bkey_i *k, *_n;
960 list_for_each_entry(i, list, list)
961 for_each_jset_key(k, _n, entry, &i->j)
967 int bch2_journal_read(struct bch_fs *c, struct list_head *list)
969 struct journal *j = &c->journal;
970 struct jset_entry *prio_ptrs;
971 struct journal_list jlist;
972 struct journal_replay *i;
973 struct journal_entry_pin_list *p;
975 u64 cur_seq, end_seq;
976 unsigned iter, keys = 0, entries = 0;
979 closure_init_stack(&jlist.cl);
980 mutex_init(&jlist.lock);
984 for_each_readable_member(ca, c, iter) {
985 percpu_ref_get(&ca->io_ref);
986 closure_call(&ca->journal.read,
987 bch2_journal_read_device,
992 closure_sync(&jlist.cl);
997 if (list_empty(list)) {
998 bch_err(c, "no journal entries found");
999 return BCH_FSCK_REPAIR_IMPOSSIBLE;
1002 fsck_err_on(c->sb.clean && journal_has_keys(list), c,
1003 "filesystem marked clean but journal has keys to replay");
1005 i = list_last_entry(list, struct journal_replay, list);
1007 unfixable_fsck_err_on(le64_to_cpu(i->j.seq) -
1008 le64_to_cpu(i->j.last_seq) + 1 > j->pin.size, c,
1009 "too many journal entries open for refcount fifo");
1011 atomic64_set(&j->seq, le64_to_cpu(i->j.seq));
1012 j->last_seq_ondisk = le64_to_cpu(i->j.last_seq);
1014 j->pin.front = le64_to_cpu(i->j.last_seq);
1015 j->pin.back = le64_to_cpu(i->j.seq) + 1;
1017 BUG_ON(last_seq(j) != le64_to_cpu(i->j.last_seq));
1018 BUG_ON(journal_seq_pin(j, atomic64_read(&j->seq)) !=
1019 &fifo_peek_back(&j->pin));
1021 fifo_for_each_entry_ptr(p, &j->pin, iter) {
1022 INIT_LIST_HEAD(&p->list);
1023 INIT_LIST_HEAD(&p->flushed);
1024 atomic_set(&p->count, 0);
1027 mutex_lock(&j->blacklist_lock);
1029 list_for_each_entry(i, list, list) {
1030 p = journal_seq_pin(j, le64_to_cpu(i->j.seq));
1032 atomic_set(&p->count, 1);
1034 if (journal_seq_blacklist_read(j, i, p)) {
1035 mutex_unlock(&j->blacklist_lock);
1040 mutex_unlock(&j->blacklist_lock);
1042 cur_seq = last_seq(j);
1043 end_seq = le64_to_cpu(list_last_entry(list,
1044 struct journal_replay, list)->j.seq);
1046 list_for_each_entry(i, list, list) {
1047 struct jset_entry *entry;
1048 struct bkey_i *k, *_n;
1051 mutex_lock(&j->blacklist_lock);
1052 while (cur_seq < le64_to_cpu(i->j.seq) &&
1053 journal_seq_blacklist_find(j, cur_seq))
1056 blacklisted = journal_seq_blacklist_find(j,
1057 le64_to_cpu(i->j.seq));
1058 mutex_unlock(&j->blacklist_lock);
1060 fsck_err_on(blacklisted, c,
1061 "found blacklisted journal entry %llu",
1062 le64_to_cpu(i->j.seq));
1064 fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
1065 "journal entries %llu-%llu missing! (replaying %llu-%llu)",
1066 cur_seq, le64_to_cpu(i->j.seq) - 1,
1067 last_seq(j), end_seq);
1069 cur_seq = le64_to_cpu(i->j.seq) + 1;
1071 for_each_jset_key(k, _n, entry, &i->j)
1076 bch_info(c, "journal read done, %i keys in %i entries, seq %llu",
1077 keys, entries, (u64) atomic64_read(&j->seq));
1079 i = list_last_entry(list, struct journal_replay, list);
1080 prio_ptrs = bch2_journal_find_entry(&i->j, JOURNAL_ENTRY_PRIO_PTRS, 0);
1082 memcpy_u64s(j->prio_buckets,
1084 le16_to_cpu(prio_ptrs->u64s));
1085 j->nr_prio_buckets = le16_to_cpu(prio_ptrs->u64s);
1091 int bch2_journal_mark(struct bch_fs *c, struct list_head *list)
1093 struct bkey_i *k, *n;
1094 struct jset_entry *j;
1095 struct journal_replay *r;
1098 list_for_each_entry(r, list, list)
1099 for_each_jset_key(k, n, j, &r->j) {
1100 enum bkey_type type = bkey_type(j->level, j->btree_id);
1101 struct bkey_s_c k_s_c = bkey_i_to_s_c(k);
1103 if (btree_type_has_ptrs(type)) {
1104 ret = bch2_btree_mark_key_initial(c, type, k_s_c);
1113 static bool journal_entry_is_open(struct journal *j)
1115 return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
1118 void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
1120 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1121 struct journal_buf *w = journal_prev_buf(j);
1123 atomic_dec_bug(&journal_seq_pin(j, w->data->seq)->count);
1125 if (!need_write_just_set &&
1126 test_bit(JOURNAL_NEED_WRITE, &j->flags))
1127 __bch2_time_stats_update(j->delay_time,
1128 j->need_write_time);
1130 closure_call(&j->io, journal_write, NULL, &c->cl);
1132 /* Shut sparse up: */
1133 closure_init(&j->io, &c->cl);
1134 set_closure_fn(&j->io, journal_write, NULL);
1135 journal_write(&j->io);
1139 static void __journal_entry_new(struct journal *j, int count)
1141 struct journal_entry_pin_list *p = fifo_push_ref(&j->pin);
1144 * The fifo_push() needs to happen at the same time as j->seq is
1145 * incremented for last_seq() to be calculated correctly
1147 atomic64_inc(&j->seq);
1149 BUG_ON(journal_seq_pin(j, atomic64_read(&j->seq)) !=
1150 &fifo_peek_back(&j->pin));
1152 INIT_LIST_HEAD(&p->list);
1153 INIT_LIST_HEAD(&p->flushed);
1154 atomic_set(&p->count, count);
1157 static void __bch2_journal_next_entry(struct journal *j)
1159 struct journal_buf *buf;
1161 __journal_entry_new(j, 1);
1163 buf = journal_cur_buf(j);
1164 memset(buf->has_inode, 0, sizeof(buf->has_inode));
1166 memset(buf->data, 0, sizeof(*buf->data));
1167 buf->data->seq = cpu_to_le64(atomic64_read(&j->seq));
1168 buf->data->u64s = 0;
1171 static inline size_t journal_entry_u64s_reserve(struct journal_buf *buf)
1173 unsigned ret = BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
1175 if (buf->nr_prio_buckets)
1176 ret += JSET_KEYS_U64s + buf->nr_prio_buckets;
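/*
 * Rough arithmetic sketch (illustrative only): journal_entry_open() below
 * sizes the open entry as
 *
 *	u64s = (sectors << 9) / sizeof(u64)
 *	     - sizeof(struct jset) / sizeof(u64)	(the jset header)
 *	     - journal_entry_u64s_reserve(buf)
 *
 * so the reserve computed here guarantees that every btree root (BTREE_ID_NR
 * of them, each up to JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX u64s) and the
 * prio bucket pointers still fit when they're appended just before the write.
 */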
1182 JOURNAL_ENTRY_ERROR,
1183 JOURNAL_ENTRY_INUSE,
1184 JOURNAL_ENTRY_CLOSED,
1186 } journal_buf_switch(struct journal *j, bool need_write_just_set)
1188 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1189 struct journal_buf *buf;
1190 union journal_res_state old, new;
1191 u64 v = atomic64_read(&j->reservations.counter);
1193 lockdep_assert_held(&j->lock);
1197 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
1198 return JOURNAL_ENTRY_CLOSED;
1200 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1201 return JOURNAL_ENTRY_ERROR;
1203 if (new.prev_buf_unwritten)
1204 return JOURNAL_ENTRY_INUSE;
1207 * avoid race between setting buf->data->u64s and
1208 * journal_res_put starting write:
1210 journal_state_inc(&new);
1212 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
1214 new.prev_buf_unwritten = 1;
1216 BUG_ON(journal_state_count(new, new.idx));
1217 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1218 old.v, new.v)) != old.v);
1220 journal_reclaim_fast(j);
1222 clear_bit(JOURNAL_NEED_WRITE, &j->flags);
1224 buf = &j->buf[old.idx];
1225 buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
1226 buf->data->last_seq = cpu_to_le64(last_seq(j));
1228 j->prev_buf_sectors =
1229 vstruct_blocks_plus(buf->data, c->block_bits,
1230 journal_entry_u64s_reserve(buf)) *
1233 BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);
1235 __bch2_journal_next_entry(j);
1237 cancel_delayed_work(&j->write_work);
1238 spin_unlock(&j->lock);
1240 if (c->bucket_journal_seq > 1 << 14) {
1241 c->bucket_journal_seq = 0;
1242 bch2_bucket_seq_cleanup(c);
1245 /* ugh - might be called from __journal_res_get() under wait_event() */
1246 __set_current_state(TASK_RUNNING);
1247 bch2_journal_buf_put(j, old.idx, need_write_just_set);
1249 return JOURNAL_UNLOCKED;
1252 void bch2_journal_halt(struct journal *j)
1254 union journal_res_state old, new;
1255 u64 v = atomic64_read(&j->reservations.counter);
1259 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1262 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
1263 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1264 old.v, new.v)) != old.v);
1267 closure_wake_up(&journal_cur_buf(j)->wait);
1268 closure_wake_up(&journal_prev_buf(j)->wait);
1271 static unsigned journal_dev_buckets_available(struct journal *j,
1274 struct journal_device *ja = &ca->journal;
1275 unsigned next = (ja->cur_idx + 1) % ja->nr;
1276 unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
1279 * Hack to avoid a deadlock during journal replay:
1280 * journal replay might require setting a new btree
1281 * root, which requires writing another journal entry -
1282 * thus, if the journal is full (and this happens when
1283 * replaying the first journal bucket's entries) we're
1286 * So don't let the journal fill up unless we're in
1289 if (test_bit(JOURNAL_REPLAY_DONE, &j->flags))
1290 available = max((int) available - 2, 0);
1293 * Don't use the last bucket unless writing the new last_seq
1294 * will make another bucket available:
1296 if (ja->bucket_seq[ja->last_idx] >= last_seq(j))
1297 available = max((int) available - 1, 0);
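/*
 * Worked example (illustrative, made-up numbers): with ja->nr == 8,
 * ja->cur_idx == 3 and ja->last_idx == 6,
 *
 *	next      == (3 + 1) % 8     == 4
 *	available == (6 + 8 - 4) % 8 == 2
 *
 * Two buckets are then held in reserve once replay is done, and one more is
 * held back if writing the new last_seq wouldn't yet free the bucket at
 * ja->last_idx - so in this example the result would be 0.
 */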
1302 /* returns number of sectors available for next journal entry: */
1303 static int journal_entry_sectors(struct journal *j)
1305 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1307 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
1308 unsigned sectors_available = UINT_MAX;
1309 unsigned i, nr_online = 0, nr_devs = 0;
1311 lockdep_assert_held(&j->lock);
1313 spin_lock(&j->devs.lock);
1314 group_for_each_dev(ca, &j->devs, i) {
1315 unsigned buckets_required = 0;
1317 sectors_available = min_t(unsigned, sectors_available,
1318 ca->mi.bucket_size);
1321 * Note that we don't allocate the space for a journal entry
1322 * until we write it out - thus, if we haven't started the write
1323 * for the previous entry we have to make sure we have space for
1326 if (bch2_extent_has_device(e.c, ca->dev_idx)) {
1327 if (j->prev_buf_sectors > ca->journal.sectors_free)
1330 if (j->prev_buf_sectors + sectors_available >
1331 ca->journal.sectors_free)
1334 if (j->prev_buf_sectors + sectors_available >
1341 if (journal_dev_buckets_available(j, ca) >= buckets_required)
1345 spin_unlock(&j->devs.lock);
1347 if (nr_online < c->opts.metadata_replicas_required)
1350 if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas))
1353 return sectors_available;
1357 * should _only_ be called from journal_res_get() - when we actually want a
1358 * journal reservation - journal entry is open means journal is dirty:
1360 static int journal_entry_open(struct journal *j)
1362 struct journal_buf *buf = journal_cur_buf(j);
1364 int ret = 0, sectors;
1366 lockdep_assert_held(&j->lock);
1367 BUG_ON(journal_entry_is_open(j));
1369 if (!fifo_free(&j->pin))
1372 sectors = journal_entry_sectors(j);
1376 buf->disk_sectors = sectors;
1378 sectors = min_t(unsigned, sectors, buf->size >> 9);
1380 j->cur_buf_sectors = sectors;
1381 buf->nr_prio_buckets = j->nr_prio_buckets;
1383 u64s = (sectors << 9) / sizeof(u64);
1385 /* Subtract the journal header */
1386 u64s -= sizeof(struct jset) / sizeof(u64);
1388 * Btree roots, prio pointers don't get added until right before we do
1391 u64s -= journal_entry_u64s_reserve(buf);
1392 u64s = max_t(ssize_t, 0L, u64s);
1394 BUG_ON(u64s >= JOURNAL_ENTRY_CLOSED_VAL);
1396 if (u64s > le32_to_cpu(buf->data->u64s)) {
1397 union journal_res_state old, new;
1398 u64 v = atomic64_read(&j->reservations.counter);
1401 * Must be set before marking the journal entry as open:
1403 j->cur_entry_u64s = u64s;
1408 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1411 /* Handle any already added entries */
1412 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
1413 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1414 old.v, new.v)) != old.v);
1419 if (j->res_get_blocked_start) {
1420 __bch2_time_stats_update(j->blocked_time,
1421 j->res_get_blocked_start);
1422 j->res_get_blocked_start = 0;
1425 mod_delayed_work(system_freezable_wq,
1427 msecs_to_jiffies(j->write_delay_ms));
1433 void bch2_journal_start(struct bch_fs *c)
1435 struct journal *j = &c->journal;
1436 struct journal_seq_blacklist *bl;
1439 list_for_each_entry(bl, &j->seq_blacklist, list)
1440 new_seq = max(new_seq, bl->seq);
1442 spin_lock(&j->lock);
1444 set_bit(JOURNAL_STARTED, &j->flags);
1446 while (atomic64_read(&j->seq) < new_seq)
1447 __journal_entry_new(j, 0);
1450 * journal_buf_switch() only inits the next journal entry when it
1451 * closes an open journal entry - the very first journal entry gets
1454 __bch2_journal_next_entry(j);
1457 * Adding entries to the next journal entry before allocating space on
1458 * disk for the next journal entry - this is ok, because these entries
1459 * only have to go down with the next journal entry we write:
1461 list_for_each_entry(bl, &j->seq_blacklist, list)
1463 bch2_journal_add_entry(journal_cur_buf(j), &bl->seq, 1,
1464 JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED,
1467 journal_pin_add_entry(j,
1468 &fifo_peek_back(&j->pin),
1470 journal_seq_blacklist_flush);
1474 spin_unlock(&j->lock);
1476 queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
1479 int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
1481 struct journal *j = &c->journal;
1482 struct bkey_i *k, *_n;
1483 struct jset_entry *entry;
1484 struct journal_replay *i, *n;
1485 int ret = 0, did_replay = 0;
1487 list_for_each_entry_safe(i, n, list, list) {
1488 j->replay_pin_list =
1489 journal_seq_pin(j, le64_to_cpu(i->j.seq));
1491 for_each_jset_key(k, _n, entry, &i->j) {
1492 struct disk_reservation disk_res;
1495 * We might cause compressed extents to be split, so we
1496 * need to pass in a disk_reservation:
1498 BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
1500 ret = bch2_btree_insert(c, entry->btree_id, k,
1501 &disk_res, NULL, NULL,
1502 BTREE_INSERT_NOFAIL|
1503 BTREE_INSERT_JOURNAL_REPLAY);
1504 bch2_disk_reservation_put(c, &disk_res);
1507 bch_err(c, "journal replay: error %d while replaying key",
1516 if (atomic_dec_and_test(&j->replay_pin_list->count))
1520 j->replay_pin_list = NULL;
1523 bch2_journal_flush_pins(&c->journal, U64_MAX);
1526 * Write a new journal entry _before_ we start journalling new data -
1527 * otherwise, we could end up with btree node bsets with journal seqs
1528 * arbitrarily far in the future vs. the most recently written journal
1529 * entry on disk, if we crash before writing the next journal entry:
1531 ret = bch2_journal_meta(j);
1533 bch_err(c, "journal replay: error %d flushing journal", ret);
1538 bch2_journal_set_replay_done(j);
1540 bch2_journal_entries_free(list);
1546 * Allocate more journal space at runtime - not currently making use of it, but
1549 static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
1552 struct journal *j = &c->journal;
1553 struct journal_device *ja = &ca->journal;
1554 struct bch_sb_field_journal *journal_buckets;
1555 struct disk_reservation disk_res = { 0, 0 };
1557 u64 *new_bucket_seq = NULL, *new_buckets = NULL;
1560 closure_init_stack(&cl);
1562 /* don't handle reducing nr of buckets yet: */
1567 * note: journal buckets aren't really counted as _sectors_ used yet, so
1568 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
1569 * when space used goes up without a reservation - but we do need the
1570 * reservation to ensure we'll actually be able to allocate:
1573 if (bch2_disk_reservation_get(c, &disk_res,
1574 (nr - ja->nr) << ca->bucket_bits, 0))
1577 mutex_lock(&c->sb_lock);
1580 new_buckets = kzalloc(nr * sizeof(u64), GFP_KERNEL);
1581 new_bucket_seq = kzalloc(nr * sizeof(u64), GFP_KERNEL);
1582 if (!new_buckets || !new_bucket_seq)
1585 journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
1586 nr + sizeof(*journal_buckets) / sizeof(u64));
1587 if (!journal_buckets)
1590 spin_lock(&j->lock);
1591 memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
1592 memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
1593 swap(new_buckets, ja->buckets);
1594 swap(new_bucket_seq, ja->bucket_seq);
1596 while (ja->nr < nr) {
1597 /* must happen under journal lock, to avoid racing with gc: */
1598 u64 b = bch2_bucket_alloc(ca, RESERVE_NONE);
1600 if (!closure_wait(&c->freelist_wait, &cl)) {
1601 spin_unlock(&j->lock);
1603 spin_lock(&j->lock);
1608 bch2_mark_metadata_bucket(ca, &ca->buckets[b],
1609 BUCKET_JOURNAL, false);
1610 bch2_mark_alloc_bucket(ca, &ca->buckets[b], false);
1612 memmove(ja->buckets + ja->last_idx + 1,
1613 ja->buckets + ja->last_idx,
1614 (ja->nr - ja->last_idx) * sizeof(u64));
1615 memmove(ja->bucket_seq + ja->last_idx + 1,
1616 ja->bucket_seq + ja->last_idx,
1617 (ja->nr - ja->last_idx) * sizeof(u64));
1618 memmove(journal_buckets->buckets + ja->last_idx + 1,
1619 journal_buckets->buckets + ja->last_idx,
1620 (ja->nr - ja->last_idx) * sizeof(u64));
1622 ja->buckets[ja->last_idx] = b;
1623 journal_buckets->buckets[ja->last_idx] = cpu_to_le64(b);
1625 if (ja->last_idx < ja->nr) {
1626 if (ja->cur_idx >= ja->last_idx)
1633 spin_unlock(&j->lock);
1635 BUG_ON(bch2_validate_journal_layout(ca->disk_sb.sb, ca->mi));
1637 bch2_write_super(c);
1641 mutex_unlock(&c->sb_lock);
1643 kfree(new_bucket_seq);
1645 bch2_disk_reservation_put(c, &disk_res);
1651 int bch2_dev_journal_alloc(struct bch_dev *ca)
1653 struct journal_device *ja = &ca->journal;
1654 struct bch_sb_field_journal *journal_buckets;
1658 if (dynamic_fault("bcachefs:add:journal_alloc"))
1662 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
1665 nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
1666 BCH_JOURNAL_BUCKETS_MIN,
1668 (1 << 20) / ca->mi.bucket_size));
1670 p = krealloc(ja->bucket_seq, nr * sizeof(u64),
1671 GFP_KERNEL|__GFP_ZERO);
1677 p = krealloc(ja->buckets, nr * sizeof(u64),
1678 GFP_KERNEL|__GFP_ZERO);
1684 journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
1685 nr + sizeof(*journal_buckets) / sizeof(u64));
1686 if (!journal_buckets)
1689 for (i = 0, b = ca->mi.first_bucket;
1690 i < nr && b < ca->mi.nbuckets; b++) {
1691 if (!is_available_bucket(ca->buckets[b].mark))
1694 bch2_mark_metadata_bucket(ca, &ca->buckets[b],
1695 BUCKET_JOURNAL, true);
1697 journal_buckets->buckets[i] = cpu_to_le64(b);
1704 BUG_ON(bch2_validate_journal_layout(ca->disk_sb.sb, ca->mi));
1714 * journal_reclaim_fast - do the fast part of journal reclaim
1716 * Called from IO submission context, does not block. Cleans up after btree
1717 * write completions by advancing the journal pin and each cache's last_idx,
1718 * kicking off discards and background reclaim as necessary.
1720 static void journal_reclaim_fast(struct journal *j)
1722 struct journal_entry_pin_list temp;
1723 bool popped = false;
1725 lockdep_assert_held(&j->lock);
1728 * Unpin journal entries whose reference counts reached zero, meaning
1729 * all btree nodes got written out
1731 while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
1732 BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
1733 BUG_ON(!fifo_pop(&j->pin, temp));
1742 * Journal entry pinning - machinery for holding a reference on a given journal
1743 * entry, marking it as dirty:
1746 static inline void __journal_pin_add(struct journal *j,
1747 struct journal_entry_pin_list *pin_list,
1748 struct journal_entry_pin *pin,
1749 journal_pin_flush_fn flush_fn)
1751 BUG_ON(journal_pin_active(pin));
1753 atomic_inc(&pin_list->count);
1754 pin->pin_list = pin_list;
1755 pin->flush = flush_fn;
1758 list_add(&pin->list, &pin_list->list);
1760 INIT_LIST_HEAD(&pin->list);
1763 static void journal_pin_add_entry(struct journal *j,
1764 struct journal_entry_pin_list *pin_list,
1765 struct journal_entry_pin *pin,
1766 journal_pin_flush_fn flush_fn)
1768 spin_lock_irq(&j->pin_lock);
1769 __journal_pin_add(j, pin_list, pin, flush_fn);
1770 spin_unlock_irq(&j->pin_lock);
1773 void bch2_journal_pin_add(struct journal *j,
1774 struct journal_res *res,
1775 struct journal_entry_pin *pin,
1776 journal_pin_flush_fn flush_fn)
1778 struct journal_entry_pin_list *pin_list = res->ref
1779 ? journal_seq_pin(j, res->seq)
1780 : j->replay_pin_list;
1782 spin_lock_irq(&j->pin_lock);
1783 __journal_pin_add(j, pin_list, pin, flush_fn);
1784 spin_unlock_irq(&j->pin_lock);
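/*
 * Illustrative pin lifecycle (hypothetical caller; my_pin and my_flush_fn are
 * placeholder names, not code from this file): whoever has dirty state backed
 * by journal entry res.seq takes a pin when that state is journalled and
 * drops it once the state reaches disk:
 *
 *	bch2_journal_pin_add(j, &res, &my_pin, my_flush_fn);
 *	...later, after writing out the pinned state...
 *	bch2_journal_pin_drop(j, &my_pin);
 *
 * my_flush_fn(j, pin, seq) is called by journal reclaim if the pin is still
 * held when the journal needs the space back, and must write the state out.
 */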
1787 static inline bool __journal_pin_drop(struct journal *j,
1788 struct journal_entry_pin *pin)
1790 struct journal_entry_pin_list *pin_list = pin->pin_list;
1792 pin->pin_list = NULL;
1794 /* journal_reclaim_work() might have already taken us off the list */
1795 if (!list_empty_careful(&pin->list))
1796 list_del_init(&pin->list);
1798 return atomic_dec_and_test(&pin_list->count);
1801 void bch2_journal_pin_drop(struct journal *j,
1802 struct journal_entry_pin *pin)
1804 unsigned long flags;
1807 if (!journal_pin_active(pin))
1810 spin_lock_irqsave(&j->pin_lock, flags);
1811 wakeup = __journal_pin_drop(j, pin);
1812 spin_unlock_irqrestore(&j->pin_lock, flags);
1815 * Unpinning a journal entry may make journal_next_bucket() succeed, if
1816 * writing a new last_seq will now make another bucket available:
1818 * Nested irqsave is expensive, don't do the wakeup with lock held:
1824 void bch2_journal_pin_add_if_older(struct journal *j,
1825 struct journal_entry_pin *src_pin,
1826 struct journal_entry_pin *pin,
1827 journal_pin_flush_fn flush_fn)
1829 spin_lock_irq(&j->pin_lock);
1831 if (journal_pin_active(src_pin) &&
1832 (!journal_pin_active(pin) ||
1833 fifo_entry_idx(&j->pin, src_pin->pin_list) <
1834 fifo_entry_idx(&j->pin, pin->pin_list))) {
1835 if (journal_pin_active(pin))
1836 __journal_pin_drop(j, pin);
1837 __journal_pin_add(j, src_pin->pin_list, pin, flush_fn);
1840 spin_unlock_irq(&j->pin_lock);
1843 static struct journal_entry_pin *
1844 journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
1846 struct journal_entry_pin_list *pin_list;
1847 struct journal_entry_pin *ret = NULL;
1850 /* so we don't iterate over empty fifo entries below: */
1851 if (!atomic_read(&fifo_peek_front(&j->pin).count)) {
1852 spin_lock(&j->lock);
1853 journal_reclaim_fast(j);
1854 spin_unlock(&j->lock);
1857 spin_lock_irq(&j->pin_lock);
1858 fifo_for_each_entry_ptr(pin_list, &j->pin, iter) {
1859 if (journal_pin_seq(j, pin_list) > seq_to_flush)
1862 ret = list_first_entry_or_null(&pin_list->list,
1863 struct journal_entry_pin, list);
1865 /* must be list_del_init(), see bch2_journal_pin_drop() */
1866 list_move(&ret->list, &pin_list->flushed);
1867 *seq = journal_pin_seq(j, pin_list);
1871 spin_unlock_irq(&j->pin_lock);
1876 static bool journal_flush_done(struct journal *j, u64 seq_to_flush)
1880 spin_lock(&j->lock);
1881 journal_reclaim_fast(j);
1883 ret = (fifo_used(&j->pin) == 1 &&
1884 atomic_read(&fifo_peek_front(&j->pin).count) == 1) ||
1885 last_seq(j) > seq_to_flush;
1886 spin_unlock(&j->lock);
1891 void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
1893 struct journal_entry_pin *pin;
1896 while ((pin = journal_get_next_pin(j, seq_to_flush, &pin_seq)))
1897 pin->flush(j, pin, pin_seq);
1900 journal_flush_done(j, seq_to_flush) ||
1901 bch2_journal_error(j));
1904 static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
1908 spin_lock(&j->lock);
1910 (ja->last_idx != ja->cur_idx &&
1911 ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
1912 spin_unlock(&j->lock);
1918 * journal_reclaim_work - free up journal buckets
1920 * Background journal reclaim writes out btree nodes. It should be run
1921 * early enough so that we never completely run out of journal buckets.
1923 * High watermarks for triggering background reclaim:
1924 * - FIFO has fewer than 512 entries left
1925 * - fewer than 25% journal buckets free
1927 * Background reclaim runs until low watermarks are reached:
1928 * - FIFO has more than 1024 entries left
1929 * - more than 50% journal buckets free
1931 * As long as a reclaim can complete in the time it takes to fill up
1932 * 512 journal entries or 25% of all journal buckets, then
1933 * journal_next_bucket() should not stall.
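 *
 * An illustrative note on the flush target (restating the code below, not a
 * spec): for each device we aim to free up roughly half the journal buckets,
 *
 *	bucket_to_flush = (ja->cur_idx + ja->nr / 2) % ja->nr
 *	seq_to_flush    = max(seq_to_flush, ja->bucket_seq[bucket_to_flush])
 *
 * and we additionally flush down to j->seq - j->pin.size / 2 so the pin fifo
 * never stays more than half full.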
1935 static void journal_reclaim_work(struct work_struct *work)
1937 struct bch_fs *c = container_of(to_delayed_work(work),
1938 struct bch_fs, journal.reclaim_work);
1939 struct journal *j = &c->journal;
1941 struct journal_entry_pin *pin;
1942 u64 seq, seq_to_flush = 0;
1943 unsigned iter, bucket_to_flush;
1944 unsigned long next_flush;
1945 bool reclaim_lock_held = false, need_flush;
1948 * Advance last_idx to point to the oldest journal entry containing
1949 * btree node updates that have not yet been written out
1951 for_each_rw_member(ca, c, iter) {
1952 struct journal_device *ja = &ca->journal;
1957 while (should_discard_bucket(j, ja)) {
1958 if (!reclaim_lock_held) {
1961 * might be called from __journal_res_get()
1962 * under wait_event() - have to go back to
1963 * TASK_RUNNING before doing something that
1964 * would block, but only if we're doing work:
1966 __set_current_state(TASK_RUNNING);
1968 mutex_lock(&j->reclaim_lock);
1969 reclaim_lock_held = true;
1970 /* recheck under reclaim_lock: */
1974 if (ca->mi.discard &&
1975 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1976 blkdev_issue_discard(ca->disk_sb.bdev,
1977 bucket_to_sector(ca,
1978 ja->buckets[ja->last_idx]),
1979 ca->mi.bucket_size, GFP_NOIO, 0);
1981 spin_lock(&j->lock);
1982 ja->last_idx = (ja->last_idx + 1) % ja->nr;
1983 spin_unlock(&j->lock);
1989 * Write out enough btree nodes to free up 50% journal
1992 spin_lock(&j->lock);
1993 bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
1994 seq_to_flush = max_t(u64, seq_to_flush,
1995 ja->bucket_seq[bucket_to_flush]);
1996 spin_unlock(&j->lock);
1999 if (reclaim_lock_held)
2000 mutex_unlock(&j->reclaim_lock);
2002 /* Also flush if the pin fifo is more than half full */
2003 seq_to_flush = max_t(s64, seq_to_flush,
2004 (s64) atomic64_read(&j->seq) -
2005 (j->pin.size >> 1));
2008 * If it's been longer than j->reclaim_delay_ms since we last flushed,
2009 * make sure to flush at least one journal pin:
2011 next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
2012 need_flush = time_after(jiffies, next_flush);
2014 while ((pin = journal_get_next_pin(j, need_flush
2016 : seq_to_flush, &seq))) {
2017 __set_current_state(TASK_RUNNING);
2018 pin->flush(j, pin, seq);
2021 j->last_flushed = jiffies;
2024 if (!test_bit(BCH_FS_RO, &c->flags))
2025 queue_delayed_work(system_freezable_wq, &j->reclaim_work,
2026 msecs_to_jiffies(j->reclaim_delay_ms));
2030 * journal_next_bucket - move on to the next journal bucket if possible
2032 static int journal_write_alloc(struct journal *j, unsigned sectors)
2034 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2035 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
2036 struct bch_extent_ptr *ptr;
2037 struct journal_device *ja;
2040 unsigned i, replicas, replicas_want =
2041 READ_ONCE(c->opts.metadata_replicas);
2043 spin_lock(&j->lock);
2046 * Drop any pointers to devices that have been removed, are no longer
2047 * empty, or filled up their current journal bucket:
2049 * Note that a device may have had a small amount of free space (perhaps
2050 * one sector) that wasn't enough for the smallest possible journal
2051 * entry - that's why we drop pointers to devices <= current free space,
2052 * i.e. whichever device was limiting the current journal entry size.
2054 extent_for_each_ptr_backwards(e, ptr) {
2055 ca = c->devs[ptr->dev];
2057 if (ca->mi.state != BCH_MEMBER_STATE_RW ||
2058 ca->journal.sectors_free <= sectors)
2059 __bch2_extent_drop_ptr(e, ptr);
2061 ca->journal.sectors_free -= sectors;
2064 replicas = bch2_extent_nr_ptrs(e.c);
2066 spin_lock(&j->devs.lock);
2072 for (i = 0; i + 1 < j->devs.nr; i++)
2073 if (j->devs.d[i + 0].dev->mi.tier >
2074 j->devs.d[i + 1].dev->mi.tier) {
2075 swap(j->devs.d[i], j->devs.d[i + 1]);
2081 * Pick devices for next journal write:
2082 * XXX: sort devices by free journal space?
2084 group_for_each_dev(ca, &j->devs, i) {
2087 if (replicas >= replicas_want)
2091 * Check that we can use this device, and aren't already using
2094 if (bch2_extent_has_device(e.c, ca->dev_idx) ||
2095 !journal_dev_buckets_available(j, ca) ||
2096 sectors > ca->mi.bucket_size)
2099 ja->sectors_free = ca->mi.bucket_size - sectors;
2100 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
2101 ja->bucket_seq[ja->cur_idx] = atomic64_read(&j->seq);
2103 extent_ptr_append(bkey_i_to_extent(&j->key),
2104 (struct bch_extent_ptr) {
2105 .offset = bucket_to_sector(ca,
2106 ja->buckets[ja->cur_idx]),
2111 spin_unlock(&j->devs.lock);
2113 j->prev_buf_sectors = 0;
2114 spin_unlock(&j->lock);
2116 if (replicas < c->opts.metadata_replicas_required)
2124 static void journal_write_compact(struct jset *jset)
2126 struct jset_entry *i, *next, *prev = NULL;
2129 * Simple compaction, dropping empty jset_entries (from journal
2130 * reservations that weren't fully used) and merging jset_entries that
2133 * If we wanted to be really fancy here, we could sort all the keys in
2134 * the jset and drop keys that were overwritten - probably not worth it:
2136 vstruct_for_each_safe(jset, i, next) {
2137 unsigned u64s = le16_to_cpu(i->u64s);
2143 /* Can we merge with previous entry? */
2145 i->btree_id == prev->btree_id &&
2146 i->level == prev->level &&
2147 JOURNAL_ENTRY_TYPE(i) == JOURNAL_ENTRY_TYPE(prev) &&
2148 JOURNAL_ENTRY_TYPE(i) == JOURNAL_ENTRY_BTREE_KEYS &&
2149 le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
2150 memmove_u64s_down(vstruct_next(prev),
2153 le16_add_cpu(&prev->u64s, u64s);
2157 /* Couldn't merge, move i into new position (after prev): */
2158 prev = prev ? vstruct_next(prev) : jset->start;
2160 memmove_u64s_down(prev, i, jset_u64s(u64s));
2163 prev = prev ? vstruct_next(prev) : jset->start;
2164 jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
2167 static void journal_write_endio(struct bio *bio)
2169 struct bch_dev *ca = bio->bi_private;
2170 struct journal *j = &ca->fs->journal;
2172 if (bch2_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") ||
2173 bch2_meta_write_fault("journal"))
2174 bch2_journal_halt(j);
2176 closure_put(&j->io);
2177 percpu_ref_put(&ca->io_ref);
2180 static void journal_write_done(struct closure *cl)
2182 struct journal *j = container_of(cl, struct journal, io);
2183 struct journal_buf *w = journal_prev_buf(j);
2185 __bch2_time_stats_update(j->write_time, j->write_start_time);
2187 j->last_seq_ondisk = le64_to_cpu(w->data->last_seq);
2190 * Updating last_seq_ondisk may let journal_reclaim_work() discard more
2193 * Must come before signaling write completion, for
2194 * bch2_fs_journal_stop():
2196 mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
2198 BUG_ON(!j->reservations.prev_buf_unwritten);
2199 atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
2200 &j->reservations.counter);
2203 * XXX: this is racy, we could technically end up doing the wake up
2204 * after the journal_buf struct has been reused for the next write
2205 * (because we're clearing JOURNAL_IO_IN_FLIGHT) and wake up things that
2206 * are waiting on the _next_ write, not this one.
2208 * The wake up can't come before, because journal_flush_seq_async() is
2209 * looking at JOURNAL_IO_IN_FLIGHT when it has to wait on a journal
2210 * write that was already in flight.
2212 * The right fix is to use a lock here, but using j.lock here means it
2213 * has to be a spin_lock_irqsave() lock which then requires propagating
2214 * the irq()ness to other locks and it's all kinds of nastiness.
2217 closure_wake_up(&w->wait);
2221 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
2223 /* we aren't holding j->lock: */
2224 unsigned new_size = READ_ONCE(j->buf_size_want);
2227 if (buf->size >= new_size)
2230 new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
2234 memcpy(new_buf, buf->data, buf->size);
2235 kvpfree(buf->data, buf->size);
2236 buf->data = new_buf;
2237 buf->size = new_size;
2240 static void journal_write(struct closure *cl)
2242 struct journal *j = container_of(cl, struct journal, io);
2243 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2245 struct journal_buf *w = journal_prev_buf(j);
2248 struct bch_extent_ptr *ptr;
2249 unsigned i, sectors, bytes;
2251 journal_buf_realloc(j, w);
2254 j->write_start_time = local_clock();
2256 bch2_journal_add_prios(j, w);
2258 mutex_lock(&c->btree_root_lock);
2259 for (i = 0; i < BTREE_ID_NR; i++) {
2260 struct btree_root *r = &c->btree_roots[i];
2263 bch2_journal_add_btree_root(w, i, &r->key, r->level);
2265 mutex_unlock(&c->btree_root_lock);
2267 journal_write_compact(jset);
2269 jset->read_clock = cpu_to_le16(c->prio_clock[READ].hand);
2270 jset->write_clock = cpu_to_le16(c->prio_clock[WRITE].hand);
2271 jset->magic = cpu_to_le64(jset_magic(c));
2272 jset->version = cpu_to_le32(BCACHE_JSET_VERSION);
2274 SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
2275 SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
2277 bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
2278 jset->encrypted_start,
2279 vstruct_end(jset) - (void *) jset->encrypted_start);
2281 jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
2282 journal_nonce(jset), jset);
2284 sectors = vstruct_sectors(jset, c->block_bits);
2285 BUG_ON(sectors > j->prev_buf_sectors);
2287 bytes = vstruct_bytes(w->data);
2288 memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
2290 if (journal_write_alloc(j, sectors)) {
2291 bch2_journal_halt(j);
2292 bch_err(c, "Unable to allocate journal write");
2293 bch2_fatal_error(c);
2294 closure_return_with_destructor(cl, journal_write_done);
2297 bch2_check_mark_super(c, &j->key, true);
2300 * XXX: we really should just disable the entire journal in nochanges
2303 if (c->opts.nochanges)
2306 extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr) {
2307 ca = c->devs[ptr->dev];
2308 if (!percpu_ref_tryget(&ca->io_ref)) {
2310 bch_err(c, "missing device for journal write\n");
2314 atomic64_add(sectors, &ca->meta_sectors_written);
2316 bio = ca->journal.bio;
2318 bio->bi_iter.bi_sector = ptr->offset;
2319 bio->bi_bdev = ca->disk_sb.bdev;
2320 bio->bi_iter.bi_size = sectors << 9;
2321 bio->bi_end_io = journal_write_endio;
2322 bio->bi_private = ca;
2323 bio_set_op_attrs(bio, REQ_OP_WRITE,
2324 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
2325 bch2_bio_map(bio, jset);
2327 trace_journal_write(bio);
2328 closure_bio_submit(bio, cl);
2330 ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
2333 for_each_rw_member(ca, c, i)
2334 if (journal_flushes_device(ca) &&
2335 !bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) {
2336 percpu_ref_get(&ca->io_ref);
2338 bio = ca->journal.bio;
2340 bio->bi_bdev = ca->disk_sb.bdev;
2341 bio->bi_end_io = journal_write_endio;
2342 bio->bi_private = ca;
2343 bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
2344 closure_bio_submit(bio, cl);
2348 extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
2349 ptr->offset += sectors;
2351 closure_return_with_destructor(cl, journal_write_done);
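/*
 * Illustrative summary of the write path above (a reading aid, not
 * authoritative documentation): journal_buf_switch() closes the open entry;
 * once the last reservation reference is dropped,
 * bch2_journal_buf_put_slowpath() kicks off journal_write(), which appends
 * the btree roots and prio pointers, compacts the jset, encrypts and
 * checksums it, picks devices with journal_write_alloc(), and submits one bio
 * per replica plus flush-only bios to the remaining devices;
 * journal_write_done() finally records last_seq_ondisk and kicks reclaim.
 */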
2354 static void journal_write_work(struct work_struct *work)
2356 struct journal *j = container_of(to_delayed_work(work),
2357 struct journal, write_work);
2358 spin_lock(&j->lock);
2359 if (!journal_entry_is_open(j)) {
2360 spin_unlock(&j->lock);
2364 set_bit(JOURNAL_NEED_WRITE, &j->flags);
2365 if (journal_buf_switch(j, false) != JOURNAL_UNLOCKED)
2366 spin_unlock(&j->lock);
2370 * Given an inode number, if that inode number has data in the journal that
2371 * hasn't yet been flushed, return the journal sequence number that needs to be
2374 u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
2376 size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
2379 if (!test_bit(h, j->buf[0].has_inode) &&
2380 !test_bit(h, j->buf[1].has_inode))
2383 spin_lock(&j->lock);
2384 if (test_bit(h, journal_cur_buf(j)->has_inode))
2385 seq = atomic64_read(&j->seq);
2386 else if (test_bit(h, journal_prev_buf(j)->has_inode))
2387 seq = atomic64_read(&j->seq) - 1;
2388 spin_unlock(&j->lock);
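/*
 * Hypothetical fsync-style usage (a sketch; the real callers live outside
 * this file): look up the newest journal sequence number that still covers
 * the inode's updates, then wait for it to reach disk:
 *
 *	u64 seq = bch2_inode_journal_seq(j, inode_nr);
 *	if (seq)
 *		ret = bch2_journal_flush_seq(j, seq);
 *
 * If neither buffer's has_inode filter has the inode's hash bit set, nothing
 * unflushed references that inode and no journal write is needed.
 */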
2393 static int __journal_res_get(struct journal *j, struct journal_res *res,
2394 unsigned u64s_min, unsigned u64s_max)
2396 struct bch_fs *c = container_of(j, struct bch_fs, journal);
2397 struct journal_buf *buf;
2400 ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
2404 spin_lock(&j->lock);
2406 * Recheck after taking the lock, so we don't race with another thread
2407 * that just did journal_entry_open() and call journal_entry_close()
2410 ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
2412 spin_unlock(&j->lock);
2417 * If we couldn't get a reservation because the current buf filled up,
2418 * and we had room for a bigger entry on disk, signal that we want to
2419 * realloc the journal bufs:
2421 buf = journal_cur_buf(j);
2422 if (journal_entry_is_open(j) &&
2423 buf->size >> 9 < buf->disk_sectors &&
2424 buf->size < JOURNAL_ENTRY_SIZE_MAX)
2425 j->buf_size_want = max(j->buf_size_want, buf->size << 1);
2428 * Close the current journal entry if necessary, then try to start a new
2431 switch (journal_buf_switch(j, false)) {
2432 case JOURNAL_ENTRY_ERROR:
2433 spin_unlock(&j->lock);
2435 case JOURNAL_ENTRY_INUSE:
2436 /* haven't finished writing out the previous one: */
2437 spin_unlock(&j->lock);
2438 trace_journal_entry_full(c);
2440 case JOURNAL_ENTRY_CLOSED:
2442 case JOURNAL_UNLOCKED:
2446 /* We now have a new, closed journal buf - see if we can open it: */
2447 ret = journal_entry_open(j);
2448 spin_unlock(&j->lock);
2455 /* Journal's full, we have to wait */
2458 * Direct reclaim - can't rely on reclaim from work item
2461 journal_reclaim_work(&j->reclaim_work.work);
2463 trace_journal_full(c);
2465 if (!j->res_get_blocked_start)
2466 j->res_get_blocked_start = local_clock() ?: 1;
2471 * Essentially the entry function to the journaling code. When bcachefs is doing
2472 * a btree insert, it calls this function to get the current journal write.
2473 * Journal write is the structure used to set up journal writes. The calling
2474 * function will then add its keys to the structure, queuing them for the next
2477 * To ensure forward progress, the current task must not be holding any
2478 * btree node write locks.
2480 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
2481 unsigned u64s_min, unsigned u64s_max)
2485 wait_event(j->wait,
2486 (ret = __journal_res_get(j, res, u64s_min,
2487 u64s_max)));
2488 return ret < 0 ? ret : 0;
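/*
 * Illustrative sketch (not part of the original file) of the reservation
 * lifecycle around bch2_journal_res_get()/bch2_journal_res_put() -
 * bch2_journal_meta_async() below is the simplest real example.  Here
 * "insert" stands in for whatever key the caller wants journalled, and
 * passing the same value for the min/max u64s arguments means the caller
 * won't accept a smaller reservation than it asked for:
 *
 *	struct journal_res res;
 *	unsigned u64s = jset_u64s(insert->k.u64s);
 *	int ret;
 *
 *	memset(&res, 0, sizeof(res));
 *
 *	ret = bch2_journal_res_get(j, &res, u64s, u64s);
 *	if (ret)
 *		return ret;
 *
 *	... copy keys into the reservation ...
 *
 *	bch2_journal_res_put(j, &res);
 */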
2491 void bch2_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
2493 spin_lock(&j->lock);
2495 BUG_ON(seq > atomic64_read(&j->seq));
2497 if (bch2_journal_error(j)) {
2498 spin_unlock(&j->lock);
2499 return;
2500 }
2502 if (seq == atomic64_read(&j->seq)) {
2503 if (!closure_wait(&journal_cur_buf(j)->wait, parent))
2505 } else if (seq + 1 == atomic64_read(&j->seq) &&
2506 j->reservations.prev_buf_unwritten) {
2507 if (!closure_wait(&journal_prev_buf(j)->wait, parent))
2512 /* check if raced with write completion (or failure) */
2513 if (!j->reservations.prev_buf_unwritten ||
2514 bch2_journal_error(j))
2515 closure_wake_up(&journal_prev_buf(j)->wait);
2518 spin_unlock(&j->lock);
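/*
 * bch2_journal_flush_seq_async() is the stronger variant of the above: in
 * addition to waiting on @parent, it marks the journal as needing a write and
 * closes the current entry via journal_buf_switch(), so the entry containing
 * @seq is written out now rather than whenever it next fills up or the write
 * timer fires.
 */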
2521 void bch2_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent)
2523 struct journal_buf *buf;
2525 spin_lock(&j->lock);
2527 BUG_ON(seq > atomic64_read(&j->seq));
2529 if (bch2_journal_error(j)) {
2530 spin_unlock(&j->lock);
2531 return;
2532 }
2534 if (seq == atomic64_read(&j->seq)) {
2535 bool set_need_write = false;
2537 buf = journal_cur_buf(j);
2539 if (parent && !closure_wait(&buf->wait, parent))
2542 if (!test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags)) {
2543 j->need_write_time = local_clock();
2544 set_need_write = true;
2547 switch (journal_buf_switch(j, set_need_write)) {
2548 case JOURNAL_ENTRY_ERROR:
2550 closure_wake_up(&buf->wait);
2552 case JOURNAL_ENTRY_CLOSED:
2554 * Journal entry hasn't been opened yet, but caller
2555 * claims it has something (seq == j->seq):
2558 case JOURNAL_ENTRY_INUSE:
2560 case JOURNAL_UNLOCKED:
2563 } else if (parent &&
2564 seq + 1 == atomic64_read(&j->seq) &&
2565 j->reservations.prev_buf_unwritten) {
2566 buf = journal_prev_buf(j);
2568 if (!closure_wait(&buf->wait, parent))
2573 /* check if raced with write completion (or failure) */
2574 if (!j->reservations.prev_buf_unwritten ||
2575 bch2_journal_error(j))
2576 closure_wake_up(&buf->wait);
2579 spin_unlock(&j->lock);
2582 int bch2_journal_flush_seq(struct journal *j, u64 seq)
2585 u64 start_time = local_clock();
2587 closure_init_stack(&cl);
2588 bch2_journal_flush_seq_async(j, seq, &cl);
2589 closure_sync(&cl);
2591 bch2_time_stats_update(j->flush_seq_time, start_time);
2593 return bch2_journal_error(j);
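/*
 * The two helpers below reserve space for an empty journal entry (just the
 * jset_u64s(0) entry overhead), immediately drop the reservation again, and
 * then flush the sequence number it was assigned - i.e. they force a new
 * journal entry to be written and (a)wait on it without journalling any keys.
 */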
2596 void bch2_journal_meta_async(struct journal *j, struct closure *parent)
2598 struct journal_res res;
2599 unsigned u64s = jset_u64s(0);
2601 memset(&res, 0, sizeof(res));
2603 bch2_journal_res_get(j, &res, u64s, u64s);
2604 bch2_journal_res_put(j, &res);
2606 bch2_journal_flush_seq_async(j, res.seq, parent);
2609 int bch2_journal_meta(struct journal *j)
2611 struct journal_res res;
2612 unsigned u64s = jset_u64s(0);
2615 memset(&res, 0, sizeof(res));
2617 ret = bch2_journal_res_get(j, &res, u64s, u64s);
2618 if (ret)
2619 return ret;
2621 bch2_journal_res_put(j, &res);
2623 return bch2_journal_flush_seq(j, res.seq);
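/*
 * bch2_journal_flush_async()/bch2_journal_flush() flush everything journalled
 * so far: the currently open entry if there is one, otherwise the most
 * recently closed entry (seq - 1).  If nothing has ever been journalled
 * (journal_seq == 0) there's nothing to do.
 */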
2626 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
2628 u64 seq, journal_seq;
2630 spin_lock(&j->lock);
2631 journal_seq = atomic64_read(&j->seq);
2633 if (journal_entry_is_open(j)) {
2634 seq = journal_seq;
2635 } else if (journal_seq) {
2636 seq = journal_seq - 1;
2637 } else {
2638 spin_unlock(&j->lock);
2639 return;
2640 }
2641 spin_unlock(&j->lock);
2643 bch2_journal_flush_seq_async(j, seq, parent);
2646 int bch2_journal_flush(struct journal *j)
2648 u64 seq, journal_seq;
2650 spin_lock(&j->lock);
2651 journal_seq = atomic64_read(&j->seq);
2653 if (journal_entry_is_open(j)) {
2654 seq = journal_seq;
2655 } else if (journal_seq) {
2656 seq = journal_seq - 1;
2657 } else {
2658 spin_unlock(&j->lock);
2659 return 0;
2660 }
2661 spin_unlock(&j->lock);
2663 return bch2_journal_flush_seq(j, seq);
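/*
 * Debug helpers: bch2_journal_print_debug() formats the in-memory journal
 * state (sequence numbers, reservation state, per-device bucket indices) into
 * a buffer, presumably for export via sysfs/debugfs, and
 * bch2_journal_print_pins() does the same for the journal pin FIFO.
 */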
2666 ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
2668 union journal_res_state *s = &j->reservations;
2674 spin_lock(&j->lock);
2676 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2677 "active journal entries:\t%zu\n"
2679 "last_seq:\t\t%llu\n"
2680 "last_seq_ondisk:\t%llu\n"
2681 "reservation count:\t%u\n"
2682 "reservation offset:\t%u\n"
2683 "current entry u64s:\t%u\n"
2684 "io in flight:\t\t%i\n"
2685 "need write:\t\t%i\n"
2687 "replay done:\t\t%i\n",
2689 (u64) atomic64_read(&j->seq),
2692 journal_state_count(*s, s->idx),
2693 s->cur_entry_offset,
2695 s->prev_buf_unwritten,
2696 test_bit(JOURNAL_NEED_WRITE, &j->flags),
2697 journal_entry_is_open(j),
2698 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
2700 spin_lock(&j->devs.lock);
2701 group_for_each_dev(ca, &j->devs, iter) {
2702 struct journal_device *ja = &ca->journal;
2704 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2707 "\tcur_idx\t\t%u (seq %llu)\n"
2708 "\tlast_idx\t%u (seq %llu)\n",
2710 ja->cur_idx, ja->bucket_seq[ja->cur_idx],
2711 ja->last_idx, ja->bucket_seq[ja->last_idx]);
2713 spin_unlock(&j->devs.lock);
2715 spin_unlock(&j->lock);
2721 ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
2723 struct journal_entry_pin_list *pin_list;
2724 struct journal_entry_pin *pin;
2728 spin_lock_irq(&j->pin_lock);
2729 fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
2730 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2732 journal_pin_seq(j, pin_list),
2733 atomic_read(&pin_list->count));
2735 list_for_each_entry(pin, &pin_list->list, list)
2736 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2740 if (!list_empty(&pin_list->flushed))
2741 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2744 list_for_each_entry(pin, &pin_list->flushed, list)
2745 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2749 spin_unlock_irq(&j->pin_lock);
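/*
 * bch2_journal_writing_to_device() checks whether the journal's current write
 * position (j->key) still contains a pointer to @ca, i.e. whether the next
 * journal write would touch that device.
 */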
2754 static bool bch2_journal_writing_to_device(struct bch_dev *ca)
2756 struct journal *j = &ca->fs->journal;
2759 spin_lock(&j->lock);
2760 ret = bch2_extent_has_device(bkey_i_to_s_c_extent(&j->key),
2762 spin_unlock(&j->lock);
2768 * This assumes that ca has already been marked read-only so that
2769 * journal_next_bucket won't pick buckets out of ca any more.
2770 * Hence, if the journal is not currently pointing to ca, there
2771 * will be no new writes to journal entries in ca after all the
2772 * pending ones have been flushed to disk.
2774 * If the journal is being written to ca, write a new record, and
2775 * journal_next_bucket will notice that the device is no longer
2776 * writeable and pick a new set of devices to write to.
2779 int bch2_journal_move(struct bch_dev *ca)
2781 struct journal_device *ja = &ca->journal;
2782 struct journal *j = &ca->fs->journal;
2783 u64 seq_to_flush = 0;
2787 if (bch2_journal_writing_to_device(ca)) {
2789 * bch2_journal_meta() will write a record and we'll wait
2790 * for the write to complete.
2791 * Actually writing the journal (journal_write_locked)
2792 * will call journal_next_bucket which notices that the
2793 * device is no longer writeable, and picks a new one.
2795 bch2_journal_meta(j);
2796 BUG_ON(bch2_journal_writing_to_device(ca));
2799 for (i = 0; i < ja->nr; i++)
2800 seq_to_flush = max(seq_to_flush, ja->bucket_seq[i]);
2802 bch2_journal_flush_pins(j, seq_to_flush);
2805 * Force a metadata journal entry to be written so that
2806 * we have newer journal entries in devices other than ca,
2807 * and wait for the metadata write to complete.
2809 bch2_journal_meta(j);
2812 * Verify that we no longer need any of the journal entries in
2813 * the device being removed:
2815 spin_lock(&j->lock);
2816 ret = j->last_seq_ondisk > seq_to_flush ? 0 : -EIO;
2817 spin_unlock(&j->lock);
2822 void bch2_fs_journal_stop(struct journal *j)
2824 if (!test_bit(JOURNAL_STARTED, &j->flags))
2828 * Empty out the journal by first flushing everything pinning existing
2829 * journal entries, then force a brand new empty journal entry to be
2830 * written:
2832 bch2_journal_flush_pins(j, U64_MAX);
2833 bch2_journal_flush_async(j, NULL);
2834 bch2_journal_meta(j);
2836 cancel_delayed_work_sync(&j->write_work);
2837 cancel_delayed_work_sync(&j->reclaim_work);
2840 void bch2_dev_journal_exit(struct bch_dev *ca)
2842 kfree(ca->journal.bio);
2843 kfree(ca->journal.buckets);
2844 kfree(ca->journal.bucket_seq);
2846 ca->journal.bio = NULL;
2847 ca->journal.buckets = NULL;
2848 ca->journal.bucket_seq = NULL;
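/*
 * bch2_dev_journal_init() sets up the per-device journal state from the
 * superblock's journal field: the list of journal buckets, the per-bucket
 * sequence numbers, and a bio big enough for a maximum-size journal entry.
 */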
2851 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
2853 struct journal_device *ja = &ca->journal;
2854 struct bch_sb_field_journal *journal_buckets =
2855 bch2_sb_get_journal(sb);
2858 ja->nr = bch2_nr_journal_buckets(journal_buckets);
2860 ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
2861 if (!ja->bucket_seq)
2864 ca->journal.bio = bio_kmalloc(GFP_KERNEL,
2865 DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
2866 if (!ca->journal.bio)
2869 ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
2870 if (!ja->buckets)
2871 return -ENOMEM;
2873 for (i = 0; i < ja->nr; i++)
2874 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
2879 void bch2_fs_journal_exit(struct journal *j)
2881 kvpfree(j->buf[1].data, j->buf[1].size);
2882 kvpfree(j->buf[0].data, j->buf[0].size);
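/*
 * bch2_fs_journal_init() sets up the in-memory journal: locks, work items,
 * the two journal buffers (both starting at JOURNAL_ENTRY_SIZE_MIN), the
 * default write/reclaim delays, and the pin FIFO.  The reservation state
 * starts out at JOURNAL_ENTRY_CLOSED_VAL, so no reservations can be taken
 * until a journal entry is actually opened.
 */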
2886 int bch2_fs_journal_init(struct journal *j)
2888 static struct lock_class_key res_key;
2890 spin_lock_init(&j->lock);
2891 spin_lock_init(&j->pin_lock);
2892 init_waitqueue_head(&j->wait);
2893 INIT_DELAYED_WORK(&j->write_work, journal_write_work);
2894 INIT_DELAYED_WORK(&j->reclaim_work, journal_reclaim_work);
2895 mutex_init(&j->blacklist_lock);
2896 INIT_LIST_HEAD(&j->seq_blacklist);
2897 spin_lock_init(&j->devs.lock);
2898 mutex_init(&j->reclaim_lock);
2900 lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
2902 j->buf[0].size = JOURNAL_ENTRY_SIZE_MIN;
2903 j->buf[1].size = JOURNAL_ENTRY_SIZE_MIN;
2904 j->write_delay_ms = 100;
2905 j->reclaim_delay_ms = 100;
2907 bkey_extent_init(&j->key);
2909 atomic64_set(&j->reservations.counter,
2910 ((union journal_res_state)
2911 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
2913 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
2914 !(j->buf[0].data = kvpmalloc(j->buf[0].size, GFP_KERNEL)) ||
2915 !(j->buf[1].data = kvpmalloc(j->buf[1].size, GFP_KERNEL)))
2918 j->pin.front = j->pin.back = 1;