/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bkey_methods.h"
#include "btree_update.h"
#include "btree_update_interior.h"

#include <trace/events/bcachefs.h>
static void journal_write(struct closure *);
static void journal_reclaim_fast(struct journal *);
static void journal_pin_add_entry(struct journal *,
				  struct journal_entry_pin_list *,
				  struct journal_entry_pin *,
				  journal_pin_flush_fn);
static inline void journal_wake(struct journal *j)
{
	closure_wake_up(&j->async_wait);
}
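/*
 * The journal double buffers its writes: j->reservations.idx selects the
 * buffer currently open for new reservations, and its complement (!idx) is
 * the previous buffer, which may still be in flight to disk.
 */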
static inline struct journal_buf *journal_cur_buf(struct journal *j)
{
	return j->buf + j->reservations.idx;
}

static inline struct journal_buf *journal_prev_buf(struct journal *j)
{
	return j->buf + !j->reservations.idx;
}
/* Sequence number of oldest dirty journal entry */

static inline u64 journal_last_seq(struct journal *j)
{
	return j->pin.front;
}

static inline u64 journal_cur_seq(struct journal *j)
{
	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	return j->pin.back - 1;
}
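/*
 * j->pin is a FIFO with one journal_entry_pin_list per in-flight sequence
 * number: front corresponds to the oldest dirty entry, back - 1 to the
 * current one, so a pin_list's absolute FIFO index is the sequence number
 * it pins.
 */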
static inline u64 journal_pin_seq(struct journal *j,
				  struct journal_entry_pin_list *pin_list)
{
	return fifo_entry_idx_abs(&j->pin, pin_list);
}
u64 bch2_journal_pin_seq(struct journal *j, struct journal_entry_pin *pin)
{
	u64 ret = 0;

	spin_lock(&j->lock);
	if (journal_pin_active(pin))
		ret = journal_pin_seq(j, pin->pin_list);
	spin_unlock(&j->lock);

	return ret;
}
static inline void bch2_journal_add_entry_noreservation(struct journal_buf *buf,
			  unsigned type, enum btree_id id,
			  unsigned level,
			  const void *data, size_t u64s)
{
	struct jset *jset = buf->data;

	bch2_journal_add_entry_at(buf, le32_to_cpu(jset->u64s),
				  type, id, level, data, u64s);
	le32_add_cpu(&jset->u64s, jset_u64s(u64s));
}
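/*
 * Each jset_entry is a small header followed by u64s of payload, and
 * jset->u64s counts everything after the fixed jset header - so appending
 * an entry means writing it at the current end of the jset and growing
 * u64s by jset_u64s(u64s), i.e. payload plus entry header.
 */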
static struct jset_entry *bch2_journal_find_entry(struct jset *j, unsigned type,
						  enum btree_id id)
{
	struct jset_entry *entry;

	for_each_jset_entry_type(entry, j, type)
		if (entry->btree_id == id)
			return entry;

	return NULL;
}
struct bkey_i *bch2_journal_find_btree_root(struct bch_fs *c, struct jset *j,
					    enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry =
		bch2_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id);

	if (!entry)
		return NULL;

	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}
static void bch2_journal_add_btree_root(struct journal_buf *buf,
					enum btree_id id, struct bkey_i *k,
					unsigned level)
{
	bch2_journal_add_entry_noreservation(buf,
			     JOURNAL_ENTRY_BTREE_ROOT, id, level,
			     k, k->k.u64s);
}
static void journal_seq_blacklist_flush(struct journal *j,
					struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c =
		container_of(j, struct bch_fs, journal);
	struct journal_seq_blacklist *bl =
		container_of(pin, struct journal_seq_blacklist, pin);
	struct blacklisted_node n;
	struct closure cl;
	unsigned i;
	int ret;

	closure_init_stack(&cl);

	for (i = 0;; i++) {
		struct btree_iter iter;
		struct btree *b;

		mutex_lock(&j->blacklist_lock);
		if (i >= bl->nr_entries) {
			mutex_unlock(&j->blacklist_lock);
			break;
		}
		n = bl->entries[i];
		mutex_unlock(&j->blacklist_lock);

		__bch2_btree_iter_init(&iter, c, n.btree_id, n.pos, 0, 0, 0);

		b = bch2_btree_iter_peek_node(&iter);

		/* The node might have already been rewritten: */

		if (b->data->keys.seq == n.seq) {
			ret = bch2_btree_node_rewrite(c, &iter, n.seq, 0);
			if (ret) {
				bch2_btree_iter_unlock(&iter);
				bch2_fs_fatal_error(c,
					"error %i rewriting btree node with blacklisted journal seq",
					ret);
				bch2_journal_halt(j);
				return;
			}
		}

		bch2_btree_iter_unlock(&iter);
	}
	for (i = 0;; i++) {
		struct btree_update *as;
		struct pending_btree_node_free *d;

		mutex_lock(&j->blacklist_lock);
		if (i >= bl->nr_entries) {
			mutex_unlock(&j->blacklist_lock);
			break;
		}
		n = bl->entries[i];
		mutex_unlock(&j->blacklist_lock);
retry:
		mutex_lock(&c->btree_interior_update_lock);

		/*
		 * Is the node on the list of pending interior node updates -
		 * being freed? If so, wait for that to finish:
		 */
		for_each_pending_btree_node_free(c, as, d)
			if (n.seq == d->seq &&
			    n.btree_id == d->btree_id &&
			    !bkey_cmp(n.pos, d->key.k.p)) {
				closure_wait(&as->wait, &cl);
				mutex_unlock(&c->btree_interior_update_lock);
				closure_sync(&cl);
				goto retry;
			}

		mutex_unlock(&c->btree_interior_update_lock);
	}

	mutex_lock(&j->blacklist_lock);

	bch2_journal_pin_drop(j, &bl->pin);

	kfree(bl->entries);
	list_del(&bl->list);
	kfree(bl);

	mutex_unlock(&j->blacklist_lock);
}
static struct journal_seq_blacklist *
journal_seq_blacklist_find(struct journal *j, u64 seq)
{
	struct journal_seq_blacklist *bl;

	lockdep_assert_held(&j->blacklist_lock);

	list_for_each_entry(bl, &j->seq_blacklist, list)
		if (seq == bl->seq)
			return bl;

	return NULL;
}
static struct journal_seq_blacklist *
bch2_journal_seq_blacklisted_new(struct journal *j, u64 seq)
{
	struct journal_seq_blacklist *bl;

	lockdep_assert_held(&j->blacklist_lock);

	/*
	 * When we start the journal, bch2_journal_start() will skip over @seq:
	 */

	bl = kzalloc(sizeof(*bl), GFP_KERNEL);
	if (!bl)
		return NULL;

	bl->seq = seq;
	list_add_tail(&bl->list, &j->seq_blacklist);

	return bl;
}
/*
 * Returns true if @seq is newer than the most recent journal entry that got
 * written, and data corresponding to @seq should be ignored - also marks @seq
 * as blacklisted so that on future restarts the corresponding data will still
 * be ignored:
 */
int bch2_journal_seq_should_ignore(struct bch_fs *c, u64 seq, struct btree *b)
{
	struct journal *j = &c->journal;
	struct journal_seq_blacklist *bl = NULL;
	struct blacklisted_node *n;
	u64 journal_seq, i;
	int ret = 0;

	if (!seq)
		return 0;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	/* Interior updates aren't journalled: */

	BUG_ON(seq > journal_seq && test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags));

	/*
	 * Decrease this back to j->seq + 2 when we next rev the on disk format:
	 * increasing it temporarily to work around bug in old kernels
	 */
	bch2_fs_inconsistent_on(seq > journal_seq + 4, c,
		"bset journal seq too far in the future: %llu > %llu",
		seq, journal_seq);

	if (seq <= journal_seq &&
	    list_empty_careful(&j->seq_blacklist))
		return 0;

	mutex_lock(&j->blacklist_lock);
	if (seq <= journal_seq) {
		bl = journal_seq_blacklist_find(j, seq);
		if (!bl)
			goto out;
	} else {
		bch_verbose(c, "btree node %u:%llu:%llu has future journal sequence number %llu, blacklisting",
			    b->btree_id, b->key.k.p.inode, b->key.k.p.offset, seq);

		for (i = journal_seq + 1; i <= seq; i++) {
			bl = journal_seq_blacklist_find(j, i) ?:
				bch2_journal_seq_blacklisted_new(j, i);
			if (!bl) {
				ret = -ENOMEM;
				goto out;
			}
		}
	}

	for (n = bl->entries; n < bl->entries + bl->nr_entries; n++)
		if (b->data->keys.seq == n->seq &&
		    b->btree_id == n->btree_id &&
		    !bkey_cmp(b->key.k.p, n->pos))
			goto found;

	if (!bl->nr_entries ||
	    is_power_of_2(bl->nr_entries)) {
		n = krealloc(bl->entries,
			     max(bl->nr_entries * 2, 8UL) * sizeof(*n),
			     GFP_KERNEL);
		if (!n) {
			ret = -ENOMEM;
			goto out;
		}
		bl->entries = n;
	}

	bl->entries[bl->nr_entries++] = (struct blacklisted_node) {
		.seq		= b->data->keys.seq,
		.btree_id	= b->btree_id,
		.pos		= b->key.k.p,
	};
found:
	ret = 1;
out:
	mutex_unlock(&j->blacklist_lock);

	return ret;
}
/*
 * Journal replay/recovery:
 *
 * This code is all driven from bch2_fs_start(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
struct journal_list {
	struct closure		cl;
	struct mutex		lock;
	struct list_head	*head;
	int			ret;
};

#define JOURNAL_ENTRY_ADD_OK		0
#define JOURNAL_ENTRY_ADD_OUT_OF_RANGE	5
/*
 * Given a journal entry we just read, add it to the list of journal entries to
 * be replayed:
 */
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
			     struct journal_list *jlist, struct jset *j)
{
	struct journal_replay *i, *pos;
	struct list_head *where;
	size_t bytes = vstruct_bytes(j);
	__le64 last_seq;
	int ret;

	last_seq = !list_empty(jlist->head)
		? list_last_entry(jlist->head, struct journal_replay,
				  list)->j.last_seq
		: 0;

	/* Is this entry older than the range we need? */
	if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
		goto out;
	}

	/* Drop entries we don't need anymore */
	list_for_each_entry_safe(i, pos, jlist->head, list) {
		if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
			break;
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}

	list_for_each_entry_reverse(i, jlist->head, list) {
		/* Duplicate? */
		if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
			fsck_err_on(bytes != vstruct_bytes(&i->j) ||
				    memcmp(j, &i->j, bytes), c,
				    "found duplicate but non identical journal entries (seq %llu)",
				    le64_to_cpu(j->seq));
			goto found;
		}

		if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
			where = &i->list;
			goto add;
		}
	}

	where = jlist->head;
add:
	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
	if (!i) {
		ret = -ENOMEM;
		goto out;
	}

	list_add(&i->list, where);
	i->devs.nr = 0;
	memcpy(&i->j, j, bytes);
found:
	if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
		bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
	else
		fsck_err_on(1, c, "duplicate journal entries on same device");
	ret = JOURNAL_ENTRY_ADD_OK;
out:
fsck_err:
	return ret;
}
static struct nonce journal_nonce(const struct jset *jset)
{
	return (struct nonce) {{
		[0] = 0,
		[1] = ((__le32 *) &jset->seq)[0],
		[2] = ((__le32 *) &jset->seq)[1],
		[3] = BCH_NONCE_JOURNAL,
	}};
}
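/*
 * The nonce is derived from the entry's sequence number, so no two journal
 * entries are ever encrypted with the same nonce.
 */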
/* this fills in a range with empty jset_entries: */
static void journal_entry_null_range(void *start, void *end)
{
	struct jset_entry *entry;

	for (entry = start; entry != end; entry = vstruct_next(entry))
		memset(entry, 0, sizeof(*entry));
}
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
				struct jset_entry *entry,
				struct bkey_i *k, enum bkey_type key_type,
				const char *type)
{
	void *next = vstruct_next(entry);
	const char *invalid;
	char buf[160];
	int ret = 0;

	if (mustfix_fsck_err_on(!k->k.u64s, c,
			"invalid %s in journal: k->u64s 0", type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (mustfix_fsck_err_on((void *) bkey_next(k) >
				(void *) vstruct_next(entry), c,
			"invalid %s in journal: extends past end of journal entry",
			type)) {
		entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (mustfix_fsck_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
			"invalid %s in journal: bad format %u",
			type, k->k.format)) {
		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}

	if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN)
		bch2_bkey_swab(key_type, NULL, bkey_to_packed(k));

	invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k));
	if (invalid) {
		bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf),
				      bkey_i_to_s_c(k));
		mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
				 type, invalid, buf);

		le16_add_cpu(&entry->u64s, -k->k.u64s);
		memmove(k, bkey_next(k), next - (void *) bkey_next(k));
		journal_entry_null_range(vstruct_next(entry), next);
		return 0;
	}
fsck_err:
	return ret;
}
#define JOURNAL_ENTRY_REREAD	5
#define JOURNAL_ENTRY_NONE	6
#define JOURNAL_ENTRY_BAD	7

#define journal_entry_err(c, msg, ...)					\
({									\
	if (write == READ) {						\
		mustfix_fsck_err(c, msg, ##__VA_ARGS__);		\
	} else {							\
		bch_err(c, "detected corrupt metadata before write:\n"	\
			msg, ##__VA_ARGS__);				\
		ret = BCH_FSCK_ERRORS_NOT_FIXED;			\
		goto fsck_err;						\
	}								\
	true;								\
})

#define journal_entry_err_on(cond, c, msg, ...)				\
	((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
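/*
 * On the read side these errors are fixable by fsck; when validating just
 * before a write they mean metadata is already corrupt in memory, so the
 * write is refused rather than repaired.
 */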
static int journal_entry_validate_entries(struct bch_fs *c, struct jset *jset,
					  int write)
{
	struct jset_entry *entry;
	int ret = 0;

	vstruct_for_each(jset, entry) {
		void *next = vstruct_next(entry);
		struct bkey_i *k;

		if (journal_entry_err_on(vstruct_next(entry) >
					 vstruct_last(jset), c,
				"journal entry extends past end of jset")) {
			jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
			break;
		}

		switch (entry->type) {
		case JOURNAL_ENTRY_BTREE_KEYS:
			vstruct_for_each(entry, k) {
				ret = journal_validate_key(c, jset, entry, k,
						bkey_type(entry->level,
							  entry->btree_id),
						"key");
				if (ret)
					goto fsck_err;
			}
			break;
		case JOURNAL_ENTRY_BTREE_ROOT:
			k = entry->start;

			if (journal_entry_err_on(!entry->u64s ||
					le16_to_cpu(entry->u64s) != k->k.u64s, c,
					"invalid btree root journal entry: wrong number of keys")) {
				/*
				 * we don't want to null out this jset_entry,
				 * just the contents, so that later we can tell
				 * we were _supposed_ to have a btree root
				 */
				entry->u64s = 0;
				journal_entry_null_range(vstruct_next(entry), next);
				continue;
			}

			ret = journal_validate_key(c, jset, entry, k,
						   BKEY_TYPE_BTREE, "btree root");
			if (ret)
				goto fsck_err;
			break;
		case JOURNAL_ENTRY_PRIO_PTRS:
			break;
		case JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED:
			if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1, c,
				"invalid journal seq blacklist entry: bad size")) {
				journal_entry_null_range(entry,
						vstruct_next(entry));
			}
			break;
		default:
			journal_entry_err(c, "invalid journal entry type %u",
					  entry->type);
			journal_entry_null_range(entry, vstruct_next(entry));
			break;
		}
	}
fsck_err:
	return ret;
}
static int journal_entry_validate(struct bch_fs *c,
				  struct jset *jset, u64 sector,
				  unsigned bucket_sectors_left,
				  unsigned sectors_read,
				  int write)
{
	size_t bytes = vstruct_bytes(jset);
	struct bch_csum csum;
	int ret = 0;

	if (le64_to_cpu(jset->magic) != jset_magic(c))
		return JOURNAL_ENTRY_NONE;

	if (le32_to_cpu(jset->version) != BCACHE_JSET_VERSION) {
		bch_err(c, "unknown journal entry version %u",
			le32_to_cpu(jset->version));
		return BCH_FSCK_UNKNOWN_VERSION;
	}

	if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
			"journal entry too big (%zu bytes), sector %llu",
			bytes, sector)) {
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	if (bytes > sectors_read << 9)
		return JOURNAL_ENTRY_REREAD;

	if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
			"journal entry with unknown csum type %llu sector %llu",
			JSET_CSUM_TYPE(jset), sector))
		return JOURNAL_ENTRY_BAD;

	csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
	if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
			"journal checksum bad, sector %llu", sector)) {
		/* XXX: retry IO, when we start retrying checksum errors */
		/* XXX: note we might have missing journal entries */
		return JOURNAL_ENTRY_BAD;
	}

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
			"invalid journal entry: last_seq > seq"))
		jset->last_seq = jset->seq;

	return 0;
fsck_err:
	return ret;
}
struct journal_read_buf {
	void		*data;
	size_t		size;
};

static int journal_read_buf_realloc(struct journal_read_buf *b,
				    size_t new_size)
{
	void *n;

	/* the bios are sized for this many pages, max: */
	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
		return -ENOMEM;

	new_size = roundup_pow_of_two(new_size);
	n = kvpmalloc(new_size, GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	kvpfree(b->data, b->size);
	b->data = n;
	b->size = new_size;
	return 0;
}
static int journal_read_bucket(struct bch_dev *ca,
			       struct journal_read_buf *buf,
			       struct journal_list *jlist,
			       unsigned bucket, u64 *seq, bool *entries_found)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bio *bio = ja->bio;
	struct jset *j = NULL;
	unsigned sectors, sectors_read = 0;
	u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
	    end = offset + ca->mi.bucket_size;
	bool saw_bad = false;
	int ret = 0;

	pr_debug("reading %u", bucket);

	while (offset < end) {
		if (!sectors_read) {
reread:			sectors_read = min_t(unsigned,
					     end - offset, buf->size >> 9);

			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_iter.bi_sector	= offset;
			bio->bi_iter.bi_size	= sectors_read << 9;
			bio_set_op_attrs(bio, REQ_OP_READ, 0);
			bch2_bio_map(bio, buf->data);

			ret = submit_bio_wait(bio);

			if (bch2_dev_io_err_on(ret, ca,
					       "journal read from sector %llu",
					       offset) ||
			    bch2_meta_read_fault("journal"))
				return -EIO;

			j = buf->data;
		}

		ret = journal_entry_validate(c, j, offset,
					end - offset, sectors_read,
					READ);
		switch (ret) {
		case 0:
			break;
		case JOURNAL_ENTRY_REREAD:
			if (vstruct_bytes(j) > buf->size) {
				ret = journal_read_buf_realloc(buf,
							vstruct_bytes(j));
				if (ret)
					return ret;
			}
			goto reread;
		case JOURNAL_ENTRY_NONE:
			if (!saw_bad)
				return 0;
			sectors = c->opts.block_size;
			goto next_block;
		case JOURNAL_ENTRY_BAD:
			saw_bad = true;
			sectors = c->opts.block_size;
			goto next_block;
		default:
			return ret;
		}

		/*
		 * This happens sometimes if we don't have discards on -
		 * when we've partially overwritten a bucket with new
		 * journal entries. We don't need the rest of the
		 * bucket:
		 */
		if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
			return 0;

		ja->bucket_seq[bucket] = le64_to_cpu(j->seq);

		mutex_lock(&jlist->lock);
		ret = journal_entry_add(c, ca, jlist, j);
		mutex_unlock(&jlist->lock);

		switch (ret) {
		case JOURNAL_ENTRY_ADD_OK:
			*entries_found = true;
			break;
		case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
			break;
		default:
			return ret;
		}

		if (le64_to_cpu(j->seq) > *seq)
			*seq = le64_to_cpu(j->seq);

		sectors = vstruct_sectors(j, c->block_bits);
next_block:
		offset		+= sectors;
		sectors_read	-= sectors;
		j = ((void *) j) + (sectors << 9);
	}

	return 0;
}
static void bch2_journal_read_device(struct closure *cl)
{
#define read_bucket(b)							\
	({								\
		bool entries_found = false;				\
		ret = journal_read_bucket(ca, &buf, jlist, b, &seq,	\
					  &entries_found);		\
		if (ret)						\
			goto err;					\
		__set_bit(b, bitmap);					\
		entries_found;						\
	 })

	struct journal_device *ja =
		container_of(cl, struct journal_device, read);
	struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
	struct journal_list *jlist =
		container_of(cl->parent, struct journal_list, cl);
	struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev);
	struct journal_read_buf buf = { NULL, 0 };

	DECLARE_BITMAP(bitmap, ja->nr);
	unsigned i, l, r;
	u64 seq = 0;
	int ret;

	if (!ja->nr)
		goto out;

	bitmap_zero(bitmap, ja->nr);
	ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
	if (ret)
		goto err;

	pr_debug("%u journal buckets", ja->nr);
	/*
	 * If the device supports discard but not secure discard, we can't do
	 * the fancy fibonacci hash/binary search because the live journal
	 * entries might not form a contiguous range:
	 */
	for (i = 0; i < ja->nr; i++)
		read_bucket(i);
	goto search_done;

	if (!blk_queue_nonrot(q))
		goto linear_scan;

	/*
	 * Read journal buckets ordered by golden ratio hash to quickly
	 * find a sequence of buckets with valid journal entries
	 */
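	/*
	 * (2654435769 = 2^32 / golden ratio: multiplying by it and reducing
	 * mod ja->nr scatters successive values of i evenly across the
	 * buckets - Fibonacci hashing.)
	 */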
	for (i = 0; i < ja->nr; i++) {
		l = (i * 2654435769U) % ja->nr;

		if (test_bit(l, bitmap))
			break;

		if (read_bucket(l))
			goto bsearch;
	}

linear_scan:
	/*
	 * If that fails, check all the buckets we haven't checked
	 * already:
	 */
	pr_debug("falling back to linear search");

	for (l = find_first_zero_bit(bitmap, ja->nr);
	     l < ja->nr;
	     l = find_next_zero_bit(bitmap, ja->nr, l + 1))
		if (read_bucket(l))
			goto bsearch;

	/* no journal entries on this device? */
	if (l == ja->nr)
		goto out;
bsearch:
	r = find_next_bit(bitmap, ja->nr, l + 1);
	pr_debug("starting binary search, l %u r %u", l, r);

	while (l + 1 < r) {
		unsigned m = (l + r) >> 1;
		u64 cur_seq = seq;

		read_bucket(m);

		if (cur_seq != seq)
			l = m;
		else
			r = m;
	}
search_done:
	/*
	 * Find the journal bucket with the highest sequence number:
	 *
	 * If there's duplicate journal entries in multiple buckets (which
	 * definitely isn't supposed to happen, but...) - make sure to start
	 * cur_idx at the last of those buckets, so we don't deadlock trying to
	 * allocate
	 */
	seq = 0;

	for (i = 0; i < ja->nr; i++)
		if (ja->bucket_seq[i] >= seq &&
		    ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) {
			/*
			 * When journal_next_bucket() goes to allocate for
			 * the first time, it'll use the bucket after
			 * ja->cur_idx
			 */
			ja->cur_idx = i;
			seq = ja->bucket_seq[i];
		}

	/*
	 * Set last_idx to indicate the entire journal is full and needs to be
	 * reclaimed - journal reclaim will immediately reclaim whatever isn't
	 * pinned when it first runs:
	 */
	ja->last_idx = (ja->cur_idx + 1) % ja->nr;
	/*
	 * Read buckets in reverse order until we stop finding more journal
	 * entries:
	 */
	for (i = (ja->cur_idx + ja->nr - 1) % ja->nr;
	     i != ja->cur_idx;
	     i = (i + ja->nr - 1) % ja->nr)
		if (!test_bit(i, bitmap) &&
		    !read_bucket(i))
			break;
out:
	kvpfree(buf.data, buf.size);
	percpu_ref_put(&ca->io_ref);
	closure_return(cl);
	return;
err:
	mutex_lock(&jlist->lock);
	jlist->ret = ret;
	mutex_unlock(&jlist->lock);
	goto out;
#undef read_bucket
}
void bch2_journal_entries_free(struct list_head *list)
{

	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}
static int journal_seq_blacklist_read(struct journal *j,
				      struct journal_replay *i,
				      struct journal_entry_pin_list *p)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct jset_entry *entry;
	struct journal_seq_blacklist *bl;
	u64 seq;

	for_each_jset_entry_type(entry, &i->j,
			JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED) {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);
		seq = le64_to_cpu(bl_entry->seq);

		bch_verbose(c, "blacklisting existing journal seq %llu", seq);

		bl = bch2_journal_seq_blacklisted_new(j, seq);
		if (!bl)
			return -ENOMEM;

		journal_pin_add_entry(j, p, &bl->pin,
				      journal_seq_blacklist_flush);
		bl->written = true;
	}

	return 0;
}
static inline bool journal_has_keys(struct list_head *list)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;

	list_for_each_entry(i, list, list)
		for_each_jset_key(k, _n, entry, &i->j)
			return true;

	return false;
}
int bch2_journal_read(struct bch_fs *c, struct list_head *list)
{
	struct journal *j = &c->journal;
	struct journal_list jlist;
	struct journal_replay *i;
	struct journal_entry_pin_list *p;
	struct bch_dev *ca;
	u64 cur_seq, end_seq, seq;
	unsigned iter, keys = 0, entries = 0;
	size_t nr;
	bool degraded = false;
	int ret = 0;

	closure_init_stack(&jlist.cl);
	mutex_init(&jlist.lock);
	jlist.head = list;
	jlist.ret = 0;

	for_each_member_device(ca, c, iter) {
		if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
			continue;

		if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
		     ca->mi.state == BCH_MEMBER_STATE_RO) &&
		    percpu_ref_tryget(&ca->io_ref))
			closure_call(&ca->journal.read,
				     bch2_journal_read_device,
				     system_unbound_wq,
				     &jlist.cl);
		else
			degraded = true;
	}

	closure_sync(&jlist.cl);

	if (jlist.ret)
		return jlist.ret;

	if (list_empty(list)) {
		bch_err(c, "no journal entries found");
		return BCH_FSCK_REPAIR_IMPOSSIBLE;
	}
	fsck_err_on(c->sb.clean && journal_has_keys(list), c,
		    "filesystem marked clean but journal has keys to replay");

	list_for_each_entry(i, list, list) {
		ret = journal_entry_validate_entries(c, &i->j, READ);
		if (ret)
			goto fsck_err;

		/*
		 * If we're mounting in degraded mode - if we didn't read all
		 * the devices - this is wrong:
		 */
		if (!degraded &&
		    (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
		     fsck_err_on(!bch2_replicas_marked(c, BCH_DATA_JOURNAL,
						       i->devs), c,
				 "superblock not marked as containing replicas (type %u)",
				 BCH_DATA_JOURNAL))) {
			ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, i->devs);
			if (ret)
				return ret;
		}
	}
	i = list_last_entry(list, struct journal_replay, list);

	nr = le64_to_cpu(i->j.seq) - le64_to_cpu(i->j.last_seq) + 1;

	if (nr > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%zu open entries)", nr);
			return -ENOMEM;
		}
	}

	atomic64_set(&j->seq, le64_to_cpu(i->j.seq));
	j->last_seq_ondisk = le64_to_cpu(i->j.last_seq);

	j->pin.front	= le64_to_cpu(i->j.last_seq);
	j->pin.back	= le64_to_cpu(i->j.seq) + 1;

	fifo_for_each_entry_ptr(p, &j->pin, seq) {
		INIT_LIST_HEAD(&p->list);
		INIT_LIST_HEAD(&p->flushed);
		atomic_set(&p->count, 0);
		p->devs.nr = 0;
	}
	mutex_lock(&j->blacklist_lock);

	list_for_each_entry(i, list, list) {
		p = journal_seq_pin(j, le64_to_cpu(i->j.seq));

		atomic_set(&p->count, 1);
		p->devs = i->devs;

		if (journal_seq_blacklist_read(j, i, p)) {
			mutex_unlock(&j->blacklist_lock);
			return -ENOMEM;
		}
	}

	mutex_unlock(&j->blacklist_lock);
	cur_seq = journal_last_seq(j);
	end_seq = le64_to_cpu(list_last_entry(list,
				struct journal_replay, list)->j.seq);

	list_for_each_entry(i, list, list) {
		struct jset_entry *entry;
		struct bkey_i *k, *_n;
		bool blacklisted;

		mutex_lock(&j->blacklist_lock);
		while (cur_seq < le64_to_cpu(i->j.seq) &&
		       journal_seq_blacklist_find(j, cur_seq))
			cur_seq++;

		blacklisted = journal_seq_blacklist_find(j,
							 le64_to_cpu(i->j.seq));
		mutex_unlock(&j->blacklist_lock);

		fsck_err_on(blacklisted, c,
			    "found blacklisted journal entry %llu",
			    le64_to_cpu(i->j.seq));

		fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
			"journal entries %llu-%llu missing! (replaying %llu-%llu)",
			cur_seq, le64_to_cpu(i->j.seq) - 1,
			journal_last_seq(j), end_seq);

		cur_seq = le64_to_cpu(i->j.seq) + 1;

		for_each_jset_key(k, _n, entry, &i->j)
			keys++;
		entries++;
	}

	bch_info(c, "journal read done, %i keys in %i entries, seq %llu",
		 keys, entries, journal_cur_seq(j));
fsck_err:
	return ret;
}
int bch2_journal_mark(struct bch_fs *c, struct list_head *list)
{
	struct bkey_i *k, *n;
	struct jset_entry *j;
	struct journal_replay *r;
	int ret = 0;

	list_for_each_entry(r, list, list)
		for_each_jset_key(k, n, j, &r->j) {
			enum bkey_type type = bkey_type(j->level, j->btree_id);
			struct bkey_s_c k_s_c = bkey_i_to_s_c(k);

			if (btree_type_has_ptrs(type)) {
				ret = bch2_btree_mark_key_initial(c, type, k_s_c);
				if (ret)
					return ret;
			}
		}

	return ret;
}
static bool journal_entry_is_open(struct journal *j)
{
	return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}
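/*
 * cur_entry_offset doubles as the journal's state: offsets below
 * JOURNAL_ENTRY_CLOSED_VAL mean the current entry is open for reservations,
 * while the JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL sentinels
 * mark the entry closed or the journal shut down after an error.
 */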
void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
{
	struct journal_buf *w = journal_prev_buf(j);

	atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);

	if (!need_write_just_set &&
	    test_bit(JOURNAL_NEED_WRITE, &j->flags))
		__bch2_time_stats_update(j->delay_time,
					 j->need_write_time);
#if 0
	closure_call(&j->io, journal_write, NULL, NULL);
#else
	/* Shut sparse up: */
	closure_init(&j->io, NULL);
	set_closure_fn(&j->io, journal_write, NULL);
	journal_write(&j->io);
#endif
}
static void journal_pin_new_entry(struct journal *j, int count)
{
	struct journal_entry_pin_list *p;

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	p = fifo_push_ref(&j->pin);

	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}
static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	memset(buf->has_inode, 0, sizeof(buf->has_inode));

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;
}
static inline size_t journal_entry_u64s_reserve(struct journal_buf *buf)
{
	return BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
}
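/*
 * Every journal entry reserves room for one maximum-size btree root key per
 * btree, since roots aren't added until just before the entry is written
 * out (see journal_write()).
 */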
static enum {
	JOURNAL_ENTRY_ERROR,
	JOURNAL_ENTRY_INUSE,
	JOURNAL_ENTRY_CLOSED,
	JOURNAL_UNLOCKED,
} journal_buf_switch(struct journal *j, bool need_write_just_set)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return JOURNAL_ENTRY_CLOSED;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return JOURNAL_ENTRY_ERROR;

		if (new.prev_buf_unwritten)
			return JOURNAL_ENTRY_INUSE;

		/*
		 * avoid race between setting buf->data->u64s and
		 * journal_res_put starting write:
		 */
		journal_state_inc(&new);

		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.idx++;
		new.prev_buf_unwritten = 1;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	buf = &j->buf[old.idx];
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	j->prev_buf_sectors =
		vstruct_blocks_plus(buf->data, c->block_bits,
				    journal_entry_u64s_reserve(buf)) *
		c->opts.block_size;
	BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);

	journal_reclaim_fast(j);
	/* XXX: why set this here, and not in journal_write()? */
	buf->data->last_seq	= cpu_to_le64(journal_last_seq(j));

	journal_pin_new_entry(j, 1);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);
	spin_unlock(&j->lock);

	if (c->bucket_journal_seq > 1 << 14) {
		c->bucket_journal_seq = 0;
		bch2_bucket_seq_cleanup(c);
	}

	/* ugh - might be called from __journal_res_get() under wait_event() */
	__set_current_state(TASK_RUNNING);
	bch2_journal_buf_put(j, old.idx, need_write_just_set);

	return JOURNAL_UNLOCKED;
}
void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	journal_wake(j);
	closure_wake_up(&journal_cur_buf(j)->wait);
	closure_wake_up(&journal_prev_buf(j)->wait);
}
static unsigned journal_dev_buckets_available(struct journal *j,
					      struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;
	unsigned next = (ja->cur_idx + 1) % ja->nr;
	unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;

	/*
	 * Hack to avoid a deadlock during journal replay:
	 * journal replay might require setting a new btree
	 * root, which requires writing another journal entry -
	 * thus, if the journal is full (and this happens when
	 * replaying the first journal bucket's entries) we're
	 * screwed.
	 *
	 * So don't let the journal fill up unless we're in
	 * replay:
	 */
	if (test_bit(JOURNAL_REPLAY_DONE, &j->flags))
		available = max((int) available - 2, 0);

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (ja->bucket_seq[ja->last_idx] >= journal_last_seq(j))
		available = max((int) available - 1, 0);

	return available;
}
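/*
 * Worked example of the arithmetic above: with ja->nr = 8, cur_idx = 2 and
 * last_idx = 6, next = 3 and available = (6 + 8 - 3) % 8 = 3 - the three
 * buckets between the one we're about to write and the oldest bucket that
 * hasn't been reclaimed yet.
 */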
/* returns number of sectors available for next journal entry: */
static int journal_entry_sectors(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
	unsigned sectors_available = UINT_MAX;
	unsigned i, nr_online = 0, nr_devs = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_JOURNAL]) {
		struct journal_device *ja = &ca->journal;
		unsigned buckets_required = 0;

		if (!ja->nr)
			continue;

		sectors_available = min_t(unsigned, sectors_available,
					  ca->mi.bucket_size);

		/*
		 * Note that we don't allocate the space for a journal entry
		 * until we write it out - thus, if we haven't started the write
		 * for the previous entry we have to make sure we have space for
		 * it too:
		 */
		if (bch2_extent_has_device(e.c, ca->dev_idx)) {
			if (j->prev_buf_sectors > ja->sectors_free)
				buckets_required++;

			if (j->prev_buf_sectors + sectors_available >
			    ja->sectors_free + ca->mi.bucket_size)
				buckets_required++;
		} else {
			if (j->prev_buf_sectors + sectors_available >
			    ca->mi.bucket_size)
				buckets_required++;

			buckets_required++;
		}

		if (journal_dev_buckets_available(j, ca) >= buckets_required)
			nr_devs++;
		nr_online++;
	}
	rcu_read_unlock();

	if (nr_online < c->opts.metadata_replicas_required)
		return -EROFS;

	if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas))
		return 0;

	return sectors_available;
}
/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * 1:		success
 * 0:		journal currently full (must wait)
 * -EROFS:	insufficient rw devices
 * -EIO:	journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	ssize_t u64s;
	int sectors;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (!fifo_free(&j->pin))
		return 0;

	sectors = journal_entry_sectors(j);
	if (sectors <= 0)
		return sectors;

	buf->disk_sectors	= sectors;

	sectors = min_t(unsigned, sectors, buf->size >> 9);
	j->cur_buf_sectors	= sectors;

	u64s = (sectors << 9) / sizeof(u64);

	/* Subtract the journal header */
	u64s -= sizeof(struct jset) / sizeof(u64);
	/*
	 * Btree roots, prio pointers don't get added until right before we do
	 * the write:
	 */
	u64s -= journal_entry_u64s_reserve(buf);
	u64s  = max_t(ssize_t, 0L, u64s);

	BUG_ON(u64s >= JOURNAL_ENTRY_CLOSED_VAL);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return 0;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return -EIO;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		__bch2_time_stats_update(j->blocked_time,
					 j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(system_freezable_wq,
			 &j->write_work,
			 msecs_to_jiffies(j->write_delay_ms));
	journal_wake(j);
	return 1;
}
void bch2_journal_start(struct bch_fs *c)
{
	struct journal *j = &c->journal;
	struct journal_seq_blacklist *bl;
	u64 new_seq = 0;

	list_for_each_entry(bl, &j->seq_blacklist, list)
		new_seq = max(new_seq, bl->seq);

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);

	while (journal_cur_seq(j) < new_seq)
		journal_pin_new_entry(j, 0);

	/*
	 * journal_buf_switch() only inits the next journal entry when it
	 * closes an open journal entry - the very first journal entry gets
	 * initialized here:
	 */
	journal_pin_new_entry(j, 1);
	bch2_journal_buf_init(j);

	spin_unlock(&j->lock);

	/*
	 * Adding entries to the next journal entry before allocating space on
	 * disk for the next journal entry - this is ok, because these entries
	 * only have to go down with the next journal entry we write:
	 */
	list_for_each_entry(bl, &j->seq_blacklist, list)
		if (!bl->written) {
			bch2_journal_add_entry_noreservation(journal_cur_buf(j),
					JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED,
					0, 0, &bl->seq, 1);

			journal_pin_add_entry(j,
					      &fifo_peek_back(&j->pin),
					      &bl->pin,
					      journal_seq_blacklist_flush);
			bl->written = true;
		}

	queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
}
int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
{
	struct journal *j = &c->journal;
	struct bkey_i *k, *_n;
	struct jset_entry *entry;
	struct journal_replay *i, *n;
	int ret = 0;

	list_for_each_entry_safe(i, n, list, list) {
		j->replay_pin_list =
			journal_seq_pin(j, le64_to_cpu(i->j.seq));

		for_each_jset_key(k, _n, entry, &i->j) {

			if (entry->btree_id == BTREE_ID_ALLOC) {
				/*
				 * allocation code handles replay for
				 * BTREE_ID_ALLOC keys:
				 */
				ret = bch2_alloc_replay_key(c, k->k.p);
			} else {
				/*
				 * We might cause compressed extents to be
				 * split, so we need to pass in a
				 * disk_reservation:
				 */
				struct disk_reservation disk_res =
					bch2_disk_reservation_init(c, 0);

				ret = bch2_btree_insert(c, entry->btree_id, k,
							&disk_res, NULL, NULL,
							BTREE_INSERT_NOFAIL|
							BTREE_INSERT_JOURNAL_REPLAY);
			}

			if (ret) {
				bch_err(c, "journal replay: error %d while replaying key",
					ret);
				goto err;
			}

			cond_resched();
		}

		if (atomic_dec_and_test(&j->replay_pin_list->count))
			journal_wake(j);
	}

	j->replay_pin_list = NULL;

	bch2_journal_set_replay_done(j);
	ret = bch2_journal_flush_all_pins(j);
err:
	bch2_journal_entries_free(list);
	return ret;
}
/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				       unsigned nr)
{
	struct journal *j = &c->journal;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	struct disk_reservation disk_res = { 0, 0 };
	struct closure cl;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	closure_init_stack(&cl);

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	/*
	 * note: journal buckets aren't really counted as _sectors_ used yet, so
	 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
	 * when space used goes up without a reservation - but we do need the
	 * reservation to ensure we'll actually be able to allocate:
	 */
	if (bch2_disk_reservation_get(c, &disk_res,
				      bucket_to_sector(ca, nr - ja->nr), 1, 0))
		return -ENOSPC;

	mutex_lock(&c->sb_lock);

	ret = -ENOMEM;
	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq)
		goto err;

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets)
		goto err;

	spin_lock(&j->lock);
	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	spin_unlock(&j->lock);

	while (ja->nr < nr) {
		struct open_bucket *ob;
		size_t bucket;
		int ob_idx;

		ob_idx = bch2_bucket_alloc(c, ca, RESERVE_ALLOC, false, &cl);
		if (ob_idx < 0) {
			if (!closure_wait(&c->freelist_wait, &cl))
				closure_sync(&cl);
			continue;
		}

		ob = c->open_buckets + ob_idx;
		bucket = sector_to_bucket(ca, ob->ptr.offset);

		spin_lock(&j->lock);
		__array_insert_item(ja->buckets,		ja->nr, ja->last_idx);
		__array_insert_item(ja->bucket_seq,		ja->nr, ja->last_idx);
		__array_insert_item(journal_buckets->buckets,	ja->nr, ja->last_idx);

		ja->buckets[ja->last_idx] = bucket;
		ja->bucket_seq[ja->last_idx] = 0;
		journal_buckets->buckets[ja->last_idx] = cpu_to_le64(bucket);

		if (ja->last_idx < ja->nr) {
			if (ja->cur_idx >= ja->last_idx)
				ja->cur_idx++;
			ja->last_idx++;
		}
		ja->nr++;
		spin_unlock(&j->lock);

		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
					  ca->mi.bucket_size,
					  gc_phase(GC_PHASE_SB), 0);

		bch2_open_bucket_put(c, ob);
	}

	bch2_write_super(c);

	ret = 0;
err:
	mutex_unlock(&c->sb_lock);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	bch2_disk_reservation_put(c, &disk_res);

	if (!ret)
		bch2_dev_allocator_add(c, ca);

	closure_sync(&cl);

	return ret;
}

int bch2_dev_journal_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/*
	 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 10,
			 (1 << 20) / ca->mi.bucket_size));

	return bch2_set_nr_journal_buckets(c, ca, nr);
}
/**
 * journal_reclaim_fast - do the fast part of journal reclaim
 *
 * Called from IO submission context, does not block. Cleans up after btree
 * write completions by advancing the journal pin and each cache's last_idx,
 * kicking off discards and background reclaim as necessary.
 */
static void journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
		BUG_ON(!fifo_pop(&j->pin, temp));
		popped = true;
	}

	if (popped)
		journal_wake(j);
}
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, marking it as dirty:
 */

static inline void __journal_pin_add(struct journal *j,
				     struct journal_entry_pin_list *pin_list,
				     struct journal_entry_pin *pin,
				     journal_pin_flush_fn flush_fn)
{
	BUG_ON(journal_pin_active(pin));
	BUG_ON(!atomic_read(&pin_list->count));

	atomic_inc(&pin_list->count);
	pin->pin_list	= pin_list;
	pin->flush	= flush_fn;

	if (flush_fn)
		list_add(&pin->list, &pin_list->list);
	else
		INIT_LIST_HEAD(&pin->list);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}
static void journal_pin_add_entry(struct journal *j,
				  struct journal_entry_pin_list *pin_list,
				  struct journal_entry_pin *pin,
				  journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);
	__journal_pin_add(j, pin_list, pin, flush_fn);
	spin_unlock(&j->lock);
}

void bch2_journal_pin_add(struct journal *j,
			  struct journal_res *res,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list = res->ref
		? journal_seq_pin(j, res->seq)
		: j->replay_pin_list;

	spin_lock(&j->lock);
	__journal_pin_add(j, pin_list, pin, flush_fn);
	spin_unlock(&j->lock);
}
static inline void __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list = pin->pin_list;

	if (!journal_pin_active(pin))
		return;

	pin->pin_list = NULL;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	if (atomic_dec_and_test(&pin_list->count) &&
	    pin_list == &fifo_peek_front(&j->pin))
		journal_reclaim_fast(j);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	__journal_pin_drop(j, pin);
	spin_unlock(&j->lock);
}
void bch2_journal_pin_add_if_older(struct journal *j,
				   struct journal_entry_pin *src_pin,
				   struct journal_entry_pin *pin,
				   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	if (journal_pin_active(src_pin) &&
	    (!journal_pin_active(pin) ||
	     journal_pin_seq(j, src_pin->pin_list) <
	     journal_pin_seq(j, pin->pin_list))) {
		__journal_pin_drop(j, pin);
		__journal_pin_add(j, src_pin->pin_list, pin, flush_fn);
	}

	spin_unlock(&j->lock);
}
static struct journal_entry_pin *
__journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret;
	u64 iter;

	/* no need to iterate over empty fifo entries: */
	journal_reclaim_fast(j);

	fifo_for_each_entry_ptr(pin_list, &j->pin, iter) {
		if (iter > seq_to_flush)
			break;

		ret = list_first_entry_or_null(&pin_list->list,
				struct journal_entry_pin, list);
		if (ret) {
			/* must be list_del_init(), see bch2_journal_pin_drop() */
			list_move(&ret->list, &pin_list->flushed);
			*seq = iter;
			return ret;
		}
	}

	return NULL;
}

static struct journal_entry_pin *
journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
{
	struct journal_entry_pin *ret;

	spin_lock(&j->lock);
	ret = __journal_get_next_pin(j, seq_to_flush, seq);
	spin_unlock(&j->lock);

	return ret;
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      struct journal_entry_pin **pin,
			      u64 *pin_seq)
{
	int ret;

	*pin = NULL;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = (*pin = __journal_get_next_pin(j, seq_to_flush, pin_seq)) != NULL ||
		!test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		(fifo_used(&j->pin) == 1 &&
		 atomic_read(&fifo_peek_front(&j->pin).count) == 1);
	spin_unlock(&j->lock);

	return ret;
}

int bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin *pin;
	u64 pin_seq;
	bool flush;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return 0;

	while (1) {
		wait_event(j->wait, journal_flush_done(j, seq_to_flush, &pin, &pin_seq));
		if (!pin)
			break;

		/* flushing a journal pin might cause a new one to be added: */
		pin->flush(j, pin, pin_seq);
	}

	spin_lock(&j->lock);
	flush = journal_last_seq(j) != j->last_seq_ondisk ||
		(seq_to_flush == U64_MAX && c->btree_roots_dirty);
	spin_unlock(&j->lock);

	return flush ? bch2_journal_meta(j) : 0;
}

int bch2_journal_flush_all_pins(struct journal *j)
{
	return bch2_journal_flush_pins(j, U64_MAX);
}
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->nr &&
		(ja->last_idx != ja->cur_idx &&
		 ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
	spin_unlock(&j->lock);

	return ret;
}
/**
 * journal_reclaim_work - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static void journal_reclaim_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(to_delayed_work(work),
				struct bch_fs, journal.reclaim_work);
	struct journal *j = &c->journal;
	struct bch_dev *ca;
	struct journal_entry_pin *pin;
	u64 seq, seq_to_flush = 0;
	unsigned iter, bucket_to_flush;
	unsigned long next_flush;
	bool reclaim_lock_held = false, need_flush;

	/*
	 * Advance last_idx to point to the oldest journal entry containing
	 * btree node updates that have not yet been written out
	 */
	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (should_discard_bucket(j, ja)) {
			if (!reclaim_lock_held) {
				/*
				 * ugh:
				 * might be called from __journal_res_get()
				 * under wait_event() - have to go back to
				 * TASK_RUNNING before doing something that
				 * would block, but only if we're doing work:
				 */
				__set_current_state(TASK_RUNNING);

				mutex_lock(&j->reclaim_lock);
				reclaim_lock_held = true;
				/* recheck under reclaim_lock: */
				continue;
			}

			if (ca->mi.discard &&
			    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->last_idx]),
					ca->mi.bucket_size, GFP_NOIO, 0);

			spin_lock(&j->lock);
			ja->last_idx = (ja->last_idx + 1) % ja->nr;
			spin_unlock(&j->lock);

			journal_wake(j);
		}

		/*
		 * Write out enough btree nodes to free up 50% journal
		 * buckets
		 */
		spin_lock(&j->lock);
		bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
		seq_to_flush = max_t(u64, seq_to_flush,
				     ja->bucket_seq[bucket_to_flush]);
		spin_unlock(&j->lock);
	}

	if (reclaim_lock_held)
		mutex_unlock(&j->reclaim_lock);

	/* Also flush if the pin fifo is more than half full */
	spin_lock(&j->lock);
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	/*
	 * If it's been longer than j->reclaim_delay_ms since we last flushed,
	 * make sure to flush at least one journal pin:
	 */
	next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
	need_flush = time_after(jiffies, next_flush);

	while ((pin = journal_get_next_pin(j, need_flush
					   ? U64_MAX
					   : seq_to_flush, &seq))) {
		__set_current_state(TASK_RUNNING);
		pin->flush(j, pin, seq);
		need_flush = false;

		j->last_flushed = jiffies;
	}

	if (!test_bit(BCH_FS_RO, &c->flags))
		queue_delayed_work(system_freezable_wq, &j->reclaim_work,
				   msecs_to_jiffies(j->reclaim_delay_ms));
}
/**
 * journal_next_bucket - move on to the next journal bucket if possible
 */
static int journal_write_alloc(struct journal *j, struct journal_buf *w,
			       unsigned sectors)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_s_extent e;
	struct bch_extent_ptr *ptr;
	struct journal_device *ja;
	struct bch_dev *ca;
	struct dev_alloc_list devs_sorted;
	unsigned i, replicas, replicas_want =
		READ_ONCE(c->opts.metadata_replicas);

	spin_lock(&j->lock);
	e = bkey_i_to_s_extent(&j->key);

	/*
	 * Drop any pointers to devices that have been removed, are no longer
	 * empty, or filled up their current journal bucket:
	 *
	 * Note that a device may have had a small amount of free space (perhaps
	 * one sector) that wasn't enough for the smallest possible journal
	 * entry - that's why we drop pointers to devices <= current free space,
	 * i.e. whichever device was limiting the current journal entry size.
	 */
	extent_for_each_ptr_backwards(e, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ca->mi.state != BCH_MEMBER_STATE_RW ||
		    ca->journal.sectors_free <= sectors)
			__bch2_extent_drop_ptr(e, ptr);
		else
			ca->journal.sectors_free -= sectors;
	}

	replicas = bch2_extent_nr_ptrs(e.c);

	rcu_read_lock();
	devs_sorted = bch2_wp_alloc_list(c, &j->wp,
					 &c->rw_devs[BCH_DATA_JOURNAL]);

	for (i = 0; i < devs_sorted.nr; i++) {
		ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
		if (!ca)
			continue;

		ja = &ca->journal;
		if (!ja->nr)
			continue;

		if (replicas >= replicas_want)
			break;

		/*
		 * Check that we can use this device, and aren't already using
		 * it:
		 */
		if (bch2_extent_has_device(e.c, ca->dev_idx) ||
		    !journal_dev_buckets_available(j, ca) ||
		    sectors > ca->mi.bucket_size)
			continue;

		j->wp.next_alloc[ca->dev_idx] += U32_MAX;
		bch2_wp_rescale(c, ca, &j->wp);

		ja->sectors_free = ca->mi.bucket_size - sectors;
		ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
		ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);

		extent_ptr_append(bkey_i_to_extent(&j->key),
			(struct bch_extent_ptr) {
				  .offset = bucket_to_sector(ca,
					ja->buckets[ja->cur_idx]),
				  .dev = ca->dev_idx,
		});

		replicas++;
	}
	rcu_read_unlock();

	j->prev_buf_sectors = 0;

	bkey_copy(&w->key, &j->key);
	spin_unlock(&j->lock);

	if (replicas < c->opts.metadata_replicas_required)
		return -EROFS;

	return 0;
}
static void journal_write_compact(struct jset *jset)
{
	struct jset_entry *i, *next, *prev = NULL;

	/*
	 * Simple compaction, dropping empty jset_entries (from journal
	 * reservations that weren't fully used) and merging jset_entries that
	 * can be:
	 *
	 * If we wanted to be really fancy here, we could sort all the keys in
	 * the jset and drop keys that were overwritten - probably not worth it:
	 */
	vstruct_for_each_safe(jset, i, next) {
		unsigned u64s = le16_to_cpu(i->u64s);

		/* Empty entry: */
		if (!u64s)
			continue;

		/* Can we merge with previous entry? */
		if (prev &&
		    i->btree_id == prev->btree_id &&
		    i->level	== prev->level &&
		    i->type	== prev->type &&
		    i->type	== JOURNAL_ENTRY_BTREE_KEYS &&
		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
			memmove_u64s_down(vstruct_next(prev),
					  i->_data,
					  u64s);
			le16_add_cpu(&prev->u64s, u64s);
			continue;
		}

		/* Couldn't merge, move i into new position (after prev): */
		prev = prev ? vstruct_next(prev) : jset->start;
		if (i != prev)
			memmove_u64s_down(prev, i, jset_u64s(u64s));
	}

	prev = prev ? vstruct_next(prev) : jset->start;
	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
}
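/*
 * For example, two adjacent JOURNAL_ENTRY_BTREE_KEYS entries for the same
 * btree and level are merged above by copying the second entry's keys to
 * follow the first's and summing their u64s counts, so only one entry
 * header survives.
 */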
static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
{
	/* we aren't holding j->lock: */
	unsigned new_size = READ_ONCE(j->buf_size_want);
	void *new_buf;

	if (buf->size >= new_size)
		return;

	new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
	if (!new_buf)
		return;

	memcpy(new_buf, buf->data, buf->size);
	kvpfree(buf->data, buf->size);
	buf->data	= new_buf;
	buf->size	= new_size;
}
static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *w = journal_prev_buf(j);
	struct bch_devs_list devs =
		bch2_extent_devs(bkey_i_to_s_c_extent(&w->key));

	if (!devs.nr) {
		bch_err(c, "unable to write journal to sufficient devices");
		goto err;
	}

	if (bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs))
		goto err;
out:
	__bch2_time_stats_update(j->write_time, j->write_start_time);

	spin_lock(&j->lock);
	j->last_seq_ondisk = le64_to_cpu(w->data->last_seq);

	journal_seq_pin(j, le64_to_cpu(w->data->seq))->devs = devs;

	/*
	 * Updating last_seq_ondisk may let journal_reclaim_work() discard more
	 * buckets:
	 *
	 * Must come before signaling write completion, for
	 * bch2_fs_journal_stop():
	 */
	mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);

	/* also must come before signalling write completion: */
	closure_debug_destroy(cl);

	BUG_ON(!j->reservations.prev_buf_unwritten);
	atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
		     &j->reservations.counter);

	closure_wake_up(&w->wait);
	journal_wake(j);

	if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
		mod_delayed_work(system_freezable_wq, &j->write_work, 0);
	spin_unlock(&j->lock);
	return;
err:
	bch2_fatal_error(c);
	bch2_journal_halt(j);
	goto out;
}
static void journal_write_endio(struct bio *bio)
{
	struct bch_dev *ca = bio->bi_private;
	struct journal *j = &ca->fs->journal;

	if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
	    bch2_meta_write_fault("journal")) {
		struct journal_buf *w = journal_prev_buf(j);
		unsigned long flags;

		spin_lock_irqsave(&j->err_lock, flags);
		bch2_extent_drop_device(bkey_i_to_s_extent(&w->key), ca->dev_idx);
		spin_unlock_irqrestore(&j->err_lock, flags);
	}

	closure_put(&j->io);
	percpu_ref_put(&ca->io_ref);
}
static void journal_write(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_buf *w = journal_prev_buf(j);
	struct jset *jset;
	struct bio *bio;
	struct bch_extent_ptr *ptr;
	unsigned i, sectors, bytes;

	journal_buf_realloc(j, w);
	jset = w->data;

	j->write_start_time = local_clock();
	mutex_lock(&c->btree_root_lock);
	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (r->alive)
			bch2_journal_add_btree_root(w, i, &r->key, r->level);
	}
	c->btree_roots_dirty = false;
	mutex_unlock(&c->btree_root_lock);

	journal_write_compact(jset);

	jset->read_clock	= cpu_to_le16(c->prio_clock[READ].hand);
	jset->write_clock	= cpu_to_le16(c->prio_clock[WRITE].hand);
	jset->magic		= cpu_to_le64(jset_magic(c));
	jset->version		= cpu_to_le32(BCACHE_JSET_VERSION);

	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));

	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
	    journal_entry_validate_entries(c, jset, WRITE))
		goto err;

	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
		     jset->encrypted_start,
		     vstruct_end(jset) - (void *) jset->encrypted_start);

	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
				  journal_nonce(jset), jset);

	if (!bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
	    journal_entry_validate_entries(c, jset, WRITE))
		goto err;

	sectors = vstruct_sectors(jset, c->block_bits);
	BUG_ON(sectors > j->prev_buf_sectors);

	bytes = vstruct_bytes(w->data);
	memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);

	if (journal_write_alloc(j, w, sectors)) {
		bch2_journal_halt(j);
		bch_err(c, "Unable to allocate journal write");
		bch2_fatal_error(c);
		continue_at(cl, journal_write_done, system_highpri_wq);
	}

	/*
	 * XXX: we really should just disable the entire journal in nochanges
	 * mode
	 */
	if (c->opts.nochanges)
		goto no_io;

	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);
		if (!percpu_ref_tryget(&ca->io_ref)) {
			/* XXX: fix this */
			bch_err(c, "missing device for journal write\n");
			continue;
		}

		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
			     sectors);

		bio = ca->journal.bio;
		bio_reset(bio);
		bio_set_dev(bio, ca->disk_sb.bdev);
		bio->bi_iter.bi_sector	= ptr->offset;
		bio->bi_iter.bi_size	= sectors << 9;
		bio->bi_end_io		= journal_write_endio;
		bio->bi_private		= ca;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch2_bio_map(bio, jset);

		trace_journal_write(bio);
		closure_bio_submit(bio, cl);

		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
	}

	for_each_rw_member(ca, c, i)
		if (journal_flushes_device(ca) &&
		    !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
			percpu_ref_get(&ca->io_ref);

			bio = ca->journal.bio;
			bio_reset(bio);
			bio_set_dev(bio, ca->disk_sb.bdev);
			bio->bi_opf		= REQ_OP_FLUSH;
			bio->bi_end_io		= journal_write_endio;
			bio->bi_private		= ca;
			closure_bio_submit(bio, cl);
		}

no_io:
	extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
		ptr->offset += sectors;

	continue_at(cl, journal_write_done, system_highpri_wq);
err:
	bch2_inconsistent_error(c);
	continue_at(cl, journal_write_done, system_highpri_wq);
}
/*
 * returns true if there's nothing to flush and no journal write still in flight
 */
static bool journal_flush_write(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = !j->reservations.prev_buf_unwritten;

	if (!journal_entry_is_open(j)) {
		spin_unlock(&j->lock);
		return ret;
	}

	set_bit(JOURNAL_NEED_WRITE, &j->flags);
	if (journal_buf_switch(j, false) == JOURNAL_UNLOCKED)
		ret = false;
	else
		spin_unlock(&j->lock);
	return ret;
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_flush_write(j);
}
/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	u64 seq = 0;
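	/*
	 * has_inode is a hashed bitmap, so a set bit can be a false positive
	 * (hash collision) - but if the bit is clear in both buffers there
	 * are definitely no unflushed keys for this inode.
	 */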
	if (!test_bit(h, j->buf[0].has_inode) &&
	    !test_bit(h, j->buf[1].has_inode))
		return 0;

	spin_lock(&j->lock);
	if (test_bit(h, journal_cur_buf(j)->has_inode))
		seq = journal_cur_seq(j);
	else if (test_bit(h, journal_prev_buf(j)->has_inode))
		seq = journal_cur_seq(j) - 1;
	spin_unlock(&j->lock);

	return seq;
}
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned u64s_min, unsigned u64s_max)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	int ret;
retry:
	ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
	if (ret)
		return ret;

	spin_lock(&j->lock);
	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
	if (ret) {
		spin_unlock(&j->lock);
		return 1;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->size >> 9 < buf->disk_sectors &&
	    buf->size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->size << 1);

	/*
	 * Close the current journal entry if necessary, then try to start a new
	 * one:
	 */
	switch (journal_buf_switch(j, false)) {
	case JOURNAL_ENTRY_ERROR:
		spin_unlock(&j->lock);
		return -EROFS;
	case JOURNAL_ENTRY_INUSE:
		/* haven't finished writing out the previous one: */
		spin_unlock(&j->lock);
		trace_journal_entry_full(c);
		goto blocked;
	case JOURNAL_ENTRY_CLOSED:
		break;
	case JOURNAL_UNLOCKED:
		goto retry;
	}

	/* We now have a new, closed journal buf - see if we can open it: */
	ret = journal_entry_open(j);
	spin_unlock(&j->lock);

	if (ret < 0)
		return ret;
	if (ret)
		goto retry;

	/* Journal's full, we have to wait */

	/*
	 * Direct reclaim - can't rely on reclaim from work item
	 * due to freezing:
	 */
	journal_reclaim_work(&j->reclaim_work.work);

	trace_journal_full(c);
blocked:
	if (!j->res_get_blocked_start)
		j->res_get_blocked_start = local_clock() ?: 1;
	return 0;
}
/*
 * Essentially the entry function to the journalling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal write is the structure used to set up journal writes; the
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned u64s_min, unsigned u64s_max)
{
	int ret;

	wait_event(j->wait,
		   (ret = __journal_res_get(j, res, u64s_min, u64s_max)));
	return ret < 0 ? ret : 0;
}
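
/*
 * Typical reservation lifecycle, as a sketch (add_keys_sketch() is
 * hypothetical; real callers live in the btree update path, and the lockless
 * fast path in bch2_journal_res_get() is what normally avoids the slowpath
 * above):
 *
 *	static int add_keys_sketch(struct journal *j, enum btree_id id,
 *				   struct bkey_i *k)
 *	{
 *		struct journal_res res = { 0 };
 *		unsigned u64s = jset_u64s(k->k.u64s);
 *		int ret;
 *
 *		ret = bch2_journal_res_get(j, &res, u64s, u64s);
 *		if (ret)
 *			return ret;
 *
 *		bch2_journal_add_keys(j, &res, id, k);	// copy key into entry
 *		bch2_journal_res_put(j, &res);		// drop reservation
 *		return 0;
 *	}
 */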
u64 bch2_journal_last_unwritten_seq(struct journal *j)
{
	u64 seq;

	spin_lock(&j->lock);
	seq = journal_cur_seq(j);
	if (j->reservations.prev_buf_unwritten)
		seq--;
	spin_unlock(&j->lock);

	return seq;
}

/*
 * Open the journal entry @seq will be part of, if it isn't open already;
 * returns 0 and registers @parent on async_wait if the entry couldn't be
 * opened yet:
 */
int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *parent)
{
	int ret;

	spin_lock(&j->lock);
	BUG_ON(seq > journal_cur_seq(j));

	if (seq < journal_cur_seq(j) ||
	    journal_entry_is_open(j)) {
		spin_unlock(&j->lock);
		return 1;
	}

	ret = journal_entry_open(j);
	if (!ret)
		closure_wait(&j->async_wait, parent);
	spin_unlock(&j->lock);

	if (!ret)
		journal_reclaim_work(&j->reclaim_work.work);

	return ret;
}

/* Wait for journal entry @seq to be written, without forcing a write: */
void bch2_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
{
	spin_lock(&j->lock);

	BUG_ON(seq > journal_cur_seq(j));

	if (bch2_journal_error(j)) {
		spin_unlock(&j->lock);
		return;
	}

	if (seq == journal_cur_seq(j)) {
		if (!closure_wait(&journal_cur_buf(j)->wait, parent))
			BUG();
	} else if (seq + 1 == journal_cur_seq(j) &&
		   j->reservations.prev_buf_unwritten) {
		if (!closure_wait(&journal_prev_buf(j)->wait, parent))
			BUG();

		smp_mb();

		/* check if raced with write completion (or failure) */
		if (!j->reservations.prev_buf_unwritten ||
		    bch2_journal_error(j))
			closure_wake_up(&journal_prev_buf(j)->wait);
	}

	spin_unlock(&j->lock);
}

/*
 * Like bch2_journal_wait_on_seq(), except that it triggers a write
 * immediately if necessary:
 */
void bch2_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	BUG_ON(seq > journal_cur_seq(j));

	if (bch2_journal_error(j)) {
		spin_unlock(&j->lock);
		return;
	}

	if (seq == journal_cur_seq(j)) {
		bool set_need_write = false;

		buf = journal_cur_buf(j);

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		if (!test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			j->need_write_time = local_clock();
			set_need_write = true;
		}

		switch (journal_buf_switch(j, set_need_write)) {
		case JOURNAL_ENTRY_ERROR:
			if (parent)
				closure_wake_up(&buf->wait);
			break;
		case JOURNAL_ENTRY_CLOSED:
			/*
			 * Journal entry hasn't been opened yet, but caller
			 * claims it has something
			 */
			BUG();
		case JOURNAL_ENTRY_INUSE:
			break;
		case JOURNAL_UNLOCKED:
			return;
		}
	} else if (parent &&
		   seq + 1 == journal_cur_seq(j) &&
		   j->reservations.prev_buf_unwritten) {
		buf = journal_prev_buf(j);

		if (!closure_wait(&buf->wait, parent))
			BUG();

		smp_mb();

		/* check if raced with write completion (or failure) */
		if (!j->reservations.prev_buf_unwritten ||
		    bch2_journal_error(j))
			closure_wake_up(&buf->wait);
	}

	spin_unlock(&j->lock);
}
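
/*
 * Waiters are closure based; a sketch of the pattern (the on-stack closure
 * here is illustrative - real callers typically pass the closure of a larger
 * async operation, or NULL to just kick off the write):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);	// wait for the write (or error)
 */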
static int journal_seq_flushed(struct journal *j, u64 seq)
{
	struct journal_buf *buf;
	int ret = 1;

	spin_lock(&j->lock);
	BUG_ON(seq > journal_cur_seq(j));

	if (seq == journal_cur_seq(j)) {
		bool set_need_write = false;

		ret = 0;

		buf = journal_cur_buf(j);

		if (!test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			j->need_write_time = local_clock();
			set_need_write = true;
		}

		switch (journal_buf_switch(j, set_need_write)) {
		case JOURNAL_ENTRY_ERROR:
			ret = -EIO;
			break;
		case JOURNAL_ENTRY_CLOSED:
			/*
			 * Journal entry hasn't been opened yet, but caller
			 * claims it has something
			 */
			BUG();
		case JOURNAL_ENTRY_INUSE:
			break;
		case JOURNAL_UNLOCKED:
			return 0;
		}
	} else if (seq + 1 == journal_cur_seq(j) &&
		   j->reservations.prev_buf_unwritten) {
		ret = bch2_journal_error(j);
	}

	spin_unlock(&j->lock);

	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));

	bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
	struct journal_res res;
	unsigned u64s = jset_u64s(0);

	memset(&res, 0, sizeof(res));

	bch2_journal_res_get(j, &res, u64s, u64s);
	bch2_journal_res_put(j, &res);

	bch2_journal_flush_seq_async(j, res.seq, parent);
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	unsigned u64s = jset_u64s(0);
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, u64s, u64s);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}
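
/*
 * Since every journal entry also carries the current btree roots, forcing an
 * empty entry to disk is enough to persist them; e.g. (error handling
 * elided):
 *
 *	ret = bch2_journal_meta(&c->journal);
 *
 * gets a zero-size reservation purely so that the resulting entry is opened,
 * written and flushed.
 */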
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}

int bch2_journal_flush_device(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct bch_devs_list devs;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	ret = bch2_journal_flush_pins(j, seq);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);

	seq = 0;

	spin_lock(&j->lock);
	while (!ret && seq < j->pin.back) {
		seq = max(seq, journal_last_seq(j));
		devs = journal_seq_pin(j, seq)->devs;
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}
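
/*
 * Note on the two modes above (a summary of intent, not literal call sites):
 * a non-negative dev_idx flushes every journal entry pinning data on that
 * device - e.g. before removing it - while a negative dev_idx flushes any
 * entry whose replica list is below opts.metadata_replicas; either way the
 * surviving entries' replicas are then re-marked under replicas_gc.
 */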

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	struct journal_buf *w;
	bool ret;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	w = j->buf + !state.idx;

	ret = state.prev_buf_unwritten &&
		bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	spin_lock(&j->lock);
	bch2_extent_drop_device(bkey_i_to_s_extent(&j->key), ca->dev_idx);
	spin_unlock(&j->lock);

	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	wait_event(j->wait, journal_flush_write(j));

	cancel_delayed_work_sync(&j->write_work);
	cancel_delayed_work_sync(&j->reclaim_work);
}

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}
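
/*
 * The superblock field this parses is just a flat array of bucket indices
 * following the common field header (from the on-disk format definitions):
 *
 *	struct bch_sb_field_journal {
 *		struct bch_sb_field	field;
 *		__le64			buckets[0];
 *	};
 *
 * bch2_nr_journal_buckets() computes ja->nr from the field's size.
 */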
void bch2_fs_journal_exit(struct journal *j)
{
	kvpfree(j->buf[1].data, j->buf[1].size);
	kvpfree(j->buf[0].data, j->buf[0].size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	INIT_DELAYED_WORK(&j->reclaim_work, journal_reclaim_work);
	mutex_init(&j->blacklist_lock);
	INIT_LIST_HEAD(&j->seq_blacklist);
	mutex_init(&j->reclaim_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	j->buf[0].size		= JOURNAL_ENTRY_SIZE_MIN;
	j->buf[1].size		= JOURNAL_ENTRY_SIZE_MIN;
	j->write_delay_ms	= 100;
	j->reclaim_delay_ms	= 100;

	bkey_extent_init(&j->key);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->buf[0].data = kvpmalloc(j->buf[0].size, GFP_KERNEL)) ||
	    !(j->buf[1].data = kvpmalloc(j->buf[1].size, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}
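
/*
 * The entire reservation state lives in a single atomic64 (union
 * journal_res_state, defined in journal_types.h), which is why it can be
 * initialized with one atomic64_set() above and sampled without j->lock
 * elsewhere; a sketch of an unlocked read:
 *
 *	union journal_res_state s;
 *
 *	s.v = atomic64_read(&j->reservations.counter);
 *	if (s.prev_buf_unwritten)
 *		...	// j->buf + !s.idx is still being written
 *
 * Packing idx, the per-buf reference counts and cur_entry_offset into one
 * word is what lets journal_res_get_fast() claim space with a single cmpxchg.
 */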

/* debug: */

ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state *s = &j->reservations;
	struct bch_dev *ca;
	unsigned iter;
	ssize_t ret = 0;

	rcu_read_lock();
	spin_lock(&j->lock);

	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			 "active journal entries:\t%llu\n"
			 "seq:\t\t\t%llu\n"
			 "last_seq:\t\t%llu\n"
			 "last_seq_ondisk:\t%llu\n"
			 "reservation count:\t%u\n"
			 "reservation offset:\t%u\n"
			 "current entry u64s:\t%u\n"
			 "io in flight:\t\t%i\n"
			 "need write:\t\t%i\n"
			 "dirty:\t\t\t%i\n"
			 "replay done:\t\t%i\n",
			 fifo_used(&j->pin),
			 journal_cur_seq(j),
			 journal_last_seq(j),
			 j->last_seq_ondisk,
			 journal_state_count(*s, s->idx),
			 s->cur_entry_offset,
			 j->cur_entry_u64s,
			 s->prev_buf_unwritten,
			 test_bit(JOURNAL_NEED_WRITE, &j->flags),
			 journal_entry_is_open(j),
			 test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	for_each_member_device_rcu(ca, c, iter,
				   &c->rw_devs[BCH_DATA_JOURNAL]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "dev %u:\n"
				 "\tnr\t\t%u\n"
				 "\tcur_idx\t\t%u (seq %llu)\n"
				 "\tlast_idx\t%u (seq %llu)\n",
				 iter, ja->nr,
				 ja->cur_idx, ja->bucket_seq[ja->cur_idx],
				 ja->last_idx, ja->bucket_seq[ja->last_idx]);
	}

	spin_unlock(&j->lock);
	rcu_read_unlock();

	return ret;
}

ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	ssize_t ret = 0;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "%llu: count %u\n",
				 i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "\t%p %pf\n",
					 pin, pin->flush);

		if (!list_empty(&pin_list->flushed))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "\t%p %pf\n",
					 pin, pin->flush);
	}
	spin_unlock(&j->lock);

	return ret;
}