2 * bcache journalling code, for btree insertions
4 * Copyright 2012 Google, Inc.
9 #include "bkey_methods.h"
12 #include "btree_update.h"
24 #include <trace/events/bcache.h>
26 static void journal_write(struct closure *);
27 static void journal_reclaim_fast(struct journal *);
28 static void journal_pin_add_entry(struct journal *,
29 struct journal_entry_pin_list *,
30 struct journal_entry_pin *,
31 journal_pin_flush_fn);
33 static inline struct journal_buf *journal_cur_buf(struct journal *j)
35 return j->buf + j->reservations.idx;
38 static inline struct journal_buf *journal_prev_buf(struct journal *j)
40 return j->buf + !j->reservations.idx;
43 /* Sequence number of oldest dirty journal entry */
45 static inline u64 last_seq(struct journal *j)
47 return atomic64_read(&j->seq) - fifo_used(&j->pin) + 1;
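/*
 * The front of the pin fifo corresponds to last_seq(), the oldest dirty
 * entry, so a pin list's sequence number is last_seq() plus its index in
 * the fifo - which is what journal_pin_seq() computes below.
 */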
50 static inline u64 journal_pin_seq(struct journal *j,
51 struct journal_entry_pin_list *pin_list)
53 return last_seq(j) + fifo_entry_idx(&j->pin, pin_list);
56 static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
57 struct jset_entry *entry, unsigned type)
59 while (entry < vstruct_last(jset)) {
60 if (JOURNAL_ENTRY_TYPE(entry) == type)
63 entry = vstruct_next(entry);
69 #define for_each_jset_entry_type(entry, jset, type) \
70 for (entry = (jset)->start; \
71 (entry = __jset_entry_type_next(jset, entry, type)); \
72 entry = vstruct_next(entry))
74 #define for_each_jset_key(k, _n, entry, jset) \
75 for_each_jset_entry_type(entry, jset, JOURNAL_ENTRY_BTREE_KEYS) \
76 vstruct_for_each_safe(entry, k, _n)
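/*
 * for_each_jset_key() is a nested iteration: the outer loop walks the
 * JOURNAL_ENTRY_BTREE_KEYS entries in a jset, the inner loop walks the
 * bkeys packed within each of those entries.
 */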
78 static inline void bch_journal_add_entry(struct journal_buf *buf,
79 const void *data, size_t u64s,
80 unsigned type, enum btree_id id,
83 struct jset *jset = buf->data;
85 bch_journal_add_entry_at(buf, data, u64s, type, id, level,
86 le32_to_cpu(jset->u64s));
87 le32_add_cpu(&jset->u64s, jset_u64s(u64s));
90 static struct jset_entry *bch_journal_find_entry(struct jset *j, unsigned type,
93 struct jset_entry *entry;
95 for_each_jset_entry_type(entry, j, type)
96 if (entry->btree_id == id)
102 struct bkey_i *bch_journal_find_btree_root(struct cache_set *c, struct jset *j,
103 enum btree_id id, unsigned *level)
106 struct jset_entry *entry =
107 bch_journal_find_entry(j, JOURNAL_ENTRY_BTREE_ROOT, id);
113 *level = entry->level;
118 static void bch_journal_add_btree_root(struct journal_buf *buf,
119 enum btree_id id, struct bkey_i *k,
122 bch_journal_add_entry(buf, k, k->k.u64s,
123 JOURNAL_ENTRY_BTREE_ROOT, id, level);
126 static inline void bch_journal_add_prios(struct journal *j,
127 struct journal_buf *buf)
130 * no prio bucket ptrs yet... XXX should change the allocator so this
133 if (!buf->nr_prio_buckets)
136 bch_journal_add_entry(buf, j->prio_buckets, buf->nr_prio_buckets,
137 JOURNAL_ENTRY_PRIO_PTRS, 0, 0);
140 static void journal_seq_blacklist_flush(struct journal *j,
141 struct journal_entry_pin *pin)
143 struct cache_set *c =
144 container_of(j, struct cache_set, journal);
145 struct journal_seq_blacklist *bl =
146 container_of(pin, struct journal_seq_blacklist, pin);
147 struct blacklisted_node n;
152 closure_init_stack(&cl);
155 struct btree_iter iter;
158 mutex_lock(&j->blacklist_lock);
159 if (i >= bl->nr_entries) {
160 mutex_unlock(&j->blacklist_lock);
164 mutex_unlock(&j->blacklist_lock);
166 bch_btree_iter_init(&iter, c, n.btree_id, n.pos);
167 iter.is_extents = false;
169 b = bch_btree_iter_peek_node(&iter);
171 /* The node might have already been rewritten: */
173 if (b->data->keys.seq == n.seq &&
174 !bkey_cmp(b->key.k.p, n.pos)) {
175 ret = bch_btree_node_rewrite(&iter, b, &cl);
177 bch_btree_iter_unlock(&iter);
180 if (ret == -EAGAIN ||
184 /* -EROFS or perhaps -ENOSPC - bail out: */
190 bch_btree_iter_unlock(&iter);
196 struct btree_interior_update *as;
197 struct pending_btree_node_free *d;
199 mutex_lock(&j->blacklist_lock);
200 if (i >= bl->nr_entries) {
201 mutex_unlock(&j->blacklist_lock);
205 mutex_unlock(&j->blacklist_lock);
207 mutex_lock(&c->btree_interior_update_lock);
210 * Is the node on the list of pending interior node updates -
211 * being freed? If so, wait for that to finish:
213 for_each_pending_btree_node_free(c, as, d)
214 if (n.seq == d->seq &&
215 n.btree_id == d->btree_id &&
217 !bkey_cmp(n.pos, d->key.k.p)) {
218 closure_wait(&as->wait, &cl);
219 mutex_unlock(&c->btree_interior_update_lock);
224 mutex_unlock(&c->btree_interior_update_lock);
227 mutex_lock(&j->blacklist_lock);
229 bch_journal_pin_drop(j, &bl->pin);
234 mutex_unlock(&j->blacklist_lock);
237 static struct journal_seq_blacklist *
238 journal_seq_blacklist_find(struct journal *j, u64 seq)
240 struct journal_seq_blacklist *bl;
242 lockdep_assert_held(&j->blacklist_lock);
244 list_for_each_entry(bl, &j->seq_blacklist, list)
251 static struct journal_seq_blacklist *
252 bch_journal_seq_blacklisted_new(struct journal *j, u64 seq)
254 struct journal_seq_blacklist *bl;
256 lockdep_assert_held(&j->blacklist_lock);
258 bl = kzalloc(sizeof(*bl), GFP_KERNEL);
263 list_add_tail(&bl->list, &j->seq_blacklist);
268 * Returns true if @seq is newer than the most recent journal entry that got
269 * written, and data corresponding to @seq should be ignored - also marks @seq
270 * as blacklisted so that on future restarts the corresponding data will still
273 int bch_journal_seq_should_ignore(struct cache_set *c, u64 seq, struct btree *b)
275 struct journal *j = &c->journal;
276 struct journal_seq_blacklist *bl = NULL;
277 struct blacklisted_node *n;
284 journal_seq = atomic64_read(&j->seq);
286 /* Interior updates aren't journalled: */
288 BUG_ON(seq > journal_seq && test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags));
290 if (seq <= journal_seq) {
291 if (list_empty_careful(&j->seq_blacklist))
294 mutex_lock(&j->blacklist_lock);
295 ret = journal_seq_blacklist_find(j, seq) != NULL;
296 mutex_unlock(&j->blacklist_lock);
301 * Decrease this back to j->seq + 2 when we next rev the on disk format:
302 * increasing it temporarily to work around a bug in old kernels
304 bch_fs_inconsistent_on(seq > journal_seq + 4, c,
305 "bset journal seq too far in the future: %llu > %llu",
308 bch_verbose(c, "btree node %u:%llu:%llu has future journal sequence number %llu, blacklisting",
309 b->btree_id, b->key.k.p.inode, b->key.k.p.offset, seq);
312 * When we start the journal, bch_journal_start() will skip over @seq:
315 mutex_lock(&j->blacklist_lock);
317 for (i = journal_seq + 1; i <= seq; i++) {
318 bl = journal_seq_blacklist_find(j, i) ?:
319 bch_journal_seq_blacklisted_new(j, i);
327 for (n = bl->entries; n < bl->entries + bl->nr_entries; n++)
328 if (b->data->keys.seq == n->seq &&
329 b->btree_id == n->btree_id &&
330 !bkey_cmp(b->key.k.p, n->pos))
333 if (!bl->nr_entries ||
334 is_power_of_2(bl->nr_entries)) {
335 n = krealloc(bl->entries,
336 max(bl->nr_entries * 2, 8UL) * sizeof(*n),
345 bl->entries[bl->nr_entries++] = (struct blacklisted_node) {
346 .seq = b->data->keys.seq,
347 .btree_id = b->btree_id,
353 mutex_unlock(&j->blacklist_lock);
358 * Journal replay/recovery:
360 * This code is all driven from run_cache_set(); we first read the journal
361 * entries, do some other stuff, then we mark all the keys in the journal
362 * entries (same as garbage collection would), then we replay them - reinserting
363 * them into the cache in precisely the same order as they appear in the
366 * We only journal keys that go in leaf nodes, which simplifies things quite a
370 struct journal_list {
373 struct list_head *head;
377 #define JOURNAL_ENTRY_ADD_OK 0
378 #define JOURNAL_ENTRY_ADD_OUT_OF_RANGE 5
381 * Given a journal entry we just read, add it to the list of journal entries to
384 static int journal_entry_add(struct cache_set *c, struct journal_list *jlist,
387 struct journal_replay *i, *pos;
388 struct list_head *where;
389 size_t bytes = vstruct_bytes(j);
393 mutex_lock(&jlist->lock);
395 last_seq = !list_empty(jlist->head)
396 ? list_last_entry(jlist->head, struct journal_replay,
400 /* Is this entry older than the range we need? */
401 if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
402 ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
406 /* Drop entries we don't need anymore */
407 list_for_each_entry_safe(i, pos, jlist->head, list) {
408 if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
414 list_for_each_entry_reverse(i, jlist->head, list) {
416 if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
417 fsck_err_on(bytes != vstruct_bytes(&i->j) ||
418 memcmp(j, &i->j, bytes), c,
419 "found duplicate but non identical journal entries (seq %llu)",
420 le64_to_cpu(j->seq));
422 ret = JOURNAL_ENTRY_ADD_OK;
426 if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
434 i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
440 memcpy(&i->j, j, bytes);
441 list_add(&i->list, where);
442 ret = JOURNAL_ENTRY_ADD_OK;
445 mutex_unlock(&jlist->lock);
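/*
 * Nonce used for journal checksums/encryption: derived from the entry's
 * 64 bit sequence number plus the BCH_NONCE_JOURNAL type, so every
 * journal entry is checksummed/encrypted with a distinct nonce.
 */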
449 static struct nonce journal_nonce(const struct jset *jset)
451 return (struct nonce) {{
453 [1] = ((__le32 *) &jset->seq)[0],
454 [2] = ((__le32 *) &jset->seq)[1],
455 [3] = BCH_NONCE_JOURNAL,
459 static void journal_entry_null_range(void *start, void *end)
461 struct jset_entry *entry;
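/*
 * Blank out the entries in [start, end) in place rather than shifting
 * later data down, so the rest of the jset stays parseable.
 */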
463 for (entry = start; entry != end; entry = vstruct_next(entry)) {
468 SET_JOURNAL_ENTRY_TYPE(entry, 0);
472 static int journal_validate_key(struct cache_set *c, struct jset *j,
473 struct jset_entry *entry,
474 struct bkey_i *k, enum bkey_type key_type,
477 void *next = vstruct_next(entry);
482 if (mustfix_fsck_err_on(!k->k.u64s, c,
483 "invalid %s in journal: k->u64s 0", type)) {
484 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
485 journal_entry_null_range(vstruct_next(entry), next);
489 if (mustfix_fsck_err_on((void *) bkey_next(k) >
490 (void *) vstruct_next(entry), c,
491 "invalid %s in journal: extends past end of journal entry",
493 entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
494 journal_entry_null_range(vstruct_next(entry), next);
498 if (mustfix_fsck_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
499 "invalid %s in journal: bad format %u",
500 type, k->k.format)) {
501 le16_add_cpu(&entry->u64s, -k->k.u64s);
502 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
503 journal_entry_null_range(vstruct_next(entry), next);
507 if (JSET_BIG_ENDIAN(j) != CPU_BIG_ENDIAN)
508 bch_bkey_swab(key_type, NULL, bkey_to_packed(k));
510 invalid = bkey_invalid(c, key_type, bkey_i_to_s_c(k));
512 bch_bkey_val_to_text(c, key_type, buf, sizeof(buf),
514 mustfix_fsck_err(c, "invalid %s in journal: %s", type, buf);
516 le16_add_cpu(&entry->u64s, -k->k.u64s);
517 memmove(k, bkey_next(k), next - (void *) bkey_next(k));
518 journal_entry_null_range(vstruct_next(entry), next);
525 #define JOURNAL_ENTRY_REREAD 5
526 #define JOURNAL_ENTRY_NONE 6
527 #define JOURNAL_ENTRY_BAD 7
529 static int journal_entry_validate(struct cache_set *c,
530 struct jset *j, u64 sector,
531 unsigned bucket_sectors_left,
532 unsigned sectors_read)
534 struct jset_entry *entry;
535 size_t bytes = vstruct_bytes(j);
536 struct bch_csum csum;
539 if (le64_to_cpu(j->magic) != jset_magic(c))
540 return JOURNAL_ENTRY_NONE;
542 if (le32_to_cpu(j->version) != BCACHE_JSET_VERSION) {
543 bch_err(c, "unknown journal entry version %u",
544 le32_to_cpu(j->version));
545 return BCH_FSCK_UNKNOWN_VERSION;
548 if (mustfix_fsck_err_on(bytes > bucket_sectors_left << 9 ||
549 bytes > c->journal.entry_size_max, c,
550 "journal entry too big (%zu bytes), sector %llu",
552 /* XXX: note we might have missing journal entries */
553 return JOURNAL_ENTRY_BAD;
556 if (bytes > sectors_read << 9)
557 return JOURNAL_ENTRY_REREAD;
559 if (fsck_err_on(!bch_checksum_type_valid(c, JSET_CSUM_TYPE(j)), c,
560 "journal entry with unknown csum type %llu sector %llu",
561 JSET_CSUM_TYPE(j), sector))
562 return JOURNAL_ENTRY_BAD;
564 csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
565 if (mustfix_fsck_err_on(bch_crc_cmp(csum, j->csum), c,
566 "journal checksum bad, sector %llu", sector)) {
567 /* XXX: retry IO, when we start retrying checksum errors */
568 /* XXX: note we might have missing journal entries */
569 return JOURNAL_ENTRY_BAD;
572 bch_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
574 vstruct_end(j) - (void *) j->encrypted_start);
576 if (mustfix_fsck_err_on(le64_to_cpu(j->last_seq) > le64_to_cpu(j->seq), c,
577 "invalid journal entry: last_seq > seq"))
578 j->last_seq = j->seq;
580 vstruct_for_each(j, entry) {
583 if (mustfix_fsck_err_on(vstruct_next(entry) >
585 "journal entry extends past end of jset")) {
586 j->u64s = cpu_to_le32((u64 *) entry - j->_data);
590 switch (JOURNAL_ENTRY_TYPE(entry)) {
591 case JOURNAL_ENTRY_BTREE_KEYS:
592 vstruct_for_each(entry, k) {
593 ret = journal_validate_key(c, j, entry, k,
594 bkey_type(entry->level,
602 case JOURNAL_ENTRY_BTREE_ROOT:
605 if (mustfix_fsck_err_on(!entry->u64s ||
606 le16_to_cpu(entry->u64s) != k->k.u64s, c,
607 "invalid btree root journal entry: wrong number of keys")) {
608 journal_entry_null_range(entry,
609 vstruct_next(entry));
613 ret = journal_validate_key(c, j, entry, k,
614 BKEY_TYPE_BTREE, "btree root");
619 case JOURNAL_ENTRY_PRIO_PTRS:
622 case JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED:
623 if (mustfix_fsck_err_on(le16_to_cpu(entry->u64s) != 1, c,
624 "invalid journal seq blacklist entry: bad size")) {
625 journal_entry_null_range(entry,
626 vstruct_next(entry));
631 mustfix_fsck_err(c, "invalid journal entry type %llu",
632 JOURNAL_ENTRY_TYPE(entry));
633 journal_entry_null_range(entry, vstruct_next(entry));
642 struct journal_read_buf {
647 static int journal_read_buf_realloc(struct journal_read_buf *b,
652 new_size = roundup_pow_of_two(new_size);
653 n = (void *) __get_free_pages(GFP_KERNEL, get_order(new_size));
657 free_pages((unsigned long) b->data, get_order(b->size));
663 static int journal_read_bucket(struct cache *ca,
664 struct journal_read_buf *buf,
665 struct journal_list *jlist,
666 unsigned bucket, u64 *seq, bool *entries_found)
668 struct cache_set *c = ca->set;
669 struct journal_device *ja = &ca->journal;
670 struct bio *bio = ja->bio;
671 struct jset *j = NULL;
672 unsigned sectors, sectors_read = 0;
673 u64 offset = bucket_to_sector(ca, ja->buckets[bucket]),
674 end = offset + ca->mi.bucket_size;
675 bool saw_bad = false;
678 pr_debug("reading %u", bucket);
680 while (offset < end) {
682 reread: sectors_read = min_t(unsigned,
683 end - offset, buf->size >> 9);
686 bio->bi_bdev = ca->disk_sb.bdev;
687 bio->bi_iter.bi_sector = offset;
688 bio->bi_iter.bi_size = sectors_read << 9;
689 bio_set_op_attrs(bio, REQ_OP_READ, 0);
690 bch_bio_map(bio, buf->data);
692 ret = submit_bio_wait(bio);
694 if (bch_dev_fatal_io_err_on(ret, ca,
695 "journal read from sector %llu",
697 bch_meta_read_fault("journal"))
703 ret = journal_entry_validate(c, j, offset,
704 end - offset, sectors_read);
708 case JOURNAL_ENTRY_REREAD:
709 if (vstruct_bytes(j) > buf->size) {
710 ret = journal_read_buf_realloc(buf,
716 case JOURNAL_ENTRY_NONE:
719 sectors = c->sb.block_size;
721 case JOURNAL_ENTRY_BAD:
723 sectors = c->sb.block_size;
730 * This happens sometimes if we don't have discards on -
731 * when we've partially overwritten a bucket with new
732 * journal entries. We don't need the rest of the
735 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket])
738 ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
740 ret = journal_entry_add(c, jlist, j);
742 case JOURNAL_ENTRY_ADD_OK:
743 *entries_found = true;
745 case JOURNAL_ENTRY_ADD_OUT_OF_RANGE:
751 if (le64_to_cpu(j->seq) > *seq)
752 *seq = le64_to_cpu(j->seq);
754 sectors = vstruct_sectors(j, c->block_bits);
758 sectors_read -= sectors;
759 j = ((void *) j) + (sectors << 9);
765 static void bch_journal_read_device(struct closure *cl)
767 #define read_bucket(b) \
769 bool entries_found = false; \
770 ret = journal_read_bucket(ca, &buf, jlist, b, &seq, \
774 __set_bit(b, bitmap); \
778 struct journal_device *ja =
779 container_of(cl, struct journal_device, read);
780 struct cache *ca = container_of(ja, struct cache, journal);
781 struct journal_list *jlist =
782 container_of(cl->parent, struct journal_list, cl);
783 struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev);
784 struct journal_read_buf buf = { NULL, 0 };
786 DECLARE_BITMAP(bitmap, ja->nr);
794 bitmap_zero(bitmap, ja->nr);
795 ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
799 pr_debug("%u journal buckets", ja->nr);
802 * If the device supports discard but not secure discard, we can't do
803 * the fancy fibonacci hash/binary search because the live journal
804 * entries might not form a contiguous range:
806 for (i = 0; i < ja->nr; i++)
810 if (!blk_queue_nonrot(q))
814 * Read journal buckets ordered by golden ratio hash to quickly
815 * find a sequence of buckets with valid journal entries
817 for (i = 0; i < ja->nr; i++) {
818 l = (i * 2654435769U) % ja->nr;
820 if (test_bit(l, bitmap))
828 * If that fails, check all the buckets we haven't checked
831 pr_debug("falling back to linear search");
833 for (l = find_first_zero_bit(bitmap, ja->nr);
835 l = find_next_zero_bit(bitmap, ja->nr, l + 1))
839 /* no journal entries on this device? */
844 r = find_next_bit(bitmap, ja->nr, l + 1);
845 pr_debug("starting binary search, l %u r %u", l, r);
848 unsigned m = (l + r) >> 1;
861 * Find the journal bucket with the highest sequence number:
863 * If there's duplicate journal entries in multiple buckets (which
864 * definitely isn't supposed to happen, but...) - make sure to start
865 * cur_idx at the last of those buckets, so we don't deadlock trying to
870 for (i = 0; i < ja->nr; i++)
871 if (ja->bucket_seq[i] >= seq &&
872 ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) {
874 * When journal_next_bucket() goes to allocate for
875 * the first time, it'll use the bucket after
879 seq = ja->bucket_seq[i];
883 * Set last_idx to indicate the entire journal is full and needs to be
884 * reclaimed - journal reclaim will immediately reclaim whatever isn't
885 * pinned when it first runs:
887 ja->last_idx = (ja->cur_idx + 1) % ja->nr;
890 * Read buckets in reverse order until we stop finding more journal
893 for (i = (ja->cur_idx + ja->nr - 1) % ja->nr;
895 i = (i + ja->nr - 1) % ja->nr)
896 if (!test_bit(i, bitmap) &&
900 free_pages((unsigned long) buf.data, get_order(buf.size));
903 mutex_lock(&jlist->lock);
905 mutex_unlock(&jlist->lock);
910 void bch_journal_entries_free(struct list_head *list)
913 while (!list_empty(list)) {
914 struct journal_replay *i =
915 list_first_entry(list, struct journal_replay, list);
921 static int journal_seq_blacklist_read(struct journal *j,
922 struct journal_replay *i,
923 struct journal_entry_pin_list *p)
925 struct cache_set *c = container_of(j, struct cache_set, journal);
926 struct jset_entry *entry;
927 struct journal_seq_blacklist *bl;
930 for_each_jset_entry_type(entry, &i->j,
931 JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED) {
932 seq = le64_to_cpu(entry->_data[0]);
934 bch_verbose(c, "blacklisting existing journal seq %llu", seq);
936 bl = bch_journal_seq_blacklisted_new(j, seq);
940 journal_pin_add_entry(j, p, &bl->pin,
941 journal_seq_blacklist_flush);
948 static inline bool journal_has_keys(struct list_head *list)
950 struct journal_replay *i;
951 struct jset_entry *entry;
952 struct bkey_i *k, *_n;
954 list_for_each_entry(i, list, list)
955 for_each_jset_key(k, _n, entry, &i->j)
961 int bch_journal_read(struct cache_set *c, struct list_head *list)
963 struct jset_entry *prio_ptrs;
964 struct journal_list jlist;
965 struct journal_replay *i;
967 struct journal_entry_pin_list *p;
969 u64 cur_seq, end_seq;
973 closure_init_stack(&jlist.cl);
974 mutex_init(&jlist.lock);
978 for_each_cache(ca, c, iter)
979 closure_call(&ca->journal.read,
980 bch_journal_read_device,
984 closure_sync(&jlist.cl);
989 if (list_empty(list)) {
990 bch_err(c, "no journal entries found");
991 return BCH_FSCK_REPAIR_IMPOSSIBLE;
994 fsck_err_on(c->sb.clean && journal_has_keys(list), c,
995 "filesystem marked clean but journal has keys to replay");
997 j = &list_entry(list->prev, struct journal_replay, list)->j;
999 unfixable_fsck_err_on(le64_to_cpu(j->seq) -
1000 le64_to_cpu(j->last_seq) + 1 >
1001 c->journal.pin.size, c,
1002 "too many journal entries open for refcount fifo");
1004 c->journal.pin.back = le64_to_cpu(j->seq) -
1005 le64_to_cpu(j->last_seq) + 1;
1007 atomic64_set(&c->journal.seq, le64_to_cpu(j->seq));
1008 c->journal.last_seq_ondisk = le64_to_cpu(j->last_seq);
1010 BUG_ON(last_seq(&c->journal) != le64_to_cpu(j->last_seq));
1012 i = list_first_entry(list, struct journal_replay, list);
1014 mutex_lock(&c->journal.blacklist_lock);
1016 fifo_for_each_entry_ptr(p, &c->journal.pin, iter) {
1017 u64 seq = journal_pin_seq(&c->journal, p);
1019 INIT_LIST_HEAD(&p->list);
1021 if (i && le64_to_cpu(i->j.seq) == seq) {
1022 atomic_set(&p->count, 1);
1024 if (journal_seq_blacklist_read(&c->journal, i, p)) {
1025 mutex_unlock(&c->journal.blacklist_lock);
1029 i = list_is_last(&i->list, list)
1031 : list_next_entry(i, list);
1033 atomic_set(&p->count, 0);
1037 mutex_unlock(&c->journal.blacklist_lock);
1039 cur_seq = last_seq(&c->journal);
1040 end_seq = le64_to_cpu(list_last_entry(list,
1041 struct journal_replay, list)->j.seq);
1043 list_for_each_entry(i, list, list) {
1046 mutex_lock(&c->journal.blacklist_lock);
1047 while (cur_seq < le64_to_cpu(i->j.seq) &&
1048 journal_seq_blacklist_find(&c->journal, cur_seq))
1051 blacklisted = journal_seq_blacklist_find(&c->journal,
1052 le64_to_cpu(i->j.seq));
1053 mutex_unlock(&c->journal.blacklist_lock);
1055 fsck_err_on(blacklisted, c,
1056 "found blacklisted journal entry %llu",
1057 le64_to_cpu(i->j.seq));
1059 fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
1060 "journal entries %llu-%llu missing! (replaying %llu-%llu)",
1061 cur_seq, le64_to_cpu(i->j.seq) - 1,
1062 last_seq(&c->journal), end_seq);
1064 cur_seq = le64_to_cpu(i->j.seq) + 1;
1067 prio_ptrs = bch_journal_find_entry(j, JOURNAL_ENTRY_PRIO_PTRS, 0);
1069 memcpy_u64s(c->journal.prio_buckets,
1071 le16_to_cpu(prio_ptrs->u64s));
1072 c->journal.nr_prio_buckets = le16_to_cpu(prio_ptrs->u64s);
1078 void bch_journal_mark(struct cache_set *c, struct list_head *list)
1080 struct bkey_i *k, *n;
1081 struct jset_entry *j;
1082 struct journal_replay *r;
1084 list_for_each_entry(r, list, list)
1085 for_each_jset_key(k, n, j, &r->j) {
1086 enum bkey_type type = bkey_type(j->level, j->btree_id);
1087 struct bkey_s_c k_s_c = bkey_i_to_s_c(k);
1089 if (btree_type_has_ptrs(type))
1090 bch_btree_mark_key_initial(c, type, k_s_c);
1094 static bool journal_entry_is_open(struct journal *j)
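/*
 * cur_entry_offset doubles as the state of the current journal entry:
 * values below JOURNAL_ENTRY_CLOSED_VAL mean an entry is open for
 * reservations; JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL are
 * sentinels.
 */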
1096 return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
1099 void bch_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
1101 struct cache_set *c = container_of(j, struct cache_set, journal);
1103 if (!need_write_just_set &&
1104 test_bit(JOURNAL_NEED_WRITE, &j->flags))
1105 __bch_time_stats_update(j->delay_time,
1106 j->need_write_time);
1108 closure_call(&j->io, journal_write, NULL, &c->cl);
1110 /* Shut sparse up: */
1111 closure_init(&j->io, &c->cl);
1112 set_closure_fn(&j->io, journal_write, NULL);
1113 journal_write(&j->io);
1117 static void __bch_journal_next_entry(struct journal *j)
1119 struct journal_entry_pin_list pin_list, *p;
1120 struct journal_buf *buf;
1123 * The fifo_push() needs to happen at the same time as j->seq is
1124 * incremented for last_seq() to be calculated correctly
1126 atomic64_inc(&j->seq);
1127 BUG_ON(!fifo_push(&j->pin, pin_list));
1128 p = &fifo_peek_back(&j->pin);
1130 INIT_LIST_HEAD(&p->list);
1131 atomic_set(&p->count, 1);
1133 if (test_bit(JOURNAL_REPLAY_DONE, &j->flags)) {
1135 j->cur_pin_list = p;
1138 buf = journal_cur_buf(j);
1139 memset(buf->has_inode, 0, sizeof(buf->has_inode));
1141 memset(buf->data, 0, sizeof(*buf->data));
1142 buf->data->seq = cpu_to_le64(atomic64_read(&j->seq));
1143 buf->data->u64s = 0;
1145 BUG_ON(journal_pin_seq(j, p) != atomic64_read(&j->seq));
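/*
 * Space reserved in each journal entry for btree roots (one per btree id)
 * and prio bucket pointers: journal_entry_open() subtracts this from the
 * space handed out to reservations, and journal_write() fills it back in
 * via bch_journal_add_btree_root()/bch_journal_add_prios() just before
 * the write goes out.
 */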
1148 static inline size_t journal_entry_u64s_reserve(struct journal_buf *buf)
1150 unsigned ret = BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
1152 if (buf->nr_prio_buckets)
1153 ret += JSET_KEYS_U64s + buf->nr_prio_buckets;
1159 JOURNAL_ENTRY_ERROR,
1160 JOURNAL_ENTRY_INUSE,
1161 JOURNAL_ENTRY_CLOSED,
1163 } journal_buf_switch(struct journal *j, bool need_write_just_set)
1165 struct cache_set *c = container_of(j, struct cache_set, journal);
1166 struct journal_buf *buf;
1167 union journal_res_state old, new;
1168 u64 v = atomic64_read(&j->reservations.counter);
1172 if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
1173 return JOURNAL_ENTRY_CLOSED;
1175 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1176 return JOURNAL_ENTRY_ERROR;
1178 if (new.prev_buf_unwritten)
1179 return JOURNAL_ENTRY_INUSE;
1182 * avoid race between setting buf->data->u64s and
1183 * journal_res_put starting write:
1185 journal_state_inc(&new);
1187 new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
1189 new.prev_buf_unwritten = 1;
1191 BUG_ON(journal_state_count(new, new.idx));
1192 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1193 old.v, new.v)) != old.v);
1195 journal_reclaim_fast(j);
1197 clear_bit(JOURNAL_NEED_WRITE, &j->flags);
1199 buf = &j->buf[old.idx];
1200 buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
1201 buf->data->last_seq = cpu_to_le64(last_seq(j));
1203 j->prev_buf_sectors =
1204 vstruct_blocks_plus(buf->data, c->block_bits,
1205 journal_entry_u64s_reserve(buf)) *
1208 BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);
1210 atomic_dec_bug(&fifo_peek_back(&j->pin).count);
1211 __bch_journal_next_entry(j);
1213 cancel_delayed_work(&j->write_work);
1214 spin_unlock(&j->lock);
1216 if (c->bucket_journal_seq > 1 << 14) {
1217 c->bucket_journal_seq = 0;
1218 bch_bucket_seq_cleanup(c);
1221 /* ugh - might be called from __journal_res_get() under wait_event() */
1222 __set_current_state(TASK_RUNNING);
1223 bch_journal_buf_put(j, old.idx, need_write_just_set);
1225 return JOURNAL_UNLOCKED;
1228 void bch_journal_halt(struct journal *j)
1230 union journal_res_state old, new;
1231 u64 v = atomic64_read(&j->reservations.counter);
1235 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1238 new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
1239 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1240 old.v, new.v)) != old.v);
1243 closure_wake_up(&journal_cur_buf(j)->wait);
1244 closure_wake_up(&journal_prev_buf(j)->wait);
1247 static unsigned journal_dev_buckets_available(struct journal *j,
1250 struct journal_device *ja = &ca->journal;
1251 unsigned next = (ja->cur_idx + 1) % ja->nr;
1252 unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
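/*
 * Free buckets are those in the ring between the next write position and
 * last_idx, the oldest bucket journal reclaim hasn't freed up yet.
 */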
1255 * Hack to avoid a deadlock during journal replay:
1256 * journal replay might require setting a new btree
1257 * root, which requires writing another journal entry -
1258 * thus, if the journal is full (and this happens when
1259 * replaying the first journal bucket's entries) we're
1262 * So don't let the journal fill up unless we're in
1265 if (test_bit(JOURNAL_REPLAY_DONE, &j->flags))
1266 available = max((int) available - 2, 0);
1269 * Don't use the last bucket unless writing the new last_seq
1270 * will make another bucket available:
1272 if (ja->bucket_seq[ja->last_idx] >= last_seq(j))
1273 available = max((int) available - 1, 0);
1278 /* returns number of sectors available for next journal entry: */
1279 static int journal_entry_sectors(struct journal *j)
1281 struct cache_set *c = container_of(j, struct cache_set, journal);
1283 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
1284 unsigned sectors_available = j->entry_size_max >> 9;
1285 unsigned i, nr_online = 0, nr_devs = 0;
1287 lockdep_assert_held(&j->lock);
1290 group_for_each_cache_rcu(ca, &j->devs, i) {
1291 unsigned buckets_required = 0;
1293 sectors_available = min_t(unsigned, sectors_available,
1294 ca->mi.bucket_size);
1297 * Note that we don't allocate the space for a journal entry
1298 * until we write it out - thus, if we haven't started the write
1299 * for the previous entry we have to make sure we have space for
1302 if (bch_extent_has_device(e.c, ca->dev_idx)) {
1303 if (j->prev_buf_sectors > ca->journal.sectors_free)
1306 if (j->prev_buf_sectors + sectors_available >
1307 ca->journal.sectors_free)
1310 if (j->prev_buf_sectors + sectors_available >
1317 if (journal_dev_buckets_available(j, ca) >= buckets_required)
1323 if (nr_online < c->opts.metadata_replicas)
1326 if (nr_devs < c->opts.metadata_replicas)
1329 return sectors_available;
1333 * should _only_ be called from journal_res_get() - when we actually want a
1334 * journal reservation - journal entry is open means journal is dirty:
1336 static int journal_entry_open(struct journal *j)
1338 struct journal_buf *buf = journal_cur_buf(j);
1340 int ret = 0, sectors;
1342 lockdep_assert_held(&j->lock);
1343 BUG_ON(journal_entry_is_open(j));
1345 if (!fifo_free(&j->pin))
1348 sectors = journal_entry_sectors(j);
1352 j->cur_buf_sectors = sectors;
1353 buf->nr_prio_buckets = j->nr_prio_buckets;
1355 u64s = (sectors << 9) / sizeof(u64);
1357 /* Subtract the journal header */
1358 u64s -= sizeof(struct jset) / sizeof(u64);
1360 * Btree roots, prio pointers don't get added until right before we do
1363 u64s -= journal_entry_u64s_reserve(buf);
1364 u64s = max_t(ssize_t, 0L, u64s);
1366 BUG_ON(u64s >= JOURNAL_ENTRY_CLOSED_VAL);
1368 if (u64s > le32_to_cpu(buf->data->u64s)) {
1369 union journal_res_state old, new;
1370 u64 v = atomic64_read(&j->reservations.counter);
1373 * Must be set before marking the journal entry as open:
1375 j->cur_entry_u64s = u64s;
1380 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
1383 /* Handle any already added entries */
1384 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
1385 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
1386 old.v, new.v)) != old.v);
1391 if (j->res_get_blocked_start) {
1392 __bch_time_stats_update(j->blocked_time,
1393 j->res_get_blocked_start);
1394 j->res_get_blocked_start = 0;
1397 mod_delayed_work(system_freezable_wq,
1399 msecs_to_jiffies(j->write_delay_ms));
1405 void bch_journal_start(struct cache_set *c)
1407 struct journal *j = &c->journal;
1408 struct journal_seq_blacklist *bl;
1413 for_each_cache(ca, c, i)
1414 if (is_journal_device(ca))
1415 bch_dev_group_add(&c->journal.devs, ca);
1417 list_for_each_entry(bl, &j->seq_blacklist, list)
1418 new_seq = max(new_seq, bl->seq);
1420 spin_lock(&j->lock);
1422 set_bit(JOURNAL_STARTED, &j->flags);
1424 while (atomic64_read(&j->seq) < new_seq) {
1425 struct journal_entry_pin_list pin_list, *p;
1427 BUG_ON(!fifo_push(&j->pin, pin_list));
1428 p = &fifo_peek_back(&j->pin);
1430 INIT_LIST_HEAD(&p->list);
1431 atomic_set(&p->count, 0);
1432 atomic64_inc(&j->seq);
1436 * journal_buf_switch() only inits the next journal entry when it
1437 * closes an open journal entry - the very first journal entry gets
1440 __bch_journal_next_entry(j);
1443 * Adding entries to the next journal entry before allocating space on
1444 * disk for the next journal entry - this is ok, because these entries
1445 * only have to go down with the next journal entry we write:
1447 list_for_each_entry(bl, &j->seq_blacklist, list)
1449 bch_journal_add_entry(journal_cur_buf(j), &bl->seq, 1,
1450 JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED,
1453 journal_pin_add_entry(j,
1454 &fifo_peek_back(&j->pin),
1456 journal_seq_blacklist_flush);
1460 spin_unlock(&j->lock);
1462 queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
1465 int bch_journal_replay(struct cache_set *c, struct list_head *list)
1467 int ret = 0, keys = 0, entries = 0;
1468 struct journal *j = &c->journal;
1469 struct bkey_i *k, *_n;
1470 struct jset_entry *entry;
1471 struct journal_replay *i, *n;
1473 list_for_each_entry_safe(i, n, list, list) {
1475 &j->pin.data[((j->pin.back - 1 -
1476 (atomic64_read(&j->seq) -
1477 le64_to_cpu(i->j.seq))) &
1480 for_each_jset_key(k, _n, entry, &i->j) {
1481 struct disk_reservation disk_res;
1484 * We might cause compressed extents to be split, so we
1485 * need to pass in a disk_reservation:
1487 BUG_ON(bch_disk_reservation_get(c, &disk_res, 0, 0));
1489 trace_bcache_journal_replay_key(&k->k);
1491 ret = bch_btree_insert(c, entry->btree_id, k,
1492 &disk_res, NULL, NULL,
1493 BTREE_INSERT_NOFAIL|
1494 BTREE_INSERT_JOURNAL_REPLAY);
1495 bch_disk_reservation_put(c, &disk_res);
1504 if (atomic_dec_and_test(&j->cur_pin_list->count))
1514 * Write a new journal entry _before_ we start journalling new data -
1515 * otherwise, we could end up with btree node bsets with journal seqs
1516 * arbitrarily far in the future vs. the most recently written journal
1517 * entry on disk, if we crash before writing the next journal entry:
1519 ret = bch_journal_meta(&c->journal);
1524 bch_info(c, "journal replay done, %i keys in %i entries, seq %llu",
1525 keys, entries, (u64) atomic64_read(&j->seq));
1527 bch_journal_set_replay_done(&c->journal);
1530 bch_err(c, "journal replay error: %d", ret);
1532 bch_journal_entries_free(list);
1537 static int bch_set_nr_journal_buckets(struct cache *ca, unsigned nr)
1539 struct journal_device *ja = &ca->journal;
1540 struct bch_sb_field_journal *journal_buckets =
1541 bch_sb_get_journal(ca->disk_sb.sb);
1542 struct bch_sb_field *f;
1545 p = krealloc(ja->bucket_seq, nr * sizeof(u64),
1546 GFP_KERNEL|__GFP_ZERO);
1552 p = krealloc(ja->buckets, nr * sizeof(u64),
1553 GFP_KERNEL|__GFP_ZERO);
1559 f = bch_dev_sb_field_resize(&ca->disk_sb, &journal_buckets->field, nr +
1560 sizeof(*journal_buckets) / sizeof(u64));
1563 f->type = BCH_SB_FIELD_journal;
1569 int bch_dev_journal_alloc(struct cache *ca)
1571 struct journal_device *ja = &ca->journal;
1572 struct bch_sb_field_journal *journal_buckets;
1576 if (ca->mi.tier != 0)
1579 if (dynamic_fault("bcache:add:journal_alloc"))
1583 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
1586 ret = bch_set_nr_journal_buckets(ca,
1587 clamp_t(unsigned, ca->mi.nbuckets >> 8,
1588 BCH_JOURNAL_BUCKETS_MIN,
1590 (1 << 20) / ca->mi.bucket_size)));
1594 journal_buckets = bch_sb_get_journal(ca->disk_sb.sb);
1596 for (i = 0; i < ja->nr; i++) {
1597 u64 bucket = ca->mi.first_bucket + i;
1599 ja->buckets[i] = bucket;
1600 journal_buckets->buckets[i] = cpu_to_le64(bucket);
1602 bch_mark_metadata_bucket(ca, &ca->buckets[bucket], true);
1611 * journal_reclaim_fast - do the fast part of journal reclaim
1613 * Called from IO submission context, does not block. Cleans up after btree
1614 * write completions by advancing the journal pin and each cache's last_idx,
1615 * kicking off discards and background reclaim as necessary.
1617 static void journal_reclaim_fast(struct journal *j)
1619 struct journal_entry_pin_list temp;
1620 bool popped = false;
1622 lockdep_assert_held(&j->lock);
1625 * Unpin journal entries whose reference counts reached zero, meaning
1626 * all btree nodes got written out
1628 while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
1629 BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
1630 BUG_ON(!fifo_pop(&j->pin, temp));
1639 * Journal entry pinning - machinery for holding a reference on a given journal
1640 * entry, marking it as dirty:
1643 static inline void __journal_pin_add(struct journal *j,
1644 struct journal_entry_pin_list *pin_list,
1645 struct journal_entry_pin *pin,
1646 journal_pin_flush_fn flush_fn)
1648 BUG_ON(journal_pin_active(pin));
1650 atomic_inc(&pin_list->count);
1651 pin->pin_list = pin_list;
1652 pin->flush = flush_fn;
1655 list_add(&pin->list, &pin_list->list);
1657 INIT_LIST_HEAD(&pin->list);
1660 static void journal_pin_add_entry(struct journal *j,
1661 struct journal_entry_pin_list *pin_list,
1662 struct journal_entry_pin *pin,
1663 journal_pin_flush_fn flush_fn)
1665 spin_lock_irq(&j->pin_lock);
1666 __journal_pin_add(j, pin_list, pin, flush_fn);
1667 spin_unlock_irq(&j->pin_lock);
1670 void bch_journal_pin_add(struct journal *j,
1671 struct journal_entry_pin *pin,
1672 journal_pin_flush_fn flush_fn)
1674 spin_lock_irq(&j->pin_lock);
1675 __journal_pin_add(j, j->cur_pin_list, pin, flush_fn);
1676 spin_unlock_irq(&j->pin_lock);
1679 static inline bool __journal_pin_drop(struct journal *j,
1680 struct journal_entry_pin *pin)
1682 struct journal_entry_pin_list *pin_list = pin->pin_list;
1684 pin->pin_list = NULL;
1686 /* journal_reclaim_work() might have already taken us off the list */
1687 if (!list_empty_careful(&pin->list))
1688 list_del_init(&pin->list);
1690 return atomic_dec_and_test(&pin_list->count);
1693 void bch_journal_pin_drop(struct journal *j,
1694 struct journal_entry_pin *pin)
1696 unsigned long flags;
1699 if (!journal_pin_active(pin))
1702 spin_lock_irqsave(&j->pin_lock, flags);
1703 wakeup = __journal_pin_drop(j, pin);
1704 spin_unlock_irqrestore(&j->pin_lock, flags);
1707 * Unpinning a journal entry may make journal_next_bucket() succeed, if
1708 * writing a new last_seq will now make another bucket available:
1710 * Nested irqsave is expensive, don't do the wakeup with lock held:
1716 void bch_journal_pin_add_if_older(struct journal *j,
1717 struct journal_entry_pin *src_pin,
1718 struct journal_entry_pin *pin,
1719 journal_pin_flush_fn flush_fn)
1721 spin_lock_irq(&j->pin_lock);
1723 if (journal_pin_active(src_pin) &&
1724 (!journal_pin_active(pin) ||
1725 fifo_entry_idx(&j->pin, src_pin->pin_list) <
1726 fifo_entry_idx(&j->pin, pin->pin_list))) {
1727 if (journal_pin_active(pin))
1728 __journal_pin_drop(j, pin);
1729 __journal_pin_add(j, src_pin->pin_list,
1733 spin_unlock_irq(&j->pin_lock);
1737 static struct journal_entry_pin *
1738 journal_get_next_pin(struct journal *j, u64 seq_to_flush)
1740 struct journal_entry_pin_list *pin_list;
1741 struct journal_entry_pin *ret = NULL;
1744 /* so we don't iterate over empty fifo entries below: */
1745 if (!atomic_read(&fifo_peek_front(&j->pin).count)) {
1746 spin_lock(&j->lock);
1747 journal_reclaim_fast(j);
1748 spin_unlock(&j->lock);
1751 spin_lock_irq(&j->pin_lock);
1752 fifo_for_each_entry_ptr(pin_list, &j->pin, iter) {
1753 if (journal_pin_seq(j, pin_list) > seq_to_flush)
1756 ret = list_first_entry_or_null(&pin_list->list,
1757 struct journal_entry_pin, list);
1759 /* must be list_del_init(), see bch_journal_pin_drop() */
1760 list_del_init(&ret->list);
1764 spin_unlock_irq(&j->pin_lock);
1769 static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
1773 spin_lock(&j->lock);
1774 ret = (ja->last_idx != ja->cur_idx &&
1775 ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
1776 spin_unlock(&j->lock);
1782 * journal_reclaim_work - free up journal buckets
1784 * Background journal reclaim writes out btree nodes. It should be run
1785 * early enough so that we never completely run out of journal buckets.
1787 * High watermarks for triggering background reclaim:
1788 * - FIFO has fewer than 512 entries left
1789 * - fewer than 25% journal buckets free
1791 * Background reclaim runs until low watermarks are reached:
1792 * - FIFO has more than 1024 entries left
1793 * - more than 50% journal buckets free
1795 * As long as a reclaim can complete in the time it takes to fill up
1796 * 512 journal entries or 25% of all journal buckets, then
1797 * journal_next_bucket() should not stall.
1799 static void journal_reclaim_work(struct work_struct *work)
1801 struct cache_set *c = container_of(to_delayed_work(work),
1802 struct cache_set, journal.reclaim_work);
1803 struct journal *j = &c->journal;
1805 struct journal_entry_pin *pin;
1806 u64 seq_to_flush = 0;
1807 unsigned iter, bucket_to_flush;
1808 unsigned long next_flush;
1809 bool reclaim_lock_held = false, need_flush;
1812 * Advance last_idx to point to the oldest journal entry containing
1813 * btree node updates that have not yet been written out
1815 group_for_each_cache(ca, &j->devs, iter) {
1816 struct journal_device *ja = &ca->journal;
1818 while (should_discard_bucket(j, ja)) {
1819 if (!reclaim_lock_held) {
1822 * might be called from __journal_res_get()
1823 * under wait_event() - have to go back to
1824 * TASK_RUNNING before doing something that
1825 * would block, but only if we're doing work:
1827 __set_current_state(TASK_RUNNING);
1829 mutex_lock(&j->reclaim_lock);
1830 reclaim_lock_held = true;
1831 /* recheck under reclaim_lock: */
1835 if (ca->mi.discard &&
1836 blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
1837 blkdev_issue_discard(ca->disk_sb.bdev,
1838 bucket_to_sector(ca,
1839 ja->buckets[ja->last_idx]),
1840 ca->mi.bucket_size, GFP_NOIO, 0);
1842 spin_lock(&j->lock);
1843 ja->last_idx = (ja->last_idx + 1) % ja->nr;
1844 spin_unlock(&j->lock);
1850 * Write out enough btree nodes to free up 50% journal
1853 spin_lock(&j->lock);
1854 bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
1855 seq_to_flush = max_t(u64, seq_to_flush,
1856 ja->bucket_seq[bucket_to_flush]);
1857 spin_unlock(&j->lock);
1860 if (reclaim_lock_held)
1861 mutex_unlock(&j->reclaim_lock);
1863 /* Also flush if the pin fifo is more than half full */
1864 seq_to_flush = max_t(s64, seq_to_flush,
1865 (s64) atomic64_read(&j->seq) -
1866 (j->pin.size >> 1));
1869 * If it's been longer than j->reclaim_delay_ms since we last flushed,
1870 * make sure to flush at least one journal pin:
1872 next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
1873 need_flush = time_after(jiffies, next_flush);
1875 while ((pin = journal_get_next_pin(j, need_flush
1878 __set_current_state(TASK_RUNNING);
1882 j->last_flushed = jiffies;
1885 if (!test_bit(BCH_FS_RO, &c->flags))
1886 queue_delayed_work(system_freezable_wq, &j->reclaim_work,
1887 msecs_to_jiffies(j->reclaim_delay_ms));
1891 * journal_next_bucket - move on to the next journal bucket if possible
1893 static int journal_write_alloc(struct journal *j, unsigned sectors)
1895 struct cache_set *c = container_of(j, struct cache_set, journal);
1896 struct bkey_s_extent e = bkey_i_to_s_extent(&j->key);
1897 struct bch_extent_ptr *ptr;
1899 unsigned iter, replicas, replicas_want =
1900 READ_ONCE(c->opts.metadata_replicas);
1902 spin_lock(&j->lock);
1906 * Drop any pointers to devices that have been removed, are no longer
1907 * empty, or filled up their current journal bucket:
1909 * Note that a device may have had a small amount of free space (perhaps
1910 * one sector) that wasn't enough for the smallest possible journal
1911 * entry - that's why we drop pointers to devices <= current free space,
1912 * i.e. whichever device was limiting the current journal entry size.
1914 extent_for_each_ptr_backwards(e, ptr)
1915 if (!(ca = PTR_CACHE(c, ptr)) ||
1916 ca->mi.state != BCH_MEMBER_STATE_ACTIVE ||
1917 ca->journal.sectors_free <= sectors)
1918 __bch_extent_drop_ptr(e, ptr);
1920 ca->journal.sectors_free -= sectors;
1922 replicas = bch_extent_nr_ptrs(e.c);
1925 * Determine location of the next journal write:
1926 * XXX: sort caches by free journal space
1928 group_for_each_cache_rcu(ca, &j->devs, iter) {
1929 struct journal_device *ja = &ca->journal;
1931 if (replicas >= replicas_want)
1935 * Check that we can use this device, and aren't already using
1938 if (bch_extent_has_device(e.c, ca->dev_idx) ||
1939 !journal_dev_buckets_available(j, ca) ||
1940 sectors > ca->mi.bucket_size)
1943 ja->sectors_free = ca->mi.bucket_size - sectors;
1944 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
1945 ja->bucket_seq[ja->cur_idx] = atomic64_read(&j->seq);
1947 extent_ptr_append(bkey_i_to_extent(&j->key),
1948 (struct bch_extent_ptr) {
1949 .offset = bucket_to_sector(ca,
1950 ja->buckets[ja->cur_idx]),
1955 trace_bcache_journal_next_bucket(ca, ja->cur_idx, ja->last_idx);
1960 j->prev_buf_sectors = 0;
1961 spin_unlock(&j->lock);
1963 if (replicas < replicas_want)
1969 static void journal_write_compact(struct jset *jset)
1971 struct jset_entry *i, *next, *prev = NULL;
1974 * Simple compaction, dropping empty jset_entries (from journal
1975 * reservations that weren't fully used) and merging jset_entries that
1978 * If we wanted to be really fancy here, we could sort all the keys in
1979 * the jset and drop keys that were overwritten - probably not worth it:
1981 vstruct_for_each_safe(jset, i, next) {
1982 unsigned u64s = le16_to_cpu(i->u64s);
1988 /* Can we merge with previous entry? */
1990 i->btree_id == prev->btree_id &&
1991 i->level == prev->level &&
1992 JOURNAL_ENTRY_TYPE(i) == JOURNAL_ENTRY_TYPE(prev) &&
1993 JOURNAL_ENTRY_TYPE(i) == JOURNAL_ENTRY_BTREE_KEYS &&
1994 le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
1995 memmove_u64s_down(vstruct_next(prev),
1998 le16_add_cpu(&prev->u64s, u64s);
2002 /* Couldn't merge, move i into new position (after prev): */
2003 prev = prev ? vstruct_next(prev) : jset->start;
2005 memmove_u64s_down(prev, i, jset_u64s(u64s));
2008 prev = prev ? vstruct_next(prev) : jset->start;
2009 jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
2012 static void journal_write_endio(struct bio *bio)
2014 struct cache *ca = bio->bi_private;
2015 struct journal *j = &ca->set->journal;
2017 if (bch_dev_fatal_io_err_on(bio->bi_error, ca, "journal write") ||
2018 bch_meta_write_fault("journal"))
2019 bch_journal_halt(j);
2021 closure_put(&j->io);
2022 percpu_ref_put(&ca->ref);
2025 static void journal_write_done(struct closure *cl)
2027 struct journal *j = container_of(cl, struct journal, io);
2028 struct journal_buf *w = journal_prev_buf(j);
2030 j->last_seq_ondisk = le64_to_cpu(w->data->last_seq);
2032 __bch_time_stats_update(j->write_time, j->write_start_time);
2034 BUG_ON(!j->reservations.prev_buf_unwritten);
2035 atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
2036 &j->reservations.counter);
2039 * XXX: this is racy, we could technically end up doing the wake up
2040 * after the journal_buf struct has been reused for the next write
2041 * (because we're clearing JOURNAL_IO_IN_FLIGHT) and wake up things that
2042 * are waiting on the _next_ write, not this one.
2044 * The wake up can't come before, because journal_flush_seq_async() is
2045 * looking at JOURNAL_IO_IN_FLIGHT when it has to wait on a journal
2046 * write that was already in flight.
2048 * The right fix is to use a lock here, but using j.lock here means it
2049 * has to be a spin_lock_irqsave() lock which then requires propagating
2050 * the irq()ness to other locks and it's all kinds of nastiness.
2053 closure_wake_up(&w->wait);
2057 * Updating last_seq_ondisk may let journal_reclaim_work() discard more
2060 mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
2063 static void journal_write(struct closure *cl)
2065 struct journal *j = container_of(cl, struct journal, io);
2066 struct cache_set *c = container_of(j, struct cache_set, journal);
2068 struct journal_buf *w = journal_prev_buf(j);
2069 struct jset *jset = w->data;
2071 struct bch_extent_ptr *ptr;
2072 unsigned i, sectors, bytes;
2074 j->write_start_time = local_clock();
2076 bch_journal_add_prios(j, w);
2078 mutex_lock(&c->btree_root_lock);
2079 for (i = 0; i < BTREE_ID_NR; i++) {
2080 struct btree_root *r = &c->btree_roots[i];
2083 bch_journal_add_btree_root(w, i, &r->key, r->level);
2085 mutex_unlock(&c->btree_root_lock);
2087 journal_write_compact(jset);
2089 jset->read_clock = cpu_to_le16(c->prio_clock[READ].hand);
2090 jset->write_clock = cpu_to_le16(c->prio_clock[WRITE].hand);
2091 jset->magic = cpu_to_le64(jset_magic(c));
2092 jset->version = cpu_to_le32(BCACHE_JSET_VERSION);
2094 SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
2095 SET_JSET_CSUM_TYPE(jset, bch_meta_checksum_type(c));
2097 bch_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
2098 jset->encrypted_start,
2099 vstruct_end(jset) - (void *) jset->encrypted_start);
2101 jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
2102 journal_nonce(jset), jset);
2104 sectors = vstruct_sectors(jset, c->block_bits);
2105 BUG_ON(sectors > j->prev_buf_sectors);
2107 bytes = vstruct_bytes(w->data);
2108 memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
2110 if (journal_write_alloc(j, sectors)) {
2111 bch_journal_halt(j);
2112 bch_err(c, "Unable to allocate journal write");
2114 closure_return_with_destructor(cl, journal_write_done);
2117 bch_check_mark_super(c, &j->key, true);
2120 * XXX: we really should just disable the entire journal in nochanges
2123 if (c->opts.nochanges)
2126 extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr) {
2128 ca = PTR_CACHE(c, ptr);
2130 percpu_ref_get(&ca->ref);
2135 bch_err(c, "missing device for journal write\n");
2139 atomic64_add(sectors, &ca->meta_sectors_written);
2141 bio = ca->journal.bio;
2143 bio->bi_iter.bi_sector = ptr->offset;
2144 bio->bi_bdev = ca->disk_sb.bdev;
2145 bio->bi_iter.bi_size = sectors << 9;
2146 bio->bi_end_io = journal_write_endio;
2147 bio->bi_private = ca;
2148 bio_set_op_attrs(bio, REQ_OP_WRITE,
2149 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
2150 bch_bio_map(bio, jset);
2152 trace_bcache_journal_write(bio);
2153 closure_bio_submit_punt(bio, cl, c);
2155 ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
2158 for_each_cache(ca, c, i)
2159 if (ca->mi.state == BCH_MEMBER_STATE_ACTIVE &&
2160 journal_flushes_device(ca) &&
2161 !bch_extent_has_device(bkey_i_to_s_c_extent(&j->key), i)) {
2162 percpu_ref_get(&ca->ref);
2164 bio = ca->journal.bio;
2166 bio->bi_bdev = ca->disk_sb.bdev;
2167 bio->bi_end_io = journal_write_endio;
2168 bio->bi_private = ca;
2169 bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
2170 closure_bio_submit_punt(bio, cl, c);
2174 extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
2175 ptr->offset += sectors;
2177 closure_return_with_destructor(cl, journal_write_done);
2180 static void journal_write_work(struct work_struct *work)
2182 struct journal *j = container_of(to_delayed_work(work),
2183 struct journal, write_work);
2184 spin_lock(&j->lock);
2185 set_bit(JOURNAL_NEED_WRITE, &j->flags);
2187 if (journal_buf_switch(j, false) != JOURNAL_UNLOCKED)
2188 spin_unlock(&j->lock);
2192 * Given an inode number, if that inode number has data in the journal that
2193 * hasn't yet been flushed, return the journal sequence number that needs to be
2196 u64 bch_inode_journal_seq(struct journal *j, u64 inode)
2198 size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
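/*
 * has_inode acts as a small bloom filter: a hash collision can only cause
 * an unnecessary flush (false positive), never a missed one.
 */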
2201 if (!test_bit(h, j->buf[0].has_inode) &&
2202 !test_bit(h, j->buf[1].has_inode))
2205 spin_lock(&j->lock);
2206 if (test_bit(h, journal_cur_buf(j)->has_inode))
2207 seq = atomic64_read(&j->seq);
2208 else if (test_bit(h, journal_prev_buf(j)->has_inode))
2209 seq = atomic64_read(&j->seq) - 1;
2210 spin_unlock(&j->lock);
2215 static int __journal_res_get(struct journal *j, struct journal_res *res,
2216 unsigned u64s_min, unsigned u64s_max)
2218 struct cache_set *c = container_of(j, struct cache_set, journal);
2221 ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
2225 spin_lock(&j->lock);
2227 * Recheck after taking the lock, so we don't race with another thread
2228 * that just did journal_entry_open() and call journal_entry_close()
2231 ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
2233 spin_unlock(&j->lock);
2238 * Ok, no more room in the current journal entry - try to start a new
2241 switch (journal_buf_switch(j, false)) {
2242 case JOURNAL_ENTRY_ERROR:
2243 spin_unlock(&j->lock);
2245 case JOURNAL_ENTRY_INUSE:
2246 /* haven't finished writing out the previous one: */
2247 spin_unlock(&j->lock);
2248 trace_bcache_journal_entry_full(c);
2250 case JOURNAL_ENTRY_CLOSED:
2252 case JOURNAL_UNLOCKED:
2256 /* We now have a new, closed journal buf - see if we can open it: */
2257 ret = journal_entry_open(j);
2258 spin_unlock(&j->lock);
2265 /* Journal's full, we have to wait */
2268 * Direct reclaim - can't rely on reclaim from work item
2271 journal_reclaim_work(&j->reclaim_work.work);
2273 trace_bcache_journal_full(c);
2275 if (!j->res_get_blocked_start)
2276 j->res_get_blocked_start = local_clock() ?: 1;
2281 * Essentially the entry function to the journaling code. When bcache is doing
2282 * a btree insert, it calls this function to get the current journal write.
2283 * Journal write is the structure used to set up journal writes. The calling
2284 * function will then add its keys to the structure, queuing them for the
2287 * To ensure forward progress, the current task must not be holding any
2288 * btree node write locks.
2290 int bch_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
2291 unsigned u64s_min, unsigned u64s_max)
2296 (ret = __journal_res_get(j, res, u64s_min,
2298 return ret < 0 ? ret : 0;
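/*
 * Typical use of the reservation interface (sketch, cf. bch_journal_meta()
 * below; error handling elided):
 *
 *	struct journal_res res;
 *
 *	memset(&res, 0, sizeof(res));
 *	bch_journal_res_get(j, &res, u64s, u64s);
 *	... add keys to the currently open journal buffer ...
 *	bch_journal_res_put(j, &res);
 *	bch_journal_flush_seq(j, res.seq);
 */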
2301 void bch_journal_wait_on_seq(struct journal *j, u64 seq, struct closure *parent)
2303 spin_lock(&j->lock);
2305 BUG_ON(seq > atomic64_read(&j->seq));
2307 if (bch_journal_error(j)) {
2308 spin_unlock(&j->lock);
2312 if (seq == atomic64_read(&j->seq)) {
2313 if (!closure_wait(&journal_cur_buf(j)->wait, parent))
2315 } else if (seq + 1 == atomic64_read(&j->seq) &&
2316 j->reservations.prev_buf_unwritten) {
2317 if (!closure_wait(&journal_prev_buf(j)->wait, parent))
2322 /* check if raced with write completion (or failure) */
2323 if (!j->reservations.prev_buf_unwritten ||
2324 bch_journal_error(j))
2325 closure_wake_up(&journal_prev_buf(j)->wait);
2328 spin_unlock(&j->lock);
2331 void bch_journal_flush_seq_async(struct journal *j, u64 seq, struct closure *parent)
2333 spin_lock(&j->lock);
2335 BUG_ON(seq > atomic64_read(&j->seq));
2337 if (bch_journal_error(j)) {
2338 spin_unlock(&j->lock);
2342 if (seq == atomic64_read(&j->seq)) {
2343 bool set_need_write = false;
2346 !closure_wait(&journal_cur_buf(j)->wait, parent))
2349 if (!test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags)) {
2350 j->need_write_time = local_clock();
2351 set_need_write = true;
2354 switch (journal_buf_switch(j, set_need_write)) {
2355 case JOURNAL_ENTRY_ERROR:
2357 closure_wake_up(&journal_cur_buf(j)->wait);
2359 case JOURNAL_ENTRY_CLOSED:
2361 * Journal entry hasn't been opened yet, but caller
2362 * claims it has something (seq == j->seq):
2365 case JOURNAL_ENTRY_INUSE:
2367 case JOURNAL_UNLOCKED:
2370 } else if (parent &&
2371 seq + 1 == atomic64_read(&j->seq) &&
2372 j->reservations.prev_buf_unwritten) {
2373 if (!closure_wait(&journal_prev_buf(j)->wait, parent))
2378 /* check if raced with write completion (or failure) */
2379 if (!j->reservations.prev_buf_unwritten ||
2380 bch_journal_error(j))
2381 closure_wake_up(&journal_prev_buf(j)->wait);
2384 spin_unlock(&j->lock);
2387 int bch_journal_flush_seq(struct journal *j, u64 seq)
2390 u64 start_time = local_clock();
2392 closure_init_stack(&cl);
2393 bch_journal_flush_seq_async(j, seq, &cl);
2396 bch_time_stats_update(j->flush_seq_time, start_time);
2398 return bch_journal_error(j);
2401 void bch_journal_meta_async(struct journal *j, struct closure *parent)
2403 struct journal_res res;
2404 unsigned u64s = jset_u64s(0);
2406 memset(&res, 0, sizeof(res));
2408 bch_journal_res_get(j, &res, u64s, u64s);
2409 bch_journal_res_put(j, &res);
2411 bch_journal_flush_seq_async(j, res.seq, parent);
2414 int bch_journal_meta(struct journal *j)
2416 struct journal_res res;
2417 unsigned u64s = jset_u64s(0);
2420 memset(&res, 0, sizeof(res));
2422 ret = bch_journal_res_get(j, &res, u64s, u64s);
2426 bch_journal_res_put(j, &res);
2428 return bch_journal_flush_seq(j, res.seq);
2431 void bch_journal_flush_async(struct journal *j, struct closure *parent)
2433 u64 seq, journal_seq;
2435 spin_lock(&j->lock);
2436 journal_seq = atomic64_read(&j->seq);
2438 if (journal_entry_is_open(j)) {
2440 } else if (journal_seq) {
2441 seq = journal_seq - 1;
2443 spin_unlock(&j->lock);
2446 spin_unlock(&j->lock);
2448 bch_journal_flush_seq_async(j, seq, parent);
2451 int bch_journal_flush(struct journal *j)
2453 u64 seq, journal_seq;
2455 spin_lock(&j->lock);
2456 journal_seq = atomic64_read(&j->seq);
2458 if (journal_entry_is_open(j)) {
2460 } else if (journal_seq) {
2461 seq = journal_seq - 1;
2463 spin_unlock(&j->lock);
2466 spin_unlock(&j->lock);
2468 return bch_journal_flush_seq(j, seq);
2471 void bch_journal_free(struct journal *j)
2473 unsigned order = get_order(j->entry_size_max);
2475 free_pages((unsigned long) j->buf[1].data, order);
2476 free_pages((unsigned long) j->buf[0].data, order);
2480 int bch_journal_alloc(struct journal *j, unsigned entry_size_max)
2482 static struct lock_class_key res_key;
2483 unsigned order = get_order(entry_size_max);
2485 spin_lock_init(&j->lock);
2486 spin_lock_init(&j->pin_lock);
2487 init_waitqueue_head(&j->wait);
2488 INIT_DELAYED_WORK(&j->write_work, journal_write_work);
2489 INIT_DELAYED_WORK(&j->reclaim_work, journal_reclaim_work);
2490 mutex_init(&j->blacklist_lock);
2491 INIT_LIST_HEAD(&j->seq_blacklist);
2492 spin_lock_init(&j->devs.lock);
2493 mutex_init(&j->reclaim_lock);
2495 lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
2497 j->entry_size_max = entry_size_max;
2498 j->write_delay_ms = 100;
2499 j->reclaim_delay_ms = 100;
2501 bkey_extent_init(&j->key);
2503 atomic64_set(&j->reservations.counter,
2504 ((union journal_res_state)
2505 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
2507 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
2508 !(j->buf[0].data = (void *) __get_free_pages(GFP_KERNEL, order)) ||
2509 !(j->buf[1].data = (void *) __get_free_pages(GFP_KERNEL, order)))
2515 ssize_t bch_journal_print_debug(struct journal *j, char *buf)
2517 union journal_res_state *s = &j->reservations;
2523 spin_lock(&j->lock);
2525 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2526 "active journal entries:\t%zu\n"
2528 "last_seq:\t\t%llu\n"
2529 "last_seq_ondisk:\t%llu\n"
2530 "reservation count:\t%u\n"
2531 "reservation offset:\t%u\n"
2532 "current entry u64s:\t%u\n"
2533 "io in flight:\t\t%i\n"
2534 "need write:\t\t%i\n"
2536 "replay done:\t\t%i\n",
2538 (u64) atomic64_read(&j->seq),
2541 journal_state_count(*s, s->idx),
2542 s->cur_entry_offset,
2544 s->prev_buf_unwritten,
2545 test_bit(JOURNAL_NEED_WRITE, &j->flags),
2546 journal_entry_is_open(j),
2547 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
2549 group_for_each_cache_rcu(ca, &j->devs, iter) {
2550 struct journal_device *ja = &ca->journal;
2552 ret += scnprintf(buf + ret, PAGE_SIZE - ret,
2555 "\tcur_idx\t\t%u (seq %llu)\n"
2556 "\tlast_idx\t%u (seq %llu)\n",
2558 ja->cur_idx, ja->bucket_seq[ja->cur_idx],
2559 ja->last_idx, ja->bucket_seq[ja->last_idx]);
2562 spin_unlock(&j->lock);
2568 static bool bch_journal_writing_to_device(struct cache *ca)
2570 struct journal *j = &ca->set->journal;
2573 spin_lock(&j->lock);
2574 ret = bch_extent_has_device(bkey_i_to_s_c_extent(&j->key),
2576 spin_unlock(&j->lock);
2582 * This assumes that ca has already been marked read-only so that
2583 * journal_next_bucket won't pick buckets out of ca any more.
2584 * Hence, if the journal is not currently pointing to ca, there
2585 * will be no new writes to journal entries in ca after all the
2586 * pending ones have been flushed to disk.
2588 * If the journal is being written to ca, write a new record, and
2589 * journal_next_bucket will notice that the device is no longer
2590 * writeable and pick a new set of devices to write to.
2593 int bch_journal_move(struct cache *ca)
2595 u64 last_flushed_seq;
2596 struct journal_device *ja = &ca->journal;
2597 struct cache_set *c = ca->set;
2598 struct journal *j = &c->journal;
2600 int ret = 0; /* Success */
2602 if (bch_journal_writing_to_device(ca)) {
2604 * bch_journal_meta will write a record and we'll wait
2605 * for the write to complete.
2606 * Actually writing the journal (journal_write_locked)
2607 * will call journal_next_bucket which notices that the
2608 * device is no longer writeable, and picks a new one.
2610 bch_journal_meta(j);
2611 BUG_ON(bch_journal_writing_to_device(ca));
2615 * Flush all btree updates to backing store so that any
2616 * journal entries written to ca become stale and are no
2621 * XXX: switch to normal journal reclaim machinery
2626 * Force a meta-data journal entry to be written so that
2627 * we have newer journal entries in devices other than ca,
2628 * and wait for the meta data write to complete.
2630 bch_journal_meta(j);
2633 * Verify that we no longer need any of the journal entries in
2636 spin_lock(&j->lock);
2637 last_flushed_seq = last_seq(j);
2638 spin_unlock(&j->lock);
2640 for (i = 0; i < ja->nr; i += 1)
2641 BUG_ON(ja->bucket_seq[i] > last_flushed_seq);
2646 void bch_journal_free_cache(struct cache *ca)
2648 kfree(ca->journal.buckets);
2649 kfree(ca->journal.bucket_seq);
2652 int bch_journal_init_cache(struct cache *ca)
2654 struct journal_device *ja = &ca->journal;
2655 struct bch_sb_field_journal *journal_buckets =
2656 bch_sb_get_journal(ca->disk_sb.sb);
2657 unsigned i, journal_entry_pages;
2659 journal_entry_pages =
2660 DIV_ROUND_UP(1U << BCH_SB_JOURNAL_ENTRY_SIZE(ca->disk_sb.sb),
2663 ja->nr = bch_nr_journal_buckets(journal_buckets);
2665 ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
2666 if (!ja->bucket_seq)
2669 ca->journal.bio = bio_kmalloc(GFP_KERNEL, journal_entry_pages);
2670 if (!ca->journal.bio)
2673 ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
2677 for (i = 0; i < ja->nr; i++)
2678 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);