/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "buckets.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
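
/*
 * Overview (summarizing how this file uses the reservation state): the whole
 * reservation state lives in one 64 bit word (j->reservations), and
 * cur_entry_offset within it doubles as the open/closed state of the current
 * entry - offsets below JOURNAL_ENTRY_CLOSED_VAL mean the entry is open for
 * reservations at that offset, while JOURNAL_ENTRY_CLOSED_VAL and
 * JOURNAL_ENTRY_ERROR_VAL are sentinels for "closed" and "halted on error".
 */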

static bool journal_entry_is_open(struct journal *j)
{
	return j->reservations.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

void bch2_journal_buf_put_slowpath(struct journal *j, bool need_write_just_set)
{
	struct journal_buf *w = journal_prev_buf(j);

	atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);

	if (!need_write_just_set &&
	    test_bit(JOURNAL_NEED_WRITE, &j->flags))
		bch2_time_stats_update(j->delay_time,
				       j->need_write_time);

	closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}

static void journal_pin_new_entry(struct journal *j, int count)
{
	struct journal_entry_pin_list *p;

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	p = fifo_push_ref(&j->pin);

	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
}

static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	memset(buf->has_inode, 0, sizeof(buf->has_inode));

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;
}

static inline size_t journal_entry_u64s_reserve(struct journal_buf *buf)
{
	return BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);
}

static inline bool journal_entry_empty(struct jset *j)
{
	struct jset_entry *i;

	if (j->seq != j->last_seq)
		return false;

	vstruct_for_each(j, i)
		if (i->type || i->u64s)
			return false;
	return true;
}

static enum {
	JOURNAL_ENTRY_ERROR,
	JOURNAL_ENTRY_INUSE,
	JOURNAL_ENTRY_CLOSED,
	JOURNAL_UNLOCKED,
} journal_buf_switch(struct journal *j, bool need_write_just_set)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return JOURNAL_ENTRY_CLOSED;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
			/* this entry will never be written: */
			closure_wake_up(&buf->wait);
			return JOURNAL_ENTRY_ERROR;
		}

		if (new.prev_buf_unwritten)
			return JOURNAL_ENTRY_INUSE;

		/*
		 * avoid race between setting buf->data->u64s and
		 * journal_res_put starting write:
		 */
		journal_state_inc(&new);
		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.idx++;
		new.prev_buf_unwritten = 1;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);
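
	/*
	 * Note: because the reservation state (count, index, offset,
	 * prev_buf_unwritten) is packed into a single 64 bit word, the
	 * cmpxchg above atomically closed the entry, flipped the buffer
	 * index and took a reference on the buffer being written, all in
	 * one operation.
	 */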

	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	buf->data->u64s	= cpu_to_le32(old.cur_entry_offset);

	j->prev_buf_sectors =
		vstruct_blocks_plus(buf->data, c->block_bits,
				    journal_entry_u64s_reserve(buf)) *
		c->opts.block_size;
	BUG_ON(j->prev_buf_sectors > j->cur_buf_sectors);

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
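	/*
	 * (Illustrative example: if a pin on seq 8 is re-pinned to the entry
	 * being closed here and the old pin then dropped, journal_last_seq()
	 * may advance past 8 - only this entry, which now contains the
	 * re-pinned data, may carry the newer last_seq.)
	 */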
	bch2_journal_reclaim_fast(j);
	buf->data->last_seq	= cpu_to_le64(journal_last_seq(j));

	if (journal_entry_empty(buf->data))
		clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
	else
		set_bit(JOURNAL_NOT_EMPTY, &j->flags);

	journal_pin_new_entry(j, 1);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);
	spin_unlock(&j->lock);

	/* ugh - might be called from __journal_res_get() under wait_event() */
	__set_current_state(TASK_RUNNING);
	bch2_journal_buf_put(j, old.idx, need_write_just_set);

	return JOURNAL_UNLOCKED;
}

void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	journal_wake(j);
	closure_wake_up(&journal_cur_buf(j)->wait);
	closure_wake_up(&journal_prev_buf(j)->wait);
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * 1:		success
 * 0:		journal currently full (must wait)
 * -EROFS:	insufficient rw devices
 * -EIO:	journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	ssize_t u64s;
	int sectors;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (!fifo_free(&j->pin))
		return 0;

	sectors = bch2_journal_entry_sectors(j);
	if (sectors <= 0)
		return sectors;

	buf->disk_sectors	= sectors;

	sectors = min_t(unsigned, sectors, buf->size >> 9);
	j->cur_buf_sectors	= sectors;

	u64s = (sectors << 9) / sizeof(u64);

	/* Subtract the journal header */
	u64s -= sizeof(struct jset) / sizeof(u64);

	/*
	 * Btree roots, prio pointers don't get added until right before we do
	 * the write:
	 */
	u64s -= journal_entry_u64s_reserve(buf);
	u64s  = max_t(ssize_t, 0L, u64s);
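
	/*
	 * Worked example (illustrative): an 8 sector (4096 byte) buffer
	 * yields 4096 / sizeof(u64) = 512 u64s, before subtracting the jset
	 * header and the btree root reservation above.
	 */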

	BUG_ON(u64s >= JOURNAL_ENTRY_CLOSED_VAL);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return 0;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return -EIO;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(system_freezable_wq,
			 &j->write_work,
			 msecs_to_jiffies(j->write_delay_ms));
	journal_wake(j);
	return 1;
}

static bool __journal_entry_close(struct journal *j)
{
	bool set_need_write;

	if (!journal_entry_is_open(j)) {
		spin_unlock(&j->lock);
		return true;
	}

	set_need_write = !test_and_set_bit(JOURNAL_NEED_WRITE, &j->flags);
	if (set_need_write)
		j->need_write_time = local_clock();

	switch (journal_buf_switch(j, set_need_write)) {
	case JOURNAL_ENTRY_INUSE:
		spin_unlock(&j->lock);
		return false;
	default:
		spin_unlock(&j->lock);
		/* fallthrough */
	case JOURNAL_UNLOCKED:
		/* journal_buf_switch() already dropped j->lock */
		return true;
	}
}

static bool journal_entry_close(struct journal *j)
{
	spin_lock(&j->lock);
	return __journal_entry_close(j);
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_entry_close(j);
}

/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
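	/*
	 * has_inode is a Bloom-filter-like bitmap (single hash function): a
	 * set bit means an inode hashing to it *may* have keys in that
	 * buffer, so a false positive only costs a spurious flush.
	 */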
	u64 seq = 0;

	if (!test_bit(h, j->buf[0].has_inode) &&
	    !test_bit(h, j->buf[1].has_inode))
		return 0;

	spin_lock(&j->lock);
	if (test_bit(h, journal_cur_buf(j)->has_inode))
		seq = journal_cur_seq(j);
	else if (test_bit(h, journal_prev_buf(j)->has_inode))
		seq = journal_cur_seq(j) - 1;
	spin_unlock(&j->lock);

	return seq;
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned u64s_min, unsigned u64s_max)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	int ret;
retry:
	ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
	if (ret)
		return ret;

	spin_lock(&j->lock);
	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open(), and then call
	 * journal_entry_close() unnecessarily:
	 */
	ret = journal_res_get_fast(j, res, u64s_min, u64s_max);
	if (ret) {
		spin_unlock(&j->lock);
		return 1;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->size >> 9 < buf->disk_sectors &&
	    buf->size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->size << 1);

	/*
	 * Close the current journal entry if necessary, then try to start a
	 * new one:
	 */
	switch (journal_buf_switch(j, false)) {
	case JOURNAL_ENTRY_ERROR:
		spin_unlock(&j->lock);
		return -EROFS;
	case JOURNAL_ENTRY_INUSE:
		/* haven't finished writing out the previous one: */
		spin_unlock(&j->lock);
		trace_journal_entry_full(c);
		goto blocked;
	case JOURNAL_ENTRY_CLOSED:
		break;
	case JOURNAL_UNLOCKED:
		goto retry;
	}

	/* We now have a new, closed journal buf - see if we can open it: */
	ret = journal_entry_open(j);
	spin_unlock(&j->lock);

	if (ret < 0)
		return ret;
	if (ret)
		goto retry;

	/* Journal's full, we have to wait */
blocked:
	/*
	 * Direct reclaim - can't rely on reclaim from work item due to
	 * freezing:
	 */
	bch2_journal_reclaim_work(&j->reclaim_work.work);

	trace_journal_full(c);

	if (!j->res_get_blocked_start)
		j->res_get_blocked_start = local_clock() ?: 1;
	return 0;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned u64s_min, unsigned u64s_max)
{
	int ret;

	wait_event(j->wait,
		   (ret = __journal_res_get(j, res, u64s_min,
					    u64s_max)));
	return ret < 0 ? ret : 0;
}
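
/*
 * Typical usage, sketched after bch2_journal_meta() below:
 *
 *	struct journal_res res;
 *	unsigned u64s = jset_u64s(0);
 *
 *	memset(&res, 0, sizeof(res));
 *	ret = bch2_journal_res_get(j, &res, u64s, u64s);
 *	if (ret)
 *		return ret;
 *	...add keys to the reservation's space, then...
 *	bch2_journal_res_put(j, &res);
 */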

u64 bch2_journal_last_unwritten_seq(struct journal *j)
{
	u64 seq;

	spin_lock(&j->lock);
	seq = journal_cur_seq(j);
	if (j->reservations.prev_buf_unwritten)
		seq--;
	spin_unlock(&j->lock);

	return seq;
}

/**
 * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
 * open yet, or wait if we cannot
 *
 * used by the btree interior update machinery, when it needs to write a new
 * btree root - every journal entry contains the roots of all the btrees, so it
 * doesn't need to bother with getting a journal reservation
 */
int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *parent)
{
	int ret;

	spin_lock(&j->lock);
	BUG_ON(seq > journal_cur_seq(j));

	if (seq < journal_cur_seq(j) ||
	    journal_entry_is_open(j)) {
		spin_unlock(&j->lock);
		return 1;
	}

	ret = journal_entry_open(j);
	if (!ret)
		closure_wait(&j->async_wait, parent);
	spin_unlock(&j->lock);

	if (!ret)
		bch2_journal_reclaim_work(&j->reclaim_work.work);

	return ret;
}

static int journal_seq_error(struct journal *j, u64 seq)
{
	union journal_res_state state = READ_ONCE(j->reservations);

	if (seq == journal_cur_seq(j))
		return bch2_journal_error(j);

	/* previous entry neither in flight nor on disk - it errored: */
	if (seq + 1 == journal_cur_seq(j) &&
	    !state.prev_buf_unwritten &&
	    seq > j->seq_ondisk)
		return -EIO;

	return 0;
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	/* seq should be for a journal entry that has been opened: */
	BUG_ON(seq > journal_cur_seq(j));
	BUG_ON(seq == journal_cur_seq(j) &&
	       j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

	if (seq == journal_cur_seq(j))
		return journal_cur_buf(j);
	if (seq + 1 == journal_cur_seq(j) &&
	    j->reservations.prev_buf_unwritten)
		return journal_prev_buf(j);
	return NULL;
}

/**
 * bch2_journal_wait_on_seq - wait for a journal entry to be written
 *
 * does _not_ cause @seq to be written immediately - if there is no other
 * activity to cause the relevant journal entry to be filled up or flushed it
 * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
 * configurable).
 */
void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
			      struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if ((buf = journal_seq_to_buf(j, seq))) {
		if (!closure_wait(&buf->wait, parent))
			BUG();

		if (seq == journal_cur_seq(j)) {
			/* recheck for error after adding ourself to the waitlist: */
			if (bch2_journal_error(j))
				closure_wake_up(&buf->wait);
		}
	}

	spin_unlock(&j->lock);
}

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				  struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if (parent &&
	    (buf = journal_seq_to_buf(j, seq)))
		if (!closure_wait(&buf->wait, parent))
			BUG();

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);	/* drops j->lock */
	else
		spin_unlock(&j->lock);
}

static int journal_seq_flushed(struct journal *j, u64 seq)
{
	int ret;

	spin_lock(&j->lock);
	ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);	/* drops j->lock */
	else
		spin_unlock(&j->lock);

	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));

	bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_meta_async - force a journal entry to be written
 */
void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
	struct journal_res res;
	unsigned u64s = jset_u64s(0);

	memset(&res, 0, sizeof(res));

	bch2_journal_res_get(j, &res, u64s, u64s);
	bch2_journal_res_put(j, &res);

	bch2_journal_flush_seq_async(j, res.seq, parent);
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	unsigned u64s = jset_u64s(0);
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, u64s, u64s);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}
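
/*
 * Note that even an empty reservation (jset_u64s(0)) opens and dirties a
 * journal entry; bch2_fs_journal_stop() relies on this to flush dirty btree
 * roots, which ride along with every journal write.
 */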

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	ret = -ENOMEM;
	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq)
		goto err;

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets)
		goto err;

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (c)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);

	if (c)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		long bucket;

		if (new_fs) {
			bucket = bch2_bucket_alloc_new_fs(ca);
			if (bucket < 0) {
				ret = -ENOSPC;
				goto err;
			}
		} else {
			ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
					       false, cl);
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			bucket = sector_to_bucket(ca, ob->ptr.offset);
		}

		if (c) {
			percpu_down_read_preempt_disable(&c->usage_lock);
			spin_lock(&c->journal.lock);
		} else {
			preempt_disable();
		}

		__array_insert_item(ja->buckets,		ja->nr, ja->last_idx);
		__array_insert_item(ja->bucket_seq,		ja->nr, ja->last_idx);
		__array_insert_item(journal_buckets->buckets,	ja->nr, ja->last_idx);

		ja->buckets[ja->last_idx] = bucket;
		ja->bucket_seq[ja->last_idx] = 0;
		journal_buckets->buckets[ja->last_idx] = cpu_to_le64(bucket);

		if (ja->last_idx < ja->nr) {
			if (ja->cur_idx >= ja->last_idx)
				ja->cur_idx++;
			ja->last_idx++;
		}
		ja->nr++;

		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
					  ca->mi.bucket_size,
					  gc_phase(GC_PHASE_SB),
					  new_fs
					  ? BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE
					  : 0);

		if (c) {
			spin_unlock(&c->journal.lock);
			percpu_up_read_preempt_enable(&c->usage_lock);
		} else {
			preempt_enable();
		}

		if (!new_fs)
			bch2_open_bucket_put(c, ob);
	}

	ret = 0;
err:
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */

		if (bch2_disk_reservation_get(c, &disk_res,
				bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/*
	 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 10,
			 (1 << 20) / ca->mi.bucket_size));
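
	/*
	 * e.g. with 1MB buckets (2048 sectors) the second limit gives
	 * (1 << 20) / 2048 = 512 buckets (512MB of journal); with 256k
	 * buckets the 1024 bucket limit applies first.
	 */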

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	struct journal_buf *w;
	bool ret;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	w = j->buf + !state.idx;

	ret = state.prev_buf_unwritten &&
		bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	spin_lock(&j->lock);
	bch2_extent_drop_device(bkey_i_to_s_extent(&j->key), ca->dev_idx);
	spin_unlock(&j->lock);

	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	wait_event(j->wait, journal_entry_close(j));

	/* do we need to write another journal entry? */
	if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
	    c->btree_roots_dirty)
		bch2_journal_meta(j);

	BUG_ON(journal_entry_is_open(j) ||
	       j->reservations.prev_buf_unwritten);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_NOT_EMPTY, &j->flags));

	cancel_delayed_work_sync(&j->write_work);
	cancel_delayed_work_sync(&j->reclaim_work);
}

void bch2_fs_journal_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_seq_blacklist *bl;
	u64 blacklist = 0;

	list_for_each_entry(bl, &j->seq_blacklist, list)
		blacklist = max(blacklist, bl->end);

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);

	while (journal_cur_seq(j) < blacklist)
		journal_pin_new_entry(j, 0);

	/*
	 * journal_buf_switch() only inits the next journal entry when it
	 * closes an open journal entry - the very first journal entry gets
	 * initialized here:
	 */
	journal_pin_new_entry(j, 1);
	bch2_journal_buf_init(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	spin_unlock(&j->lock);

	/*
	 * Adding entries to the next journal entry before allocating space on
	 * disk for the next journal entry - this is ok, because these entries
	 * only have to go down with the next journal entry we write:
	 */
	bch2_journal_seq_blacklist_write(j);

	queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	kvpfree(j->buf[1].data, j->buf[1].size);
	kvpfree(j->buf[0].data, j->buf[0].size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->blacklist_lock);
	INIT_LIST_HEAD(&j->seq_blacklist);
	mutex_init(&j->reclaim_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	j->buf[0].size		= JOURNAL_ENTRY_SIZE_MIN;
	j->buf[1].size		= JOURNAL_ENTRY_SIZE_MIN;
	j->write_delay_ms	= 1000;
	j->reclaim_delay_ms	= 100;

	bkey_extent_init(&j->key);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->buf[0].data = kvpmalloc(j->buf[0].size, GFP_KERNEL)) ||
	    !(j->buf[1].data = kvpmalloc(j->buf[1].size, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

/* debug: */

ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state *s = &j->reservations;
	struct bch_dev *ca;
	unsigned iter;
	ssize_t ret = 0;

	rcu_read_lock();
	spin_lock(&j->lock);

	ret += scnprintf(buf + ret, PAGE_SIZE - ret,
			 "active journal entries:\t%llu\n"
			 "seq:\t\t\t%llu\n"
			 "last_seq:\t\t%llu\n"
			 "last_seq_ondisk:\t%llu\n"
			 "reservation count:\t%u\n"
			 "reservation offset:\t%u\n"
			 "current entry u64s:\t%u\n"
			 "io in flight:\t\t%i\n"
			 "need write:\t\t%i\n"
			 "dirty:\t\t\t%i\n"
			 "replay done:\t\t%i\n",
			 fifo_used(&j->pin),
			 journal_cur_seq(j),
			 journal_last_seq(j),
			 j->last_seq_ondisk,
			 journal_state_count(*s, s->idx),
			 s->cur_entry_offset,
			 j->cur_entry_u64s,
			 s->prev_buf_unwritten,
			 test_bit(JOURNAL_NEED_WRITE, &j->flags),
			 journal_entry_is_open(j),
			 test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	for_each_member_device_rcu(ca, c, iter,
				   &c->rw_devs[BCH_DATA_JOURNAL]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "dev %u:\n"
				 "\tnr\t\t%u\n"
				 "\tcur_idx\t\t%u (seq %llu)\n"
				 "\tlast_idx\t%u (seq %llu)\n",
				 iter, ja->nr,
				 ja->cur_idx,	ja->bucket_seq[ja->cur_idx],
				 ja->last_idx,	ja->bucket_seq[ja->last_idx]);
	}

	spin_unlock(&j->lock);
	rcu_read_unlock();

	return ret;
}

ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	ssize_t ret = 0;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret,
				 "%llu: count %u\n",
				 i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "\t%p %pf\n",
					 pin, pin->flush);

		if (!list_empty(&pin_list->flushed))
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "\t%p %pf\n",
					 pin, pin->flush);
	}
	spin_unlock(&j->lock);

	return ret;
}