/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */
#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"

#include <trace/events/bcachefs.h>
static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}
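/*
 * The state of the current entry lives entirely in
 * j->reservations.cur_entry_offset: values below JOURNAL_ENTRY_CLOSED_VAL are
 * real offsets into an open entry, while JOURNAL_ENTRY_CLOSED_VAL and
 * JOURNAL_ENTRY_ERROR_VAL are sentinels meaning "closed" and "error/halted".
 * This is what lets journal_res_get_fast() claim space with just a cmpxchg on
 * the packed reservation state.
 */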
static void journal_pin_new_entry(struct journal *j, int count)
{
	struct journal_entry_pin_list *p;

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	p = fifo_push_ref(&j->pin);

	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
}
static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	memset(buf->has_inode, 0, sizeof(buf->has_inode));

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq = cpu_to_le64(journal_cur_seq(j));
}
static inline bool journal_entry_empty(struct jset *j)
{
	struct jset_entry *i;

	if (j->seq != j->last_seq)
		return false;

	vstruct_for_each(j, i)
		if (i->type || i->u64s)
			return false;
	return true;
}
void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	closure_wake_up(&journal_cur_buf(j)->wait);
}
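/*
 * Note the pattern used for updating j->reservations here and below: snapshot
 * the packed state into old and new, modify new, then retry the
 * atomic64_cmpxchg() until it succeeds with the state unchanged underneath
 * us. The open, close and error transitions of the reservation state all go
 * through loops of this form.
 */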
/* journal entry close/open: */

void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
{
	if (!need_write_just_set &&
	    test_bit(JOURNAL_NEED_WRITE, &j->flags))
		bch2_time_stats_update(j->delay_time,
				       j->need_write_time);

	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}

/*
 * Returns true if journal entry is now closed:
 */
static bool __journal_entry_close(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	bool set_need_write = false;
	unsigned sectors;

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return true;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
			/* this entry will never be written: */
			closure_wake_up(&buf->wait);
			return true;
		}

		if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			set_bit(JOURNAL_NEED_WRITE, &j->flags);
			j->need_write_time = local_clock();
			set_need_write = true;
		}

		if (new.prev_buf_unwritten)
			return false;

		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.prev_buf_unwritten = 1;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	bkey_extent_init(&buf->key);

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
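	 *
	 * For example (illustrative numbers): suppose entry 11 were already
	 * open when we computed entry 10's last_seq. A thread could take a
	 * reservation in entry 11, re-pin at seq 11 the data it had pinned at
	 * seq 8, and drop the old pin - entry 10's last_seq would then skip
	 * seq 8 even though the re-pinned data only exists in entry 11. Crash
	 * after writing 10 but before 11 and that data is lost. Setting
	 * last_seq before opening the next entry closes this window.
	 */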
	buf->data->last_seq = cpu_to_le64(journal_last_seq(j));

	if (journal_entry_empty(buf->data))
		clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
	else
		set_bit(JOURNAL_NOT_EMPTY, &j->flags);

	journal_pin_new_entry(j, 1);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx, set_need_write);

	return true;
}
static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = __journal_entry_close(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * -ENOSPC: journal currently full, must invoke reclaim
 * -EAGAIN: journal blocked, must wait
 * -EROFS: insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (j->cur_entry_error)
		return j->cur_entry_error;

	BUG_ON(!j->cur_entry_sectors);

	buf->u64s_reserved = j->entry_u64s_reserved;
	buf->disk_sectors = j->cur_entry_sectors;
	buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return -ENOSPC;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		EBUG_ON(journal_state_count(new, new.idx));

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return -EROFS;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(system_freezable_wq,
			 &j->write_work,
			 msecs_to_jiffies(j->write_delay_ms));

	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	union journal_res_state state = READ_ONCE(j->reservations);
	bool ret = !state.prev_buf_unwritten && !__journal_entry_is_open(state);

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}
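/*
 * journal_write_work() below is the delayed work armed by journal_entry_open()
 * (via mod_delayed_work() with j->write_delay_ms): if nothing else closes the
 * current entry first, this work item closes it so that an otherwise idle
 * journal still gets written out.
 */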
static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_entry_close(j);
}

/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	u64 seq = 0;

	if (!test_bit(h, j->buf[0].has_inode) &&
	    !test_bit(h, j->buf[1].has_inode))
		return 0;

	spin_lock(&j->lock);
	if (test_bit(h, journal_cur_buf(j)->has_inode))
		seq = journal_cur_seq(j);
	else if (test_bit(h, journal_prev_buf(j)->has_inode))
		seq = journal_cur_seq(j) - 1;
	spin_unlock(&j->lock);

	return seq;
}
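/*
 * Illustrative example (not a caller in this file): an fsync-style path can
 * use this to only flush the journal when the inode may actually have
 * unflushed keys - the has_inode filter is a hash, so it can give false
 * positives but never false negatives:
 *
 *	u64 seq = bch2_inode_journal_seq(j, inode_nr);
 *	if (seq)
 *		ret = bch2_journal_flush_seq(j, seq);
 *
 * bch2_journal_flush_seq() is defined further down in this file.
 */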
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	int ret;

	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	if (journal_entry_is_open(j) &&
	    !__journal_entry_close(j)) {
		/*
		 * We failed to get a reservation on the current open journal
		 * entry because it's full, and we can't close it because
		 * there's still a previous one in flight:
		 */
		trace_journal_entry_full(c);
		ret = -EAGAIN;
	} else {
		ret = journal_entry_open(j);
	}

	if ((ret == -EAGAIN || ret == -ENOSPC) &&
	    !j->res_get_blocked_start)
		j->res_get_blocked_start = local_clock() ?: 1;

	spin_unlock(&j->lock);

	if (ret == -ENOSPC) {
		/*
		 * Journal is full - can't rely on reclaim from work item due to
		 * freezing:
		 */
		trace_journal_full(c);
		if (!(flags & JOURNAL_RES_GET_NONBLOCK))
			bch2_journal_reclaim_work(&j->reclaim_work.work);
	}

	return ret;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. Journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));

	return ret;
}
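/*
 * Sketch of the reservation lifecycle (illustrative - the real callers are in
 * the btree update path, c is the struct bch_fs, and bch2_journal_add_keys()
 * is assumed here as the helper that copies keys into the reserved space):
 *
 *	struct journal_res res = { 0 };
 *	int ret;
 *
 *	ret = bch2_journal_res_get(&c->journal, &res,
 *				   jset_u64s(k->k.u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	bch2_journal_add_keys(&c->journal, &res, BTREE_ID_EXTENTS, k);
 *	bch2_journal_res_put(&c->journal, &res);
 *
 * res.seq can then be passed to bch2_journal_flush_seq() if the caller needs
 * the keys on disk before returning.
 */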
/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);

	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}

	spin_unlock(&j->lock);
}
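/*
 * Usage sketch (illustrative, my_res is hypothetical): a subsystem that must
 * be able to emit an entry of up to new_u64s u64s in every journal write
 * keeps a struct journal_entry_res and resizes it when its requirement
 * changes:
 *
 *	static struct journal_entry_res my_res;
 *
 *	bch2_journal_entry_res_resize(j, &my_res, new_u64s);
 *
 * The reserved space is subtracted from what reservations may use in every
 * entry, whether or not the owner ends up using it.
 */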
/* journal flushing: */

u64 bch2_journal_last_unwritten_seq(struct journal *j)
{
	u64 seq;

	spin_lock(&j->lock);
	seq = journal_cur_seq(j);
	if (j->reservations.prev_buf_unwritten)
		seq--;
	spin_unlock(&j->lock);

	return seq;
}

/*
 * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
 * open yet, or wait if we cannot
 *
 * used by the btree interior update machinery, when it needs to write a new
 * btree root - every journal entry contains the roots of all the btrees, so it
 * doesn't need to bother with getting a journal reservation
 */
int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int ret;

	spin_lock(&j->lock);

	/*
	 * Can't try to open more than one sequence number ahead:
	 */
	BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));

	if (journal_cur_seq(j) > seq ||
	    journal_entry_is_open(j)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (journal_cur_seq(j) < seq &&
	    !__journal_entry_close(j)) {
		/* haven't finished writing out the previous one: */
		trace_journal_entry_full(c);
		ret = -EAGAIN;
	} else {
		BUG_ON(journal_cur_seq(j) != seq);

		ret = journal_entry_open(j);
	}

	if ((ret == -EAGAIN || ret == -ENOSPC) &&
	    !j->res_get_blocked_start)
		j->res_get_blocked_start = local_clock() ?: 1;

	if (ret == -EAGAIN || ret == -ENOSPC)
		closure_wait(&j->async_wait, cl);

	spin_unlock(&j->lock);

	if (ret == -ENOSPC) {
		trace_journal_full(c);
		bch2_journal_reclaim_work(&j->reclaim_work.work);
	}

	return ret;
}
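/*
 * Sketch of how a caller (e.g. the btree interior update code) can use the
 * function above, retrying with a closure when the entry can't be opened yet
 * (illustrative pattern, not copied from the real caller):
 *
 *	struct closure cl;
 *	int ret;
 *
 *	closure_init_stack(&cl);
 *
 *	do {
 *		ret = bch2_journal_open_seq_async(j, seq, &cl);
 *		closure_sync(&cl);
 *	} while (ret == -EAGAIN);
 */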
static int journal_seq_error(struct journal *j, u64 seq)
{
	union journal_res_state state = READ_ONCE(j->reservations);

	if (seq == journal_cur_seq(j))
		return bch2_journal_error(j);

	if (seq + 1 == journal_cur_seq(j) &&
	    !state.prev_buf_unwritten &&

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	/* seq should be for a journal entry that has been opened: */
	BUG_ON(seq > journal_cur_seq(j));
	BUG_ON(seq == journal_cur_seq(j) &&
	       j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

	if (seq == journal_cur_seq(j))
		return journal_cur_buf(j);
	if (seq + 1 == journal_cur_seq(j) &&
	    j->reservations.prev_buf_unwritten)
		return journal_prev_buf(j);
	return NULL;
}

/*
 * bch2_journal_wait_on_seq - wait for a journal entry to be written
 *
 * does _not_ cause @seq to be written immediately - if there is no other
 * activity to cause the relevant journal entry to be filled up or flushed it
 * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
 * the journal write delay).
 */
void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
			      struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if ((buf = journal_seq_to_buf(j, seq))) {
		if (!closure_wait(&buf->wait, parent))
			BUG();

		if (seq == journal_cur_seq(j)) {

			if (bch2_journal_error(j))
				closure_wake_up(&buf->wait);

	spin_unlock(&j->lock);

/*
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * the journal entry @seq refers to is still open.
 */
void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				  struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if (parent &&
	    (buf = journal_seq_to_buf(j, seq)))
		if (!closure_wait(&buf->wait, parent))
			BUG();

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);
	spin_unlock(&j->lock);
}

static int journal_seq_flushed(struct journal *j, u64 seq)
{
	int ret;

	spin_lock(&j->lock);
	ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);
	spin_unlock(&j->lock);

	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));

	bch2_time_stats_update(j->flush_seq_time, start_time);
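	/*
	 * Return the wait error (e.g. -ERESTARTSYS if interrupted) if there
	 * was one, otherwise any journal error reported for @seq, otherwise 0:
	 */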
	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_meta_async - force a journal entry to be written
 */
void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
	struct journal_res res;

	memset(&res, 0, sizeof(res));

	bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	bch2_journal_res_put(j, &res);

	bch2_journal_flush_seq_async(j, res.seq, parent);
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
	spin_unlock(&j->lock);

void bch2_journal_block(struct journal *j)
	spin_unlock(&j->lock);
/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;

	/* don't handle reducing nr of buckets yet: */

	new_buckets = kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq = kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq)

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets)

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	spin_lock(&c->journal.lock);

	memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
	memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
	swap(new_buckets, ja->buckets);
	swap(new_bucket_seq, ja->bucket_seq);

	spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;

		bucket = bch2_bucket_alloc_new_fs(ca);

		ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
		ret = cl ? -EAGAIN : -ENOSPC;

		bucket = sector_to_bucket(ca, ob->ptr.offset);

		percpu_down_read_preempt_disable(&c->mark_lock);
		spin_lock(&c->journal.lock);

		__array_insert_item(ja->buckets, ja->nr, ja->last_idx);
		__array_insert_item(ja->bucket_seq, ja->nr, ja->last_idx);
		__array_insert_item(journal_buckets->buckets, ja->nr, ja->last_idx);

		ja->buckets[ja->last_idx] = bucket;
		ja->bucket_seq[ja->last_idx] = 0;
		journal_buckets->buckets[ja->last_idx] = cpu_to_le64(bucket);

		if (ja->last_idx < ja->nr) {
			if (ja->cur_idx >= ja->last_idx)

		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
					  gc_phase(GC_PHASE_SB),

		spin_unlock(&c->journal.lock);
		percpu_up_read_preempt_enable(&c->mark_lock);

		bch2_open_bucket_put(c, ob);

	kfree(new_bucket_seq);
/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		current_nr = ja->nr;

		mutex_lock(&c->sb_lock);

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */
		if (bch2_disk_reservation_get(c, &disk_res,
				bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/*
	 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
	 * is smaller:
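	 *
	 * For example (illustrative numbers): with 256k buckets the 512MB
	 * bound works out to 2048 buckets, so the 1024 bucket limit is what
	 * caps a large device - a device with 2^22 buckets would ask for
	 * 2^22 >> 8 = 16384 and be clamped down to 1024 - while a small
	 * device is rounded up to BCH_JOURNAL_BUCKETS_MIN.
	 */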
	nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 10,
			 (1 << 20) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}
/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	struct journal_buf *w;
	bool ret;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	w = j->buf + !state.idx;

	ret = state.prev_buf_unwritten &&
		bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	wait_event(j->wait, journal_entry_close(j));

	/* do we need to write another journal entry? */
	if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
	    c->btree_roots_dirty)
		bch2_journal_meta(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_NOT_EMPTY, &j->flags));

	cancel_delayed_work_sync(&j->write_work);
	cancel_delayed_work_sync(&j->reclaim_work);
}
void bch2_fs_journal_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_seq_blacklist *bl;
	u64 blacklist = 0;

	list_for_each_entry(bl, &j->seq_blacklist, list)
		blacklist = max(blacklist, bl->end);

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);

	while (journal_cur_seq(j) < blacklist)
		journal_pin_new_entry(j, 0);

	/*
	 * __journal_entry_close() only inits the next journal entry when it
	 * closes an open journal entry - the very first journal entry gets
	 * initialized here:
	 */
	journal_pin_new_entry(j, 1);
	bch2_journal_buf_init(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	/*
	 * Adding entries to the next journal entry before allocating space on
	 * disk for the next journal entry - this is ok, because these entries
	 * only have to go down with the next journal entry we write:
	 */
	bch2_journal_seq_blacklist_write(j);

	queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
}
void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio = NULL;
	ca->journal.buckets = NULL;
	ca->journal.bucket_seq = NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}
void bch2_fs_journal_exit(struct journal *j)
{
	kvpfree(j->buf[1].data, j->buf[1].buf_size);
	kvpfree(j->buf[0].data, j->buf[0].buf_size);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->blacklist_lock);
	INIT_LIST_HEAD(&j->seq_blacklist);
	mutex_init(&j->reclaim_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	j->buf[0].buf_size = JOURNAL_ENTRY_SIZE_MIN;
	j->buf[1].buf_size = JOURNAL_ENTRY_SIZE_MIN;
	j->write_delay_ms = 1000;
	j->reclaim_delay_ms = 100;

	j->entry_u64s_reserved +=
		BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->buf[0].data = kvpmalloc(j->buf[0].buf_size, GFP_KERNEL)) ||
	    !(j->buf[1].data = kvpmalloc(j->buf[1].buf_size, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}
ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned iter;

	spin_lock(&j->lock);
	s = READ_ONCE(j->reservations);

	       "active journal entries:\t%llu\n"
	       "last_seq:\t\t%llu\n"
	       "last_seq_ondisk:\t%llu\n"
	       "current entry:\t\t",
	       journal_last_seq(j),
	       j->last_seq_ondisk);

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(&out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(&out, "closed\n");
		break;
	default:
		pr_buf(&out, "%u/%u\n",

	       "current entry refs:\t%u\n"
	       "prev entry unwritten:\t",
	       journal_state_count(s, s.idx));

	if (s.prev_buf_unwritten)
		pr_buf(&out, "yes, ref %u\n",
		       journal_state_count(s, !s.idx));
	else
		pr_buf(&out, "no\n");

	       "need write:\t\t%i\n"
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_NEED_WRITE, &j->flags),
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	for_each_member_device_rcu(ca, c, iter,
				   &c->rw_devs[BCH_DATA_JOURNAL]) {
		struct journal_device *ja = &ca->journal;

		       "\tavailable\t%u:%u\n"
		       "\tcur_idx\t\t%u (seq %llu)\n"
		       "\tlast_idx\t%u (seq %llu)\n",
		       bch2_journal_dev_buckets_available(j, ja),
		       ja->cur_idx, ja->bucket_seq[ja->cur_idx],
		       ja->last_idx, ja->bucket_seq[ja->last_idx]);

	spin_unlock(&j->lock);

	return out.pos - buf;
}
ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
{
	struct printbuf out = _PBUF(buf, PAGE_SIZE);
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		pr_buf(&out, "%llu: count %u\n",
		       i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			pr_buf(&out, "\t%p %pf\n",

		if (!list_empty(&pin_list->flushed))
			pr_buf(&out, "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			pr_buf(&out, "\t%p %pf\n",
	}
	spin_unlock(&j->lock);

	return out.pos - buf;
}