// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
static u64 last_unwritten_seq(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);

	lockdep_assert_held(&j->lock);

	return journal_cur_seq(j) - ((s.idx - s.unwritten_idx) & JOURNAL_BUF_MASK);
}

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq >= last_unwritten_seq(j);
}
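
/*
 * Worked example (illustrative, not taken from actual state): with
 * JOURNAL_BUF_NR == 4 ring buffer slots, if journal_cur_seq(j) == 10,
 * s.idx == 2 and s.unwritten_idx == 0, then (2 - 0) & JOURNAL_BUF_MASK == 2
 * and last_unwritten_seq() returns 10 - 2 == 8: seqs 8 and 9 have writes in
 * flight (slots 0 and 1 via journal_seq_to_buf()), and seq 10 is the
 * currently open entry in slot 2.
 */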
static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}
static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));
	EBUG_ON(seq == journal_cur_seq(j) &&
		j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}
static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->key_cache_list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}
static void journal_pin_new_entry(struct journal *j)
{
	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);
}
static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	bkey_extent_init(&buf->key);
	buf->noflush	= false;
	buf->must_flush	= false;
	buf->separate_flush = false;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;
}
void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	/*
	 * XXX: we're not using j->lock here because this can be called from
	 * interrupt context, this can race with journal_write_done()
	 */
	if (!j->err_seq && journal_entry_is_open(j))
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	closure_wake_up(&journal_cur_buf(j)->wait);
}
/* journal entry close/open: */
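
/*
 * Overview (explanatory note): the journal entry state lives in a single
 * 64-bit word, union journal_res_state, updated locklessly with
 * atomic64_cmpxchg(). cur_entry_offset is either a real offset in u64s into
 * the open entry, or one of two sentinels: JOURNAL_ENTRY_CLOSED_VAL (no open
 * entry) or JOURNAL_ENTRY_ERROR_VAL (journal halted, reservations fail).
 * idx and unwritten_idx delimit the ring of journal_bufs whose writes are
 * still in flight.
 */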
void __bch2_journal_buf_put(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}
/*
 * Returns true if journal entry is now closed:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static bool __journal_entry_close(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return true;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
			/* this entry will never be written: */
			closure_wake_up(&buf->wait);
			return true;
		}

		if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			set_bit(JOURNAL_NEED_WRITE, &j->flags);
			j->need_write_time = local_clock();
		}

		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.idx++;

		if (new.idx == new.unwritten_idx)
			return false;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);

	__bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

	/* Initialize new buffer: */
	journal_pin_new_entry(j);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);
	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx);
	return true;
}
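
/*
 * Note (explanatory): the bch2_journal_buf_put() above drops the reference
 * taken when the entry was opened (journal_state_inc() in
 * journal_entry_open()); once every reservation holder has dropped theirs as
 * well, __bch2_journal_buf_put() kicks off the actual write via
 * closure_call().
 */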
static bool journal_entry_want_write(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	bool ret = false;

	/*
	 * Don't close it yet if we already have a write in flight, but do set
	 * NEED_WRITE:
	 */
	if (s.idx != s.unwritten_idx)
		set_bit(JOURNAL_NEED_WRITE, &j->flags);
	else
		ret = __journal_entry_close(j);

	return ret;
}
static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}
/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * -ENOSPC: journal currently full, must invoke reclaim
 * -EAGAIN: journal blocked, must wait
 * -EROFS: insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (j->blocked)
		return cur_entry_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	BUG_ON(!j->cur_entry_sectors);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return cur_entry_journal_full;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return cur_entry_insufficient_devices;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);

		EBUG_ON(journal_state_count(new, new.idx));
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
	journal_wake(j);
	return 0;
}
static bool journal_quiesced(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	bool ret = s.idx == s.unwritten_idx && !__journal_entry_is_open(s);

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}
static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_entry_close(j);
}
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (!(flags & JOURNAL_RES_GET_RESERVED) &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = cur_entry_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	if (journal_entry_is_open(j) &&
	    !__journal_entry_close(j)) {
		/*
		 * We failed to get a reservation on the current open journal
		 * entry because it's full, and we can't close it because
		 * there's still a previous one in flight:
		 */
		trace_journal_entry_full(c);
		ret = cur_entry_blocked;
	} else {
		ret = journal_entry_open(j);
	}
unlock:
	if ((ret && ret != cur_entry_insufficient_devices) &&
	    !j->res_get_blocked_start) {
		j->res_get_blocked_start = local_clock() ?: 1;
		trace_journal_full(c);
	}

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;

	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !can_discard &&
	    j->reservations.idx == j->reservations.unwritten_idx &&
	    (flags & JOURNAL_RES_GET_RESERVED)) {
		char *journal_debug_buf = kmalloc(4096, GFP_ATOMIC);

		bch_err(c, "Journal stuck!");
		if (journal_debug_buf) {
			bch2_journal_debug_to_text(&_PBUF(journal_debug_buf, 4096), j);
			bch_err(c, "%s", journal_debug_buf);

			bch2_journal_pins_to_text(&_PBUF(journal_debug_buf, 4096), j);
			bch_err(c, "Journal pins:\n%s", journal_debug_buf);
			kfree(journal_debug_buf);
		}

		bch2_fatal_error(c);
		dump_stack();

		return -EROFS;
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
}
/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
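
/*
 * Typical caller usage (illustrative sketch, assuming the reservation helpers
 * in journal.h - bch2_journal_res_get(), bch2_journal_add_keys(),
 * bch2_journal_res_put()):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(k->k.u64s), 0);
 *	if (ret)
 *		return ret;
 *	bch2_journal_add_keys(j, &res, btree_id, k);	// copy keys in
 *	bch2_journal_res_put(j, &res);			// release; may kick a write
 */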
/* journal_preres: */
static bool journal_preres_available(struct journal *j,
				     struct journal_preres *res,
				     unsigned new_u64s,
				     unsigned flags)
{
	bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

	if (!ret && mutex_trylock(&j->reclaim_lock)) {
		bch2_journal_reclaim(j);
		mutex_unlock(&j->reclaim_lock);
	}

	return ret;
}
int __bch2_journal_preres_get(struct journal *j,
			      struct journal_preres *res,
			      unsigned new_u64s,
			      unsigned flags)
{
	int ret;

	closure_wait_event(&j->preres_wait,
		   (ret = bch2_journal_error(j)) ||
		   journal_preres_available(j, res, new_u64s, flags));
	return ret;
}
/* journal_entry_res: */
void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}
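
/*
 * Usage note (explanatory): a journal_entry_res reserves space that must be
 * available in _every_ journal entry - e.g. for the btree root entries
 * bcachefs writes into each journal entry - so resizing one adjusts
 * j->entry_u64s_reserved and may have to close the currently open entry if
 * it can no longer honor the reservation.
 */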
/* journal flushing: */
/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, last_unwritten_seq(j));
recheck_need_open:
	if (seq == journal_cur_seq(j) && !journal_entry_is_open(j)) {
		struct journal_res res = { 0 };

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;
		set_bit(JOURNAL_NEED_WRITE, &j->flags);

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	/* an interrupted wait wins; otherwise surface an async error, else success: */
	return ret ?: ret2 < 0 ? ret2 : 0;
}
int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}
/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}
int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}
/* block/unlock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_entry_close(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
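
/*
 * Pairing note (explanatory): bch2_journal_block() increments j->blocked,
 * which makes journal_entry_open() fail with cur_entry_blocked, then waits
 * for in-flight writes to drain via journal_quiesce(); each call must be
 * paired with bch2_journal_unblock() to decrement the count and allow new
 * journal entries to be opened again.
 */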
/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq) {
		ret = -ENOMEM;
		goto err;
	}

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets) {
		ret = -ENOSPC;
		goto err;
	}

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (!new_fs)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);

	if (!new_fs)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		unsigned pos;
		long b;

		if (new_fs) {
			percpu_down_read(&c->mark_lock);
			b = bch2_bucket_alloc_new_fs(ca);
			if (b < 0) {
				percpu_up_read(&c->mark_lock);
				ret = -ENOSPC;
				goto err;
			}
		} else {
			ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
					       false, cl);
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			b = sector_to_bucket(ca, ob->ptr.offset);
		}

		if (!new_fs)
			spin_lock(&c->journal.lock);

		/*
		 * XXX
		 * For resize at runtime, we should be writing the new
		 * superblock before inserting into the journal array
		 */

		pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
		__array_insert_item(ja->buckets,		ja->nr, pos);
		__array_insert_item(ja->bucket_seq,		ja->nr, pos);
		__array_insert_item(journal_buckets->buckets,	ja->nr, pos);
		ja->nr++;

		ja->buckets[pos] = b;
		ja->bucket_seq[pos] = 0;
		journal_buckets->buckets[pos] = cpu_to_le64(b);

		if (pos <= ja->discard_idx)
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
		if (pos <= ja->dirty_idx_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
		if (pos <= ja->dirty_idx)
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
		if (pos <= ja->cur_idx)
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

		if (!new_fs)
			spin_unlock(&c->journal.lock);

		if (new_fs) {
			bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
						  ca->mi.bucket_size,
						  gc_phase(GC_PHASE_SB),
						  0);
			percpu_up_read(&c->mark_lock);
		} else {
			ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
				bch2_trans_mark_metadata_bucket(&trans, ca,
						b, BCH_DATA_journal,
						ca->mi.bucket_size));

			bch2_open_bucket_put(c, ob);

			if (ret)
				goto err;
		}
	}
err:
	bch2_sb_resize_journal(&ca->disk_sb,
		ja->nr + sizeof(*journal_buckets) / sizeof(u64));
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}
/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code kept around for later use
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */

		if (bch2_disk_reservation_get(c, &disk_res,
					      bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}
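
/*
 * Worked example (illustrative): with 512-byte sectors, (1 << 24) sectors is
 * 8GB, so for a device with 1MB buckets (bucket_size == 2048 sectors) the
 * upper clamp is min(8192, 16777216 / 2048) == min(8192, 8192) == 8192
 * buckets; larger buckets hit the 8GB limit first, smaller buckets the
 * 8192-bucket limit.
 */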
/* startup/shutdown: */
static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	bool ret = false;
	unsigned i;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	i = state.idx;

	while (i != state.unwritten_idx) {
		i = (i - 1) & JOURNAL_BUF_MASK;
		if (bch2_bkey_has_device(bkey_i_to_s_c(&j->buf[i].key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}
void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}
void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       (journal_entry_is_open(j) ||
		j->last_empty_seq + 1 != journal_cur_seq(j)));

	cancel_delayed_work_sync(&j->write_work);
	bch2_journal_reclaim_stop(j);
}
int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
			  struct list_head *journal_entries)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i;
	u64 last_seq = cur_seq, nr, seq;

	if (!list_empty(journal_entries))
		last_seq = le64_to_cpu(list_last_entry(journal_entries,
				struct journal_replay, list)->j.last_seq);

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	list_for_each_entry(i, journal_entries, list) {
		unsigned ptr;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
	}

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	journal_pin_new_entry(j);

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);

	bch2_journal_buf_init(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}
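
/*
 * Note (explanatory): the pin fifo is seeded so that its indices correspond
 * directly to journal sequence numbers - j->pin.front == last_seq and
 * j->pin.back == cur_seq - which is what lets journal_seq_pin() look up a
 * pin list by seq during replay.
 */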
/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}
int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}
void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}
int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	unsigned i;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data) {
			ret = -ENOMEM;
			goto out;
		}
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}
/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned long now = jiffies;
	unsigned i;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	pr_buf(out, "active journal entries:\t%llu\n",	fifo_used(&j->pin));
	pr_buf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
	pr_buf(out, "last_seq:\t\t%llu\n",		journal_last_seq(j));
	pr_buf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	pr_buf(out, "flushed_seq_ondisk:\t%llu\n",	j->flushed_seq_ondisk);
	pr_buf(out, "prereserved:\t\t%u/%u\n",		j->prereserved.reserved, j->prereserved.remaining);
	pr_buf(out, "each entry reserved:\t%u\n",	j->entry_u64s_reserved);
	pr_buf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	pr_buf(out, "nr noflush writes:\t%llu\n",	j->nr_noflush_writes);
	pr_buf(out, "nr direct reclaim:\t%llu\n",	j->nr_direct_reclaim);
	pr_buf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	pr_buf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
	pr_buf(out, "reclaim runs in:\t%u ms\n",	time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	pr_buf(out, "current entry sectors:\t%u\n",	j->cur_entry_sectors);
	pr_buf(out, "current entry error:\t%u\n",	j->cur_entry_error);
	pr_buf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(out, "closed\n");
		break;
	default:
		pr_buf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	pr_buf(out, "current entry:\t\tidx %u refcount %u\n", s.idx, journal_state_count(s, s.idx));

	i = s.idx;
	while (i != s.unwritten_idx) {
		i = (i - 1) & JOURNAL_BUF_MASK;

		pr_buf(out, "unwritten entry:\tidx %u refcount %u sectors %u\n",
		       i, journal_state_count(s, i), j->buf[i].sectors);
	}

	pr_buf(out,
	       "need write:\t\t%i\n"
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_NEED_WRITE,	&j->flags),
	       test_bit(JOURNAL_REPLAY_DONE,	&j->flags));

	pr_buf(out, "space:\n");
	pr_buf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	pr_buf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	pr_buf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	pr_buf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		pr_buf(out, "dev %u:\n",		i);
		pr_buf(out, "\tnr\t\t%u\n",		ja->nr);
		pr_buf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		pr_buf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		pr_buf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		pr_buf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		pr_buf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		pr_buf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();
}
void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}
void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		pr_buf(out, "%llu: count %u\n",
		       i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);

		if (!list_empty(&pin_list->flushed))
			pr_buf(out, "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);
	}
	spin_unlock(&j->lock);
}