// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>

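/*
 * A journal entry is "unwritten" if it was opened after the last sequence
 * number known to have completed its write to disk:
 */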
static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

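/*
 * Initialize the pin list for a newly opened journal entry: @count is the
 * entry's initial reference, dropped by __journal_entry_close() once the
 * entry is closed:
 */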
static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->key_cache_list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/* journal entry close/open: */

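/*
 * Called once the last reference to a journal buffer has been dropped: kicks
 * off the write via bch2_journal_write():
 */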
void __bch2_journal_buf_put(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);

	closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
}

/*
 * Closes the currently open journal entry, marking it with @closed_val:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq		= journal_last_seq(j);
	buf->data->last_seq	= cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	__bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx);
}

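/*
 * Shut the journal down in response to a fatal error: closes the current
 * entry with JOURNAL_ENTRY_ERROR_VAL so no further reservations can be taken,
 * and records the first errored sequence number in j->err_seq:
 */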
void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * -ENOSPC: journal currently full, must invoke reclaim
 * -EAGAIN: journal blocked, must wait
 * -EROFS: insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return cur_entry_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return cur_entry_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return cur_entry_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) - 1)
		return cur_entry_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires =
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= 0)
		return cur_entry_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush		= false;
	buf->must_flush		= false;
	buf->separate_flush	= false;
	buf->flush_time		= 0;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);
		new.cur_entry_offset = 0;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));

	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

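/*
 * Delayed work that closes the current journal entry once its expiry time has
 * been reached, or re-arms itself if the entry hasn't expired yet:
 */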
static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	long delta;

	spin_lock(&j->lock);
	if (!__journal_entry_is_open(j->reservations))
		goto unlock;

	delta = journal_cur_buf(j)->expires - jiffies;

	if (delta > 0)
		mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
	else
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
unlock:
	spin_unlock(&j->lock);
}

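/*
 * Slowpath for journal reservations: closes the current entry and opens a new
 * one, invoking discards/reclaim if the journal is full. Returns 0 on
 * success, -EROFS on journal error, -EAGAIN if the caller should wait and
 * retry:
 */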
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and then call
	 * journal_entry_close() unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (!(flags & JOURNAL_RES_GET_RESERVED) &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = cur_entry_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	ret = journal_entry_open(j);

	if (ret == cur_entry_max_in_flight)
		trace_journal_entry_full(c);
unlock:
	if ((ret && ret != cur_entry_insufficient_devices) &&
	    !j->res_get_blocked_start) {
		j->res_get_blocked_start = local_clock() ?: 1;
		trace_journal_full(c);
	}

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;

	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !can_discard &&
	    !nr_unwritten_journal_entries(j) &&
	    (flags & JOURNAL_RES_GET_RESERVED)) {
		struct printbuf buf = PRINTBUF;

		bch_err(c, "Journal stuck! Have a pre-reservation but journal full");

		bch2_journal_debug_to_text(&buf, j);
		bch_err(c, "%s", buf.buf);

		printbuf_reset(&buf);
		bch2_journal_pins_to_text(&buf, j);
		bch_err(c, "Journal pins:\n%s", buf.buf);

		printbuf_exit(&buf);
		bch2_fatal_error(c);
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. Journal write is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}

/* journal_preres: */

static bool journal_preres_available(struct journal *j,
				     struct journal_preres *res,
				     unsigned new_u64s,
				     unsigned flags)
{
	bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);

	if (!ret && mutex_trylock(&j->reclaim_lock)) {
		bch2_journal_reclaim(j);
		mutex_unlock(&j->reclaim_lock);
	}

	return ret;
}

int __bch2_journal_preres_get(struct journal *j,
			      struct journal_preres *res,
			      unsigned new_u64s,
			      unsigned flags)
{
	int ret;

	closure_wait_event(&j->preres_wait,
		   (ret = bch2_journal_error(j)) ||
		   journal_preres_available(j, res, new_u64s, flags));
	return ret;
}

/* journal_entry_res: */

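/*
 * Adjust the space reserved in each journal entry for a given consumer: the
 * delta is applied to the currently open entry too, closing it if the open
 * reservations no longer fit:
 */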
void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately
 * if the write hasn't started yet
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

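/*
 * Write an empty, flush journal entry: takes a reservation without adding any
 * keys, marks it must_flush, and waits for it to be persisted:
 */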
int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

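/*
 * Append a printf-formatted log message to the journal as a
 * BCH_JSET_ENTRY_log entry, then flush it to disk:
 */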
int bch2_journal_log_msg(struct journal *j, const char *fmt, ...)
{
	struct jset_entry_log *entry;
	struct journal_res res = { 0 };
	unsigned msglen, u64s;
	va_list args;
	int ret;

	va_start(args, fmt);
	msglen = vsnprintf(NULL, 0, fmt, args) + 1;
	va_end(args);

	u64s = jset_u64s(DIV_ROUND_UP(msglen, sizeof(u64)));

	ret = bch2_journal_res_get(j, &res, u64s, 0);
	if (ret)
		return ret;

	entry = container_of(journal_res_entry(j, &res),
			     struct jset_entry_log, entry);
	memset(entry, 0, u64s * sizeof(u64));
	entry->entry.type = BCH_JSET_ENTRY_log;
	entry->entry.u64s = u64s - 1;

	va_start(args, fmt);
	vsnprintf(entry->d, INT_MAX, fmt, args);
	va_end(args);

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/* block/unblock the journal: */

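/*
 * While j->blocked is nonzero, journal_entry_open() fails with
 * cur_entry_blocked and no new journal entries can be opened;
 * bch2_journal_block() also waits for all outstanding writes to complete:
 */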
void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

/* allocate journal on a device: */

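/*
 * Grow the set of journal buckets on @ca to @nr, allocating buckets and
 * updating both the superblock journal field and the in-memory
 * journal_device arrays; @new_fs means the device isn't yet part of a
 * running filesystem:
 */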
static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq) {
		ret = -ENOMEM;
		goto err;
	}

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets) {
		ret = -ENOSPC;
		goto err;
	}

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (!new_fs)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);

	if (!new_fs)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		unsigned pos;
		long b;

		if (new_fs) {
			b = bch2_bucket_alloc_new_fs(ca);
			if (b < 0) {
				ret = -ENOSPC;
				goto err;
			}
		} else {
			ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
					       false, cl);
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			b = ob->bucket;
		}

		if (c)
			spin_lock(&c->journal.lock);

		/*
		 * XXX
		 * For resize at runtime, we should be writing the new
		 * superblock before inserting into the journal array
		 */

		pos = ja->discard_idx ?: ja->nr;
		__array_insert_item(ja->buckets,		ja->nr, pos);
		__array_insert_item(ja->bucket_seq,		ja->nr, pos);
		__array_insert_item(journal_buckets->buckets,	ja->nr, pos);
		ja->nr++;

		ja->buckets[pos] = b;
		ja->bucket_seq[pos] = 0;
		journal_buckets->buckets[pos] = cpu_to_le64(b);

		if (pos <= ja->discard_idx)
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
		if (pos <= ja->dirty_idx_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
		if (pos <= ja->dirty_idx)
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
		if (pos <= ja->cur_idx)
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

		if (c)
			spin_unlock(&c->journal.lock);

		if (!new_fs) {
			ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
				bch2_trans_mark_metadata_bucket(&trans, ca,
						b, BCH_DATA_journal,
						ca->mi.bucket_size));

			bch2_open_bucket_put(c, ob);

			if (ret)
				goto err;
		}
	}
err:
	bch2_sb_resize_journal(&ca->disk_sb,
		ja->nr + sizeof(*journal_buckets) / sizeof(u64));
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it,
 * but the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */

		if (bch2_disk_reservation_get(c, &disk_res,
					      bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

/* startup/shutdown: */

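/*
 * Check whether any unwritten journal entry has a replica targeted at
 * @dev_idx - used by bch2_dev_journal_stop() to wait out in-flight writes to
 * a device:
 */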
static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

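/*
 * Start the journal after recovery: @cur_seq is the sequence number the next
 * opened journal entry will use, @journal_entries the entries read from disk
 * - used here to size the pin FIFO and initialize last_seq/seq_ondisk:
 */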
int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
			  struct list_head *journal_entries)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i;
	u64 last_seq = cur_seq, nr, seq;

	if (!list_empty(journal_entries))
		last_seq = le64_to_cpu(list_last_entry(journal_entries,
				struct journal_replay, list)->j.last_seq);

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	if (list_empty(journal_entries))
		j->last_empty_seq = cur_seq - 1;

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	list_for_each_entry(i, journal_entries, list) {
		unsigned ptr;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
	}

	if (list_empty(journal_entries))
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

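/*
 * Read a device's journal bucket list from its superblock and allocate the
 * per-device journal state:
 */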
int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	unsigned i;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data) {
			ret = -ENOMEM;
			goto out;
		}
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned long now = jiffies;
	u64 seq;
	unsigned i;

	out->atomic++;
	out->tabstops[0] = 24;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	pr_buf(out, "dirty journal entries:\t%llu\n",	fifo_used(&j->pin));
	pr_buf(out, "seq:\t\t\t%llu\n",			journal_cur_seq(j));
	pr_buf(out, "seq_ondisk:\t\t%llu\n",		j->seq_ondisk);
	pr_buf(out, "last_seq:\t\t%llu\n",		journal_last_seq(j));
	pr_buf(out, "last_seq_ondisk:\t%llu\n",		j->last_seq_ondisk);
	pr_buf(out, "flushed_seq_ondisk:\t%llu\n",	j->flushed_seq_ondisk);
	pr_buf(out, "prereserved:\t\t%u/%u\n",		j->prereserved.reserved, j->prereserved.remaining);
	pr_buf(out, "each entry reserved:\t%u\n",	j->entry_u64s_reserved);
	pr_buf(out, "nr flush writes:\t%llu\n",		j->nr_flush_writes);
	pr_buf(out, "nr noflush writes:\t%llu\n",	j->nr_noflush_writes);
	pr_buf(out, "nr direct reclaim:\t%llu\n",	j->nr_direct_reclaim);
	pr_buf(out, "nr background reclaim:\t%llu\n",	j->nr_background_reclaim);
	pr_buf(out, "reclaim kicked:\t\t%u\n",		j->reclaim_kicked);
	pr_buf(out, "reclaim runs in:\t%u ms\n",	time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	pr_buf(out, "current entry sectors:\t%u\n",	j->cur_entry_sectors);
	pr_buf(out, "current entry error:\t%u\n",	j->cur_entry_error);
	pr_buf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(out, "closed");
		break;
	default:
		pr_buf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}

	pr_newline(out);

	for (seq = journal_cur_seq(j);
	     seq >= journal_last_unwritten_seq(j);
	     --seq) {
		i = seq & JOURNAL_BUF_MASK;

		pr_buf(out, "unwritten entry:");
		pr_tab(out);
		pr_buf(out, "%llu", seq);
		pr_newline(out);
		pr_indent_push(out, 2);

		pr_buf(out, "refcount:");
		pr_tab(out);
		pr_buf(out, "%u", journal_state_count(s, i));
		pr_newline(out);

		pr_buf(out, "sectors:");
		pr_tab(out);
		pr_buf(out, "%u", j->buf[i].sectors);
		pr_newline(out);

		pr_buf(out, "expires:");
		pr_tab(out);
		pr_buf(out, "%li jiffies", j->buf[i].expires - jiffies);
		pr_newline(out);

		pr_indent_pop(out, 2);
	}

	pr_buf(out,
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	pr_buf(out, "space:\n");
	pr_buf(out, "\tdiscarded\t%u:%u\n",
	       j->space[journal_space_discarded].next_entry,
	       j->space[journal_space_discarded].total);
	pr_buf(out, "\tclean ondisk\t%u:%u\n",
	       j->space[journal_space_clean_ondisk].next_entry,
	       j->space[journal_space_clean_ondisk].total);
	pr_buf(out, "\tclean\t\t%u:%u\n",
	       j->space[journal_space_clean].next_entry,
	       j->space[journal_space_clean].total);
	pr_buf(out, "\ttotal\t\t%u:%u\n",
	       j->space[journal_space_total].next_entry,
	       j->space[journal_space_total].total);

	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		pr_buf(out, "dev %u:\n",		i);
		pr_buf(out, "\tnr\t\t%u\n",		ja->nr);
		pr_buf(out, "\tbucket size\t%u\n",	ca->mi.bucket_size);
		pr_buf(out, "\tavailable\t%u:%u\n",	bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		pr_buf(out, "\tdiscard_idx\t%u\n",	ja->discard_idx);
		pr_buf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		pr_buf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		pr_buf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	out->atomic--;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	pr_buf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	pr_newline(out);
	pr_indent_push(out, 2);

	list_for_each_entry(pin, &pin_list->list, list) {
		pr_buf(out, "\t%px %ps", pin, pin->flush);
		pr_newline(out);
	}

	list_for_each_entry(pin, &pin_list->key_cache_list, list) {
		pr_buf(out, "\t%px %ps", pin, pin->flush);
		pr_newline(out);
	}

	if (!list_empty(&pin_list->flushed)) {
		pr_buf(out, "flushed:");
		pr_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		pr_buf(out, "\t%px %ps", pin, pin->flush);
		pr_newline(out);
	}

	pr_indent_pop(out, 2);

	out->atomic--;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}