// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "buckets.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>

static u64 last_unwritten_seq(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);

	lockdep_assert_held(&j->lock);

	return journal_cur_seq(j) - ((s.idx - s.unwritten_idx) & JOURNAL_BUF_MASK);
}

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq >= last_unwritten_seq(j);
}

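/*
 * Worked example (editor's illustration, assuming JOURNAL_BUF_NR == 4, i.e.
 * JOURNAL_BUF_MASK == 3): if journal_cur_seq(j) == 10, s.idx == 2 and
 * s.unwritten_idx == 0, then (2 - 0) & 3 == 2 entries are still in flight,
 * so last_unwritten_seq() returns 8: seqs 8 and 9 are unwritten, 10 is the
 * currently open entry, and everything below 8 has been written to disk.
 */
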
static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

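/*
 * Editor's note: cur_entry_offset doubles as a state encoding - per
 * journal_types.h, JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL are
 * the two largest values the field can hold, so a single comparison against
 * JOURNAL_ENTRY_CLOSED_VAL distinguishes a genuinely open entry from both
 * sentinel states.
 */
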
static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));
	EBUG_ON(seq == journal_cur_seq(j) &&
		j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}

	return buf;
}

static void journal_pin_new_entry(struct journal *j, int count)
{
	struct journal_entry_pin_list *p;

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	p = fifo_push_ref(&j->pin);

	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	bkey_extent_init(&buf->key);
	buf->noflush		= false;
	buf->must_flush		= false;
	buf->separate_flush	= false;

	memset(buf->has_inode, 0, sizeof(buf->has_inode));

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;
}

void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	closure_wake_up(&journal_cur_buf(j)->wait);
}

/* journal entry close/open: */

void __bch2_journal_buf_put(struct journal *j)
{
	closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}

/*
 * Returns true if journal entry is now closed:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static bool __journal_entry_close(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return true;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
			/* this entry will never be written: */
			closure_wake_up(&buf->wait);
			return true;
		}

		if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			set_bit(JOURNAL_NEED_WRITE, &j->flags);
			j->need_write_time = local_clock();
		}

		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.idx++;

		if (new.idx == new.unwritten_idx)
			return false;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	/* Close out old buffer: */
	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->data->last_seq	= cpu_to_le64(journal_last_seq(j));

	__bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));

	/* Initialize new buffer: */
	journal_pin_new_entry(j, 1);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);
	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx);
	return true;
}

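/*
 * Editor's summary: on success the loop above atomically marks the old buf
 * closed and bumps new.idx to the next buf; we then stamp u64s/last_seq into
 * the old buf, open (pin + init) the new one, and drop the open-entry
 * reference on the old buf - when that refcount hits zero,
 * __bch2_journal_buf_put() kicks off the actual write.
 */
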
static bool journal_entry_want_write(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	bool ret = false;

	/*
	 * Don't close it yet if we already have a write in flight, but do set
	 * NEED_WRITE:
	 */
	if (s.idx != s.unwritten_idx)
		set_bit(JOURNAL_NEED_WRITE, &j->flags);
	else
		ret = __journal_entry_close(j);

	return ret;
}

static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * 0:		success
 * -ENOSPC:	journal currently full, must invoke reclaim
 * -EAGAIN:	journal blocked, must wait
 * -EROFS:	insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (j->blocked)
		return cur_entry_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	BUG_ON(!j->cur_entry_sectors);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return cur_entry_journal_full;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return cur_entry_insufficient_devices;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);

		EBUG_ON(journal_state_count(new, new.idx));
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(system_freezable_wq,
			 &j->write_work,
			 msecs_to_jiffies(j->write_delay_ms));
	journal_wake(j);
	return 0;
}

static bool journal_quiesced(struct journal *j)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	bool ret = s.idx == s.unwritten_idx && !__journal_entry_is_open(s);

	if (!ret)
		journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_entry_close(j);
}

/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	union journal_res_state s;
	unsigned i;
	u64 seq;

	spin_lock(&j->lock);
	seq = journal_cur_seq(j);
	s = READ_ONCE(j->reservations);
	i = s.idx;

	while (1) {
		if (test_bit(h, j->buf[i].has_inode))
			goto out;

		if (i == s.unwritten_idx)
			break;

		i = (i - 1) & JOURNAL_BUF_MASK;
		seq--;
	}

	seq = 0;
out:
	spin_unlock(&j->lock);

	return seq;
}

void bch2_journal_set_has_inum(struct journal *j, u64 inode, u64 seq)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if ((buf = journal_seq_to_buf(j, seq)))
		set_bit(h, buf->has_inode);

	spin_unlock(&j->lock);
}

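/*
 * Usage sketch (editor's illustration, not a claim about the actual callers):
 * a sync path could pair the two helpers above - look up the seq covering an
 * inode, then flush it:
 *
 *	u64 seq = bch2_inode_journal_seq(j, inum);
 *	if (seq)
 *		ret = bch2_journal_flush_seq(j, seq);
 */
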
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily:
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (!(flags & JOURNAL_RES_GET_RESERVED) &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = cur_entry_journal_full;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	if (journal_entry_is_open(j) &&
	    !__journal_entry_close(j)) {
		/*
		 * We failed to get a reservation on the current open journal
		 * entry because it's full, and we can't close it because
		 * there's still a previous one in flight:
		 */
		trace_journal_entry_full(c);
		ret = cur_entry_blocked;
	} else {
		ret = journal_entry_open(j);
	}
unlock:
	if ((ret && ret != cur_entry_insufficient_devices) &&
	    !j->res_get_blocked_start) {
		j->res_get_blocked_start = local_clock() ?: 1;
		trace_journal_full(c);
	}

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == cur_entry_journal_full ||
	     ret == cur_entry_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
		}
	}

	return ret == cur_entry_insufficient_devices ? -EROFS : -EAGAIN;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is
 * doing a btree insert, it calls this function to get the current journal
 * write. The journal_res is the structure used to set up journal writes. The
 * calling function will then add its keys to the structure, queuing them for
 * the next journal write:
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}

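/*
 * Usage sketch (editor's illustration): the minimal reservation lifecycle,
 * mirroring bch2_journal_meta() below - reserve space, optionally copy keys
 * into the open buffer at res.offset, then drop the reference:
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
 *	if (ret)
 *		return ret;
 *	bch2_journal_res_put(j, &res);
 */
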
/* journal_preres: */

static bool journal_preres_available(struct journal *j,
				     struct journal_preres *res,
				     unsigned new_u64s,
				     unsigned flags)
{
	bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags);

	if (!ret && mutex_trylock(&j->reclaim_lock)) {
		bch2_journal_reclaim(j);
		mutex_unlock(&j->reclaim_lock);
	}

	return ret;
}

int __bch2_journal_preres_get(struct journal *j,
			      struct journal_preres *res,
			      unsigned new_u64s,
			      unsigned flags)
{
	int ret;

	closure_wait_event(&j->preres_wait,
		   (ret = bch2_journal_error(j)) ||
		   journal_preres_available(j, res, new_u64s, flags));
	return ret;
}

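/*
 * Editor's note: a journal_preres doesn't take a reference on the currently
 * open entry - it only guarantees, by prodding reclaim to keep
 * j->prereserved topped up, that space will be available later. Paths that
 * must not deadlock against journal reclaim take a preres up front and
 * convert it into a real journal_res at commit time.
 */
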
/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

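/*
 * Usage sketch (editor's illustration): this is how a subsystem reserves
 * space in every future journal entry, growing or shrinking the reservation
 * as its needs change:
 *
 *	static struct journal_entry_res my_res;	// res.u64s starts at 0
 *	bch2_journal_entry_res_resize(j, &my_res, new_u64s);
 *
 * my_res is hypothetical; in-tree, the replicas code maintains such a
 * reservation so every entry has room for replicas entries.
 */
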
/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately
 * if necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, last_unwritten_seq(j));

recheck_need_open:
	if (seq == journal_cur_seq(j) && !journal_entry_is_open(j)) {
		struct journal_res res = { 0 };

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;
		set_bit(JOURNAL_NEED_WRITE, &j->flags);

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}

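/*
 * Editor's summary of the return convention above: 1 means seq is already
 * flushed on disk and there is nothing to wait for, 0 means a flush was
 * queued and parent (if non-NULL) will be woken when the write completes,
 * and a negative value is an error (e.g. -EIO once seq is at or past
 * j->err_seq).
 */
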
int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	ret = wait_event_killable(j->wait,
		(ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}

int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}

/* block/unblock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

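/*
 * Editor's note: j->blocked is a counter, so block/unblock pairs may nest.
 * While it's nonzero, journal_entry_open() refuses to open new entries
 * (cur_entry_blocked), and bch2_journal_block() additionally waits for any
 * in-flight writes to drain via journal_quiesce().
 */
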
/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq) {
		ret = -ENOMEM;
		goto err;
	}

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets) {
		ret = -ENOSPC;
		goto err;
	}

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (!new_fs)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);

	if (!new_fs)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		unsigned pos;
		long bucket;

		if (new_fs) {
			bucket = bch2_bucket_alloc_new_fs(ca);
			if (bucket < 0) {
				ret = -ENOSPC;
				goto err;
			}
		} else {
			ob = bch2_bucket_alloc(c, ca, RESERVE_NONE,
					       false, cl);
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			bucket = sector_to_bucket(ca, ob->ptr.offset);
		}

		if (c) {
			percpu_down_read(&c->mark_lock);
			spin_lock(&c->journal.lock);
		}

		/*
		 * XXX
		 * For resize at runtime, we should be writing the new
		 * superblock before inserting into the journal array:
		 */
		pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
		__array_insert_item(ja->buckets,		ja->nr, pos);
		__array_insert_item(ja->bucket_seq,		ja->nr, pos);
		__array_insert_item(journal_buckets->buckets,	ja->nr, pos);
		ja->nr++;

		ja->buckets[pos] = bucket;
		ja->bucket_seq[pos] = 0;
		journal_buckets->buckets[pos] = cpu_to_le64(bucket);

		if (pos <= ja->discard_idx)
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
		if (pos <= ja->dirty_idx_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
		if (pos <= ja->dirty_idx)
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
		if (pos <= ja->cur_idx)
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

		if (new_fs)
			bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
						  ca->mi.bucket_size,
						  gc_phase(GC_PHASE_SB),
						  0);

		if (c) {
			spin_unlock(&c->journal.lock);
			percpu_up_read(&c->mark_lock);
		}

		if (!new_fs) {
			ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
				bch2_trans_mark_metadata_bucket(&trans, NULL, ca,
						bucket, BCH_DATA_journal,
						ca->mi.bucket_size));

			bch2_open_bucket_put(c, ob);

			if (ret)
				goto err;
		}
	}
err:
	bch2_sb_resize_journal(&ca->disk_sb,
		ja->nr + sizeof(*journal_buckets) / sizeof(u64));
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_
		 * used yet, so we don't need the disk reservation to avoid
		 * the BUG_ON() in buckets.c when space used goes up without a
		 * reservation - but we do need the reservation to ensure
		 * we'll actually be able to allocate:
		 */
		if (bch2_disk_reservation_get(c, &disk_res,
					      bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/*
	 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 10,
			 (1 << 20) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}

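/*
 * Worked example (editor's illustration): with 1MB buckets (2048 sectors) on
 * a device with 262144 buckets, nbuckets >> 8 == 1024 but the upper bound
 * min(1 << 10, (1 << 20) / 2048) == 512 wins, giving 512 journal buckets,
 * i.e. 512MB of journal.
 */
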
/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	bool ret = false;
	unsigned i;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	i = state.idx;

	while (i != state.unwritten_idx) {
		i = (i - 1) & JOURNAL_BUF_MASK;
		if (bch2_bkey_has_device(bkey_i_to_s_c(&j->buf[i].key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are
	 * up to date (and match the superblock):
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       (journal_entry_is_open(j) ||
		j->last_empty_seq + 1 != journal_cur_seq(j)));

	cancel_delayed_work_sync(&j->write_work);
	bch2_journal_reclaim_stop(j);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
			  struct list_head *journal_entries)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i;
	u64 last_seq = cur_seq, nr, seq;

	if (!list_empty(journal_entries))
		last_seq = le64_to_cpu(list_last_entry(journal_entries,
				struct journal_replay, list)->j.last_seq);

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq) {
		INIT_LIST_HEAD(&p->list);
		INIT_LIST_HEAD(&p->flushed);
		atomic_set(&p->count, 1);
		p->devs.nr = 0;
	}

	list_for_each_entry(i, journal_entries, list) {
		unsigned ptr;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		for (ptr = 0; ptr < i->nr_ptrs; ptr++)
			bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
	}

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	journal_pin_new_entry(j, 1);

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);

	bch2_journal_buf_init(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return 0;
}

/* init/exit: */

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvpfree(j->buf[i].data, j->buf[i].buf_size);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	unsigned i;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	j->write_delay_ms	= 1000;
	j->reclaim_delay_ms	= 100;

	/* Btree roots: */
	j->entry_u64s_reserved +=
		BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data) {
			ret = -ENOMEM;
			goto out;
		}
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	pr_buf(out,
	       "active journal entries:\t%llu\n"
	       "seq:\t\t\t%llu\n"
	       "last_seq:\t\t%llu\n"
	       "last_seq_ondisk:\t%llu\n"
	       "prereserved:\t\t%u/%u\n"
	       "nr flush writes:\t%llu\n"
	       "nr noflush writes:\t%llu\n"
	       "nr direct reclaim:\t%llu\n"
	       "nr background reclaim:\t%llu\n"
	       "current entry sectors:\t%u\n"
	       "current entry error:\t%u\n"
	       "current entry:\t\t",
	       fifo_used(&j->pin),
	       journal_cur_seq(j),
	       journal_last_seq(j),
	       j->last_seq_ondisk,
	       j->prereserved.reserved,
	       j->prereserved.remaining,
	       j->nr_flush_writes,
	       j->nr_noflush_writes,
	       j->nr_direct_reclaim,
	       j->nr_background_reclaim,
	       j->cur_entry_sectors,
	       j->cur_entry_error);

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(out, "closed\n");
		break;
	default:
		pr_buf(out, "%u/%u\n",
		       s.cur_entry_offset,
		       j->cur_entry_u64s);
		break;
	}

	pr_buf(out,
	       "current entry:\t\tidx %u refcount %u\n",
	       s.idx, journal_state_count(s, s.idx));

	i = s.idx;
	while (i != s.unwritten_idx) {
		i = (i - 1) & JOURNAL_BUF_MASK;

		pr_buf(out, "unwritten entry:\tidx %u refcount %u sectors %u\n",
		       i, journal_state_count(s, i), j->buf[i].sectors);
	}

1213 "need write:\t\t%i\n"
1214 "replay done:\t\t%i\n",
1215 test_bit(JOURNAL_NEED_WRITE, &j->flags),
1216 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
1218 pr_buf(out, "space:\n");
1219 pr_buf(out, "\tdiscarded\t%u:%u\n",
1220 j->space[journal_space_discarded].next_entry,
1221 j->space[journal_space_discarded].total);
1222 pr_buf(out, "\tclean ondisk\t%u:%u\n",
1223 j->space[journal_space_clean_ondisk].next_entry,
1224 j->space[journal_space_clean_ondisk].total);
1225 pr_buf(out, "\tclean\t\t%u:%u\n",
1226 j->space[journal_space_clean].next_entry,
1227 j->space[journal_space_clean].total);
1228 pr_buf(out, "\ttotal\t\t%u:%u\n",
1229 j->space[journal_space_total].next_entry,
1230 j->space[journal_space_total].total);
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		pr_buf(out,
		       "dev %u:\n"
		       "\tnr\t\t%u\n"
		       "\tbucket size\t%u\n"
		       "\tavailable\t%u:%u\n"
		       "\tdiscard_idx\t%u\n"
		       "\tdirty_ondisk\t%u (seq %llu)\n"
		       "\tdirty_idx\t%u (seq %llu)\n"
		       "\tcur_idx\t\t%u (seq %llu)\n",
		       i, ja->nr, ca->mi.bucket_size,
		       bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
		       ja->sectors_free,
		       ja->discard_idx,
		       ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk],
		       ja->dirty_idx, ja->bucket_seq[ja->dirty_idx],
		       ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		pr_buf(out, "%llu: count %u\n",
		       i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);

		if (!list_empty(&pin_list->flushed))
			pr_buf(out, "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);
	}
	spin_unlock(&j->lock);
}