// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "buckets.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

#include <trace/events/bcachefs.h>
static inline struct journal_buf *journal_seq_to_buf(struct journal *, u64);
static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}
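
/*
 * Journal entry state is packed into a single 64-bit word (j->reservations)
 * so it can be read and updated locklessly. As a rough sketch, every state
 * transition in this file follows this compare-and-exchange loop (see
 * __journal_entry_close() and journal_entry_open() for the real thing):
 *
 *	union journal_res_state old, new;
 *	u64 v = atomic64_read(&j->reservations.counter);
 *
 *	do {
 *		old.v = new.v = v;
 *		// modify new: cur_entry_offset, idx, prev_buf_unwritten...
 *	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
 *				       old.v, new.v)) != old.v);
 */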
static void journal_pin_new_entry(struct journal *j, int count)
{
	struct journal_entry_pin_list *p;

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	p = fifo_push_ref(&j->pin);

	INIT_LIST_HEAD(&p->list);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
}
static void bch2_journal_buf_init(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);

	memset(buf->has_inode, 0, sizeof(buf->has_inode));

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;
}
void bch2_journal_halt(struct journal *j)
{
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return;

		new.cur_entry_offset = JOURNAL_ENTRY_ERROR_VAL;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	journal_wake(j);
	closure_wake_up(&journal_cur_buf(j)->wait);
}
/* journal entry close/open: */
void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
{
	if (!need_write_just_set &&
	    test_bit(JOURNAL_NEED_WRITE, &j->flags))
		bch2_time_stats_update(j->delay_time,
				       j->need_write_time);

	clear_bit(JOURNAL_NEED_WRITE, &j->flags);

	closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}
/*
 * Returns true if journal entry is now closed:
 */
static bool __journal_entry_close(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	bool set_need_write = false;
	unsigned sectors;

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		if (old.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL)
			return true;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL) {
			/* this entry will never be written: */
			closure_wake_up(&buf->wait);
			return true;
		}

		if (!test_bit(JOURNAL_NEED_WRITE, &j->flags)) {
			set_bit(JOURNAL_NEED_WRITE, &j->flags);
			j->need_write_time = local_clock();
			set_need_write = true;
		}

		if (new.prev_buf_unwritten)
			return false;

		new.cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL;
		new.idx++;
		new.prev_buf_unwritten = 1;

		BUG_ON(journal_state_count(new, new.idx));
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	buf->data->u64s		= cpu_to_le32(old.cur_entry_offset);

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	bkey_extent_init(&buf->key);

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->data->last_seq	= cpu_to_le64(journal_last_seq(j));

	if (journal_entry_empty(buf->data))
		clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
	else
		set_bit(JOURNAL_NOT_EMPTY, &j->flags);

	journal_pin_new_entry(j, 1);

	bch2_journal_buf_init(j);

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	bch2_journal_buf_put(j, old.idx, set_need_write);
	return true;
}
static bool journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = __journal_entry_close(j);
	spin_unlock(&j->lock);

	return ret;
}
/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 *
 * returns:
 * 0:		success
 * -ENOSPC:	journal currently full, must invoke reclaim
 * -EAGAIN:	journal blocked, must wait
 * -EROFS:	insufficient rw devices or journal error
 */
static int journal_entry_open(struct journal *j)
{
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));

	if (j->blocked)
		return -EAGAIN;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	BUG_ON(!j->cur_entry_sectors);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= le32_to_cpu(buf->data->u64s))
		return -ENOSPC;

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
			return -EROFS;

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);

		EBUG_ON(journal_state_count(new, new.idx));
		journal_state_inc(&new);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (j->res_get_blocked_start)
		bch2_time_stats_update(j->blocked_time,
				       j->res_get_blocked_start);
	j->res_get_blocked_start = 0;

	mod_delayed_work(system_freezable_wq,
			 &j->write_work,
			 msecs_to_jiffies(j->write_delay_ms));
	journal_wake(j);
	return 0;
}
static bool journal_quiesced(struct journal *j)
{
	union journal_res_state state = READ_ONCE(j->reservations);
	bool ret = !state.prev_buf_unwritten && !__journal_entry_is_open(state);

	if (!ret)
		journal_entry_close(j);
	return ret;
}
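
/*
 * Note: when the journal is still busy, journal_quiesced() closes the current
 * entry as a side effect, so the journal_quiesce() waiter below makes progress
 * instead of waiting for the entry to fill up or for the write_work timer.
 */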
static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}
static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	journal_entry_close(j);
}
/*
 * Given an inode number, if that inode number has data in the journal that
 * hasn't yet been flushed, return the journal sequence number that needs to be
 * flushed:
 */
u64 bch2_inode_journal_seq(struct journal *j, u64 inode)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	u64 seq = 0;

	if (!test_bit(h, j->buf[0].has_inode) &&
	    !test_bit(h, j->buf[1].has_inode))
		return 0;

	spin_lock(&j->lock);
	if (test_bit(h, journal_cur_buf(j)->has_inode))
		seq = journal_cur_seq(j);
	else if (test_bit(h, journal_prev_buf(j)->has_inode))
		seq = journal_cur_seq(j) - 1;
	spin_unlock(&j->lock);

	return seq;
}
void bch2_journal_set_has_inum(struct journal *j, u64 inode, u64 seq)
{
	size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if ((buf = journal_seq_to_buf(j, seq)))
		set_bit(h, buf->has_inode);

	spin_unlock(&j->lock);
}
static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if (bch2_journal_error(j))
		return -EROFS;

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (!(flags & JOURNAL_RES_GET_RESERVED) &&
	    !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
		/*
		 * Don't want to close current journal entry, just need to
		 * invoke reclaim:
		 */
		ret = -ENOSPC;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	if (journal_entry_is_open(j) &&
	    !__journal_entry_close(j)) {
		/*
		 * We failed to get a reservation on the current open journal
		 * entry because it's full, and we can't close it because
		 * there's still a previous one in flight:
		 */
		trace_journal_entry_full(c);
		ret = -EAGAIN;
	} else {
		ret = journal_entry_open(j);
	}
unlock:
	if ((ret == -EAGAIN || ret == -ENOSPC) &&
	    !j->res_get_blocked_start)
		j->res_get_blocked_start = local_clock() ?: 1;

	can_discard = j->can_discard;
	spin_unlock(&j->lock);

	if (!ret)
		goto retry;

	if (ret == -ENOSPC) {
		WARN_ONCE(!can_discard && (flags & JOURNAL_RES_GET_RESERVED),
			  "JOURNAL_RES_GET_RESERVED set but journal full");

		/*
		 * Journal is full - can't rely on reclaim from work item due to
		 * freezing:
		 */
		trace_journal_full(c);

		if (!(flags & JOURNAL_RES_GET_NONBLOCK)) {
			if (can_discard) {
				bch2_journal_do_discards(j);
				goto retry;
			}

			if (mutex_trylock(&j->reclaim_lock)) {
				bch2_journal_reclaim(j);
				mutex_unlock(&j->reclaim_lock);
			}
		}

		ret = -EAGAIN;
	}

	return ret;
}
/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));

	return ret;
}
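
/*
 * A minimal sketch of caller usage (nr_u64s here is hypothetical - the size of
 * the keys being journalled; bch2_journal_meta() below is a real in-tree
 * example with nr_u64s == 0):
 *
 *	struct journal_res res = { 0 };
 *	int ret;
 *
 *	ret = bch2_journal_res_get(j, &res, jset_u64s(nr_u64s), 0);
 *	if (ret)
 *		return ret;
 *	// ...copy keys into the space reserved at res.offset...
 *	bch2_journal_res_put(j, &res);
 */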
/* journal_preres: */
static bool journal_preres_available(struct journal *j,
				     struct journal_preres *res,
				     unsigned new_u64s,
				     unsigned flags)
{
	bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags);

	if (!ret)
		bch2_journal_reclaim_work(&j->reclaim_work.work);

	return ret;
}
int __bch2_journal_preres_get(struct journal *j,
			      struct journal_preres *res,
			      unsigned new_u64s,
			      unsigned flags)
{
	int ret;

	closure_wait_event(&j->preres_wait,
		   (ret = bch2_journal_error(j)) ||
		   journal_preres_available(j, res, new_u64s, flags));

	return ret;
}
/* journal_entry_res: */
void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}
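
/*
 * Example (illustrative numbers): growing a journal_entry_res from 0 to 16
 * u64s bumps j->entry_u64s_reserved by 16 and shrinks the space usable by
 * ordinary reservations; if the currently open entry has already filled past
 * the new limit it is closed above, so the larger reservation takes effect
 * starting with the next entry.
 */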
/* journal flushing: */
u64 bch2_journal_last_unwritten_seq(struct journal *j)
{
	u64 seq;

	spin_lock(&j->lock);
	seq = journal_cur_seq(j);
	if (j->reservations.prev_buf_unwritten)
		seq--;
	spin_unlock(&j->lock);

	return seq;
}
/**
 * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
 * open yet, or wait if we cannot
 *
 * used by the btree interior update machinery, when it needs to write a new
 * btree root - every journal entry contains the roots of all the btrees, so it
 * doesn't need to bother with getting a journal reservation
 */
int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	int ret;

	spin_lock(&j->lock);

	/*
	 * Can't try to open more than one sequence number ahead:
	 */
	BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));

	if (journal_cur_seq(j) > seq ||
	    journal_entry_is_open(j)) {
		spin_unlock(&j->lock);
		return 0;
	}

	if (journal_cur_seq(j) < seq &&
	    !__journal_entry_close(j)) {
		/* haven't finished writing out the previous one: */
		trace_journal_entry_full(c);
		ret = -EAGAIN;
	} else {
		BUG_ON(journal_cur_seq(j) != seq);

		ret = journal_entry_open(j);
	}

	if ((ret == -EAGAIN || ret == -ENOSPC) &&
	    !j->res_get_blocked_start)
		j->res_get_blocked_start = local_clock() ?: 1;

	if (ret == -EAGAIN || ret == -ENOSPC)
		closure_wait(&j->async_wait, cl);

	spin_unlock(&j->lock);

	if (ret == -ENOSPC) {
		trace_journal_full(c);
		bch2_journal_reclaim_work(&j->reclaim_work.work);
		ret = -EAGAIN;
	}

	return ret;
}
static int journal_seq_error(struct journal *j, u64 seq)
{
	union journal_res_state state = READ_ONCE(j->reservations);

	if (seq == journal_cur_seq(j))
		return bch2_journal_error(j);

	if (seq + 1 == journal_cur_seq(j) &&
	    !state.prev_buf_unwritten &&
	    seq > j->seq_ondisk)
		return -EIO;

	return 0;
}
static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	/* seq should be for a journal entry that has been opened: */
	BUG_ON(seq > journal_cur_seq(j));
	BUG_ON(seq == journal_cur_seq(j) &&
	       j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);

	if (seq == journal_cur_seq(j))
		return journal_cur_buf(j);
	if (seq + 1 == journal_cur_seq(j) &&
	    j->reservations.prev_buf_unwritten)
		return journal_prev_buf(j);
	return NULL;
}
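
/*
 * Only two journal bufs exist, so at most two sequence numbers can have a buf:
 * the currently open entry (journal_cur_seq(j)) and, while its write is still
 * in flight, the previous one; anything older returns NULL above.
 */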
/**
 * bch2_journal_wait_on_seq - wait for a journal entry to be written
 *
 * does _not_ cause @seq to be written immediately - if there is no other
 * activity to cause the relevant journal entry to be filled up or flushed it
 * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
 * configurable)
 */
void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
			      struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if ((buf = journal_seq_to_buf(j, seq))) {
		if (!closure_wait(&buf->wait, parent))
			BUG();

		if (seq == journal_cur_seq(j)) {
			smp_mb();
			if (bch2_journal_error(j))
				closure_wake_up(&buf->wait);
		}
	}

	spin_unlock(&j->lock);
}
/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 *
 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				  struct closure *parent)
{
	struct journal_buf *buf;

	spin_lock(&j->lock);

	if (parent &&
	    (buf = journal_seq_to_buf(j, seq)))
		if (!closure_wait(&buf->wait, parent))
			BUG();

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);
	spin_unlock(&j->lock);
}
static int journal_seq_flushed(struct journal *j, u64 seq)
{
	int ret;

	spin_lock(&j->lock);
	ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);
	spin_unlock(&j->lock);

	return ret;
}
int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));

	bch2_time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}
/**
 * bch2_journal_meta_async - force a journal entry to be written
 */
void bch2_journal_meta_async(struct journal *j, struct closure *parent)
{
	struct journal_res res;

	memset(&res, 0, sizeof(res));

	bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	bch2_journal_res_put(j, &res);

	bch2_journal_flush_seq_async(j, res.seq, parent);
}
int bch2_journal_meta(struct journal *j)
{
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}
/**
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return;
	}
	spin_unlock(&j->lock);

	bch2_journal_flush_seq_async(j, seq, parent);
}
int bch2_journal_flush(struct journal *j)
{
	u64 seq, journal_seq;

	spin_lock(&j->lock);
	journal_seq = journal_cur_seq(j);

	if (journal_entry_is_open(j)) {
		seq = journal_seq;
	} else if (journal_seq) {
		seq = journal_seq - 1;
	} else {
		spin_unlock(&j->lock);
		return 0;
	}
	spin_unlock(&j->lock);

	return bch2_journal_flush_seq(j, seq);
}
/* block/unlock the journal: */
void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}
void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}
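
/*
 * bch2_journal_block()/bch2_journal_unblock() nest: j->blocked is a count, and
 * while it is nonzero journal_entry_open() returns -EAGAIN, so no new journal
 * entry can be opened until the last blocker calls bch2_journal_unblock().
 */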
/* allocate journal on a device: */
static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	int ret = 0;

	/* don't handle reducing nr of buckets yet: */
	if (nr <= ja->nr)
		return 0;

	ret = -ENOMEM;
	new_buckets	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kzalloc(nr * sizeof(u64), GFP_KERNEL);
	if (!new_buckets || !new_bucket_seq)
		goto err;

	journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
				nr + sizeof(*journal_buckets) / sizeof(u64));
	if (!journal_buckets)
		goto err;

	/*
	 * We may be called from the device add path, before the new device has
	 * actually been added to the running filesystem:
	 */
	if (c)
		spin_lock(&c->journal.lock);

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));
	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);

	if (c)
		spin_unlock(&c->journal.lock);

	while (ja->nr < nr) {
		struct open_bucket *ob = NULL;
		unsigned pos;
		long bucket;

		if (new_fs) {
			bucket = bch2_bucket_alloc_new_fs(ca);
			if (bucket < 0) {
				ret = -ENOSPC;
				goto err;
			}
		} else {
			ob = bch2_bucket_alloc(c, ca, RESERVE_ALLOC,
					       false, cl);
			if (IS_ERR(ob)) {
				ret = cl ? -EAGAIN : -ENOSPC;
				goto err;
			}

			bucket = sector_to_bucket(ca, ob->ptr.offset);
		}

		if (c) {
			percpu_down_read(&c->mark_lock);
			spin_lock(&c->journal.lock);
		}

		pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
		__array_insert_item(ja->buckets,		ja->nr, pos);
		__array_insert_item(ja->bucket_seq,		ja->nr, pos);
		__array_insert_item(journal_buckets->buckets,	ja->nr, pos);
		ja->nr++;

		ja->buckets[pos] = bucket;
		ja->bucket_seq[pos] = 0;
		journal_buckets->buckets[pos] = cpu_to_le64(bucket);

		if (pos <= ja->discard_idx)
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
		if (pos <= ja->dirty_idx_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
		if (pos <= ja->dirty_idx)
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
		if (pos <= ja->cur_idx)
			ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

		bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
					  ca->mi.bucket_size,
					  gc_phase(GC_PHASE_SB),
					  0);

		if (c) {
			spin_unlock(&c->journal.lock);
			percpu_up_read(&c->mark_lock);
		}

		if (!new_fs)
			bch2_open_bucket_put(c, ob);
	}

	ret = 0;
err:
	kfree(new_bucket_seq);
	kfree(new_buckets);

	return ret;
}
/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	unsigned current_nr;
	int ret;

	closure_init_stack(&cl);

	do {
		struct disk_reservation disk_res = { 0, 0 };

		closure_sync(&cl);

		mutex_lock(&c->sb_lock);
		current_nr = ja->nr;

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 */
		if (bch2_disk_reservation_get(c, &disk_res,
				bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
			mutex_unlock(&c->sb_lock);
			return -ENOSPC;
		}

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		if (ja->nr != current_nr)
			bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
	} while (ret == -EAGAIN);

	return ret;
}
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;

	if (dynamic_fault("bcachefs:add:journal_alloc"))
		return -ENOMEM;

	/*
	 * clamp journal size to 1024 buckets or 512MB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, ca->mi.nbuckets >> 8,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 10,
			 (1 << 20) / ca->mi.bucket_size));

	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
}
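
/*
 * Worked example for the clamp above (illustrative bucket sizes): with 256KiB
 * buckets (512 sectors), (1 << 20) / 512 = 2048 exceeds the 1 << 10 cap, so
 * the journal is limited to 1024 buckets; with 2MiB buckets (4096 sectors),
 * the (1 << 20) sector (512MiB) limit wins at 256 buckets.
 */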
/* startup/shutdown: */
static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	union journal_res_state state;
	struct journal_buf *w;
	bool ret;

	spin_lock(&j->lock);
	state = READ_ONCE(j->reservations);
	w = j->buf + !state.idx;

	ret = state.prev_buf_unwritten &&
		bch2_bkey_has_device(bkey_i_to_s_c(&w->key), dev_idx);
	spin_unlock(&j->lock);

	return ret;
}
void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}
void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, journal_entry_close(j));

	/* do we need to write another journal entry? */
	if (test_bit(JOURNAL_NOT_EMPTY, &j->flags))
		bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_NOT_EMPTY, &j->flags));

	cancel_delayed_work_sync(&j->write_work);
	cancel_delayed_work_sync(&j->reclaim_work);
}
int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
			  struct list_head *journal_entries)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i;
	u64 last_seq = cur_seq, nr, seq;

	if (!list_empty(journal_entries))
		last_seq = le64_to_cpu(list_last_entry(journal_entries,
				struct journal_replay, list)->j.last_seq);

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -ENOMEM;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq) {
		INIT_LIST_HEAD(&p->list);
		INIT_LIST_HEAD(&p->flushed);
		atomic_set(&p->count, 1);
		p->devs.nr = 0;
	}

	list_for_each_entry(i, journal_entries, list) {
		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		journal_seq_pin(j, seq)->devs = i->devs;
	}

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);

	journal_pin_new_entry(j, 1);
	bch2_journal_buf_init(j);

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return 0;
}
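
/*
 * Note on bch2_fs_journal_start() above: after it runs, the pin fifo directly
 * mirrors sequence numbers - j->pin.front == last_seq and j->pin.back ==
 * cur_seq - which is what lets journal_last_seq()/journal_seq_pin() index pin
 * lists by sequence number.
 */

/* init/exit: */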
void bch2_dev_journal_exit(struct bch_dev *ca)
{
	kfree(ca->journal.bio);
	kfree(ca->journal.buckets);
	kfree(ca->journal.bucket_seq);

	ca->journal.bio		= NULL;
	ca->journal.buckets	= NULL;
	ca->journal.bucket_seq	= NULL;
}
int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_get_journal(sb);
	unsigned i;

	ja->nr = bch2_nr_journal_buckets(journal_buckets);

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -ENOMEM;

	ca->journal.bio = bio_kmalloc(GFP_KERNEL,
			DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE));
	if (!ca->journal.bio)
		return -ENOMEM;

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -ENOMEM;

	for (i = 0; i < ja->nr; i++)
		ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);

	return 0;
}
void bch2_fs_journal_exit(struct journal *j)
{
	kvpfree(j->buf[1].data, j->buf[1].buf_size);
	kvpfree(j->buf[0].data, j->buf[0].buf_size);
	free_fifo(&j->pin);
}
int bch2_fs_journal_init(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	static struct lock_class_key res_key;
	int ret = 0;

	pr_verbose_init(c->opts, "");

	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	j->buf[0].buf_size	= JOURNAL_ENTRY_SIZE_MIN;
	j->buf[1].buf_size	= JOURNAL_ENTRY_SIZE_MIN;
	j->write_delay_ms	= 1000;
	j->reclaim_delay_ms	= 100;

	/* Btree roots: */
	j->entry_u64s_reserved +=
		BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_EXTENT_U64s_MAX);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->buf[0].data = kvpmalloc(j->buf[0].buf_size, GFP_KERNEL)) ||
	    !(j->buf[1].data = kvpmalloc(j->buf[1].buf_size, GFP_KERNEL))) {
		ret = -ENOMEM;
		goto out;
	}

	j->pin.front = j->pin.back = 1;
out:
	pr_verbose_init(c->opts, "ret %i", ret);
	return ret;
}
/* debug: */

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	struct bch_dev *ca;
	unsigned iter;

	rcu_read_lock();
	spin_lock(&j->lock);
	s = READ_ONCE(j->reservations);

	pr_buf(out,
	       "active journal entries:\t%llu\n"
	       "seq:\t\t\t%llu\n"
	       "last_seq:\t\t%llu\n"
	       "last_seq_ondisk:\t%llu\n"
	       "prereserved:\t\t%u/%u\n"
	       "current entry sectors:\t%u\n"
	       "current entry:\t\t",
	       fifo_used(&j->pin),
	       journal_cur_seq(j),
	       journal_last_seq(j),
	       j->last_seq_ondisk,
	       j->prereserved.reserved,
	       j->prereserved.remaining,
	       j->cur_entry_sectors);

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		pr_buf(out, "error\n");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		pr_buf(out, "closed\n");
		break;
	default:
		pr_buf(out, "%u/%u\n",
		       s.cur_entry_offset,
		       j->cur_entry_u64s);
		break;
	}

	pr_buf(out,
	       "current entry refs:\t%u\n"
	       "prev entry unwritten:\t",
	       journal_state_count(s, s.idx));

	if (s.prev_buf_unwritten)
		pr_buf(out, "yes, ref %u sectors %u\n",
		       journal_state_count(s, !s.idx),
		       journal_prev_buf(j)->sectors);
	else
		pr_buf(out, "no\n");

	pr_buf(out,
	       "need write:\t\t%i\n"
	       "replay done:\t\t%i\n",
	       test_bit(JOURNAL_NEED_WRITE, &j->flags),
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	for_each_member_device_rcu(ca, c, iter,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		pr_buf(out,
		       "dev %u:\n"
		       "\tnr\t\t%u\n"
		       "\tavailable\t%u:%u\n"
		       "\tdiscard_idx\t\t%u\n"
		       "\tdirty_idx_ondisk\t%u (seq %llu)\n"
		       "\tdirty_idx\t\t%u (seq %llu)\n"
		       "\tcur_idx\t\t%u (seq %llu)\n",
		       iter, ja->nr,
		       bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
		       ja->sectors_free,
		       ja->discard_idx,
		       ja->dirty_idx_ondisk,	ja->bucket_seq[ja->dirty_idx_ondisk],
		       ja->dirty_idx,		ja->bucket_seq[ja->dirty_idx],
		       ja->cur_idx,		ja->bucket_seq[ja->cur_idx]);
	}

	spin_unlock(&j->lock);
	rcu_read_unlock();
}
void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;
	u64 i;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
		pr_buf(out, "%llu: count %u\n",
		       i, atomic_read(&pin_list->count));

		list_for_each_entry(pin, &pin_list->list, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);

		if (!list_empty(&pin_list->flushed))
			pr_buf(out, "flushed:\n");

		list_for_each_entry(pin, &pin_list->flushed, list)
			pr_buf(out, "\t%px %ps\n",
			       pin, pin->flush);
	}
	spin_unlock(&j->lock);
}