1 // SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
5 * Copyright 2012 Google, Inc.
9 #include "alloc_foreground.h"
10 #include "bkey_methods.h"
12 #include "btree_update.h"
16 #include "journal_io.h"
17 #include "journal_reclaim.h"
18 #include "journal_sb.h"
19 #include "journal_seq_blacklist.h"
21 #include <trace/events/bcachefs.h>
24 static const char * const bch2_journal_watermarks[] = {
29 static const char * const bch2_journal_errors[] = {
35 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
37 return seq > j->seq_ondisk;
40 static bool __journal_entry_is_open(union journal_res_state state)
42 return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
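/*
 * cur_entry_offset doubles as the open/closed state of the current entry:
 * values below JOURNAL_ENTRY_CLOSED_VAL are an offset into an open entry,
 * while JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL are sentinels:
 */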
45 static inline unsigned nr_unwritten_journal_entries(struct journal *j)
47 return atomic64_read(&j->seq) - j->seq_ondisk;
50 static bool journal_entry_is_open(struct journal *j)
52 return __journal_entry_is_open(j->reservations);
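/*
 * In-memory journal entries live in a small ring of buffers: an unwritten
 * sequence number maps to its buffer via seq & JOURNAL_BUF_MASK:
 */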
55 static inline struct journal_buf *
56 journal_seq_to_buf(struct journal *j, u64 seq)
58 struct journal_buf *buf = NULL;
60 EBUG_ON(seq > journal_cur_seq(j));
62 if (journal_seq_unwritten(j, seq)) {
63 buf = j->buf + (seq & JOURNAL_BUF_MASK);
64 EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
69 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
71 INIT_LIST_HEAD(&p->list);
72 INIT_LIST_HEAD(&p->key_cache_list);
73 INIT_LIST_HEAD(&p->flushed);
74 atomic_set(&p->count, count);
78 /* journal entry close/open: */
80 void __bch2_journal_buf_put(struct journal *j)
82 struct bch_fs *c = container_of(j, struct bch_fs, journal);
84 closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
 * Close the current journal entry (mark it closed or errored):
90 * We don't close a journal_buf until the next journal_buf is finished writing,
91 * and can be opened again - this also initializes the next journal_buf:
93 static void __journal_entry_close(struct journal *j, unsigned closed_val)
95 struct bch_fs *c = container_of(j, struct bch_fs, journal);
96 struct journal_buf *buf = journal_cur_buf(j);
97 union journal_res_state old, new;
98 u64 v = atomic64_read(&j->reservations.counter);
101 BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
102 closed_val != JOURNAL_ENTRY_ERROR_VAL);
104 lockdep_assert_held(&j->lock);
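	/*
	 * Transition cur_entry_offset to @closed_val with a cmpxchg loop:
	 * other CPUs may be taking reservations against the old state, so
	 * retry until the update applies cleanly (or until someone else has
	 * already closed/errored the entry):
	 */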
108 new.cur_entry_offset = closed_val;
110 if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
111 old.cur_entry_offset == new.cur_entry_offset)
113 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
114 old.v, new.v)) != old.v);
116 if (!__journal_entry_is_open(old))
119 /* Close out old buffer: */
120 buf->data->u64s = cpu_to_le32(old.cur_entry_offset);
122 sectors = vstruct_blocks_plus(buf->data, c->block_bits,
123 buf->u64s_reserved) << c->block_bits;
124 BUG_ON(sectors > buf->sectors);
125 buf->sectors = sectors;
128 * We have to set last_seq here, _before_ opening a new journal entry:
 * A thread may replace an old pin with a new pin on its current
131 * journal reservation - the expectation being that the journal will
132 * contain either what the old pin protected or what the new pin
135 * After the old pin is dropped journal_last_seq() won't include the old
136 * pin, so we can only write the updated last_seq on the entry that
137 * contains whatever the new pin protects.
139 * Restated, we can _not_ update last_seq for a given entry if there
140 * could be a newer entry open with reservations/pins that have been
 * Hence, we want to update/set last_seq on the current journal entry right
144 * before we open a new one:
146 buf->last_seq = journal_last_seq(j);
147 buf->data->last_seq = cpu_to_le64(buf->last_seq);
148 BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
150 __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));
152 cancel_delayed_work(&j->write_work);
154 bch2_journal_space_available(j);
156 bch2_journal_buf_put(j, old.idx);
159 void bch2_journal_halt(struct journal *j)
162 __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
164 j->err_seq = journal_cur_seq(j);
165 spin_unlock(&j->lock);
168 static bool journal_entry_want_write(struct journal *j)
170 bool ret = !journal_entry_is_open(j) ||
171 journal_cur_seq(j) == journal_last_unwritten_seq(j);
173 /* Don't close it yet if we already have a write in flight: */
175 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
176 else if (nr_unwritten_journal_entries(j)) {
177 struct journal_buf *buf = journal_cur_buf(j);
179 if (!buf->flush_time) {
180 buf->flush_time = local_clock() ?: 1;
181 buf->expires = jiffies;
188 static bool journal_entry_close(struct journal *j)
193 ret = journal_entry_want_write(j);
194 spin_unlock(&j->lock);
 * should _only_ be called from journal_res_get() - when we actually want a
201 * journal reservation - journal entry is open means journal is dirty:
203 static int journal_entry_open(struct journal *j)
205 struct bch_fs *c = container_of(j, struct bch_fs, journal);
206 struct journal_buf *buf = j->buf +
207 ((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
208 union journal_res_state old, new;
212 lockdep_assert_held(&j->lock);
213 BUG_ON(journal_entry_is_open(j));
214 BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
217 return JOURNAL_ERR_blocked;
219 if (j->cur_entry_error)
220 return j->cur_entry_error;
222 if (bch2_journal_error(j))
223 return JOURNAL_ERR_insufficient_devices; /* -EROFS */
225 if (!fifo_free(&j->pin))
226 return JOURNAL_ERR_journal_pin_full;
228 if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
229 return JOURNAL_ERR_max_in_flight;
231 BUG_ON(!j->cur_entry_sectors);
	buf->expires =
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);
239 buf->u64s_reserved = j->entry_u64s_reserved;
240 buf->disk_sectors = j->cur_entry_sectors;
241 buf->sectors = min(buf->disk_sectors, buf->buf_size >> 9);
243 u64s = (int) (buf->sectors << 9) / sizeof(u64) -
244 journal_entry_overhead(j);
245 u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
247 if (u64s <= (ssize_t) j->early_journal_entries.nr)
248 return JOURNAL_ERR_journal_full;
250 if (fifo_empty(&j->pin) && j->reclaim_thread)
251 wake_up_process(j->reclaim_thread);
254 * The fifo_push() needs to happen at the same time as j->seq is
255 * incremented for journal_last_seq() to be calculated correctly
257 atomic64_inc(&j->seq);
258 journal_pin_list_init(fifo_push_ref(&j->pin), 1);
260 BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
262 bkey_extent_init(&buf->key);
263 buf->noflush = false;
264 buf->must_flush = false;
265 buf->separate_flush = false;
268 memset(buf->data, 0, sizeof(*buf->data));
269 buf->data->seq = cpu_to_le64(journal_cur_seq(j));
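	/*
	 * Entries accumulated before the journal was started (e.g. during
	 * recovery) are copied into the first entry we open:
	 */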
272 if (j->early_journal_entries.nr) {
273 memcpy(buf->data->_data, j->early_journal_entries.data,
274 j->early_journal_entries.nr * sizeof(u64));
275 le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
279 * Must be set before marking the journal entry as open:
281 j->cur_entry_u64s = u64s;
283 v = atomic64_read(&j->reservations.counter);
287 BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);
290 BUG_ON(journal_state_count(new, new.idx));
291 BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
293 journal_state_inc(&new);
295 /* Handle any already added entries */
296 new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
297 } while ((v = atomic64_cmpxchg(&j->reservations.counter,
298 old.v, new.v)) != old.v);
300 if (j->res_get_blocked_start)
301 bch2_time_stats_update(j->blocked_time,
302 j->res_get_blocked_start);
303 j->res_get_blocked_start = 0;
	mod_delayed_work(c->io_complete_wq,
			 &j->write_work,
			 msecs_to_jiffies(c->opts.journal_flush_delay));
310 if (j->early_journal_entries.nr)
311 darray_exit(&j->early_journal_entries);
315 static bool journal_quiesced(struct journal *j)
317 bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
320 journal_entry_close(j);
324 static void journal_quiesce(struct journal *j)
326 wait_event(j->wait, journal_quiesced(j));
329 static void journal_write_work(struct work_struct *work)
331 struct journal *j = container_of(work, struct journal, write_work.work);
332 struct bch_fs *c = container_of(j, struct bch_fs, journal);
336 if (!__journal_entry_is_open(j->reservations))
339 delta = journal_cur_buf(j)->expires - jiffies;
342 mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
344 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
346 spin_unlock(&j->lock);
349 static int __journal_res_get(struct journal *j, struct journal_res *res,
352 struct bch_fs *c = container_of(j, struct bch_fs, journal);
353 struct journal_buf *buf;
357 if (journal_res_get_fast(j, res, flags))
360 if (bch2_journal_error(j))
361 return -BCH_ERR_erofs_journal_err;
366 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call journal_entry_close()
	 * unnecessarily
	 */
370 if (journal_res_get_fast(j, res, flags)) {
371 spin_unlock(&j->lock);
375 if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark) {
377 * Don't want to close current journal entry, just need to
380 ret = JOURNAL_ERR_journal_full;
385 * If we couldn't get a reservation because the current buf filled up,
386 * and we had room for a bigger entry on disk, signal that we want to
387 * realloc the journal bufs:
389 buf = journal_cur_buf(j);
390 if (journal_entry_is_open(j) &&
391 buf->buf_size >> 9 < buf->disk_sectors &&
392 buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
393 j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
395 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
396 ret = journal_entry_open(j);
398 if (ret == JOURNAL_ERR_max_in_flight)
399 trace_and_count(c, journal_entry_full, c);
401 if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
402 !j->res_get_blocked_start) {
403 j->res_get_blocked_start = local_clock() ?: 1;
404 trace_and_count(c, journal_full, c);
407 can_discard = j->can_discard;
408 spin_unlock(&j->lock);
413 if ((ret == JOURNAL_ERR_journal_full ||
414 ret == JOURNAL_ERR_journal_pin_full) &&
416 !nr_unwritten_journal_entries(j) &&
417 (flags & JOURNAL_WATERMARK_MASK) == JOURNAL_WATERMARK_reserved) {
418 struct printbuf buf = PRINTBUF;
		bch_err(c, "Journal stuck! Have a pre-reservation but journal full (ret %s)",
421 bch2_journal_errors[ret]);
423 bch2_journal_debug_to_text(&buf, j);
424 bch_err(c, "%s", buf.buf);
426 printbuf_reset(&buf);
427 bch2_journal_pins_to_text(&buf, j);
428 bch_err(c, "Journal pins:\n%s", buf.buf);
	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
439 if ((ret == JOURNAL_ERR_journal_full ||
440 ret == JOURNAL_ERR_journal_pin_full) &&
441 !(flags & JOURNAL_RES_GET_NONBLOCK)) {
443 bch2_journal_do_discards(j);
447 if (mutex_trylock(&j->reclaim_lock)) {
448 bch2_journal_reclaim(j);
449 mutex_unlock(&j->reclaim_lock);
	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
459 * Essentially the entry function to the journaling code. When bcachefs is doing
460 * a btree insert, it calls this function to get the current journal write.
 * Journal write is the structure used to set up journal writes. The calling
462 * function will then add its keys to the structure, queuing them for the next
465 * To ensure forward progress, the current task must not be holding any
466 * btree node write locks.
468 int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
473 closure_wait_event(&j->async_wait,
474 (ret = __journal_res_get(j, res, flags)) !=
			    -BCH_ERR_journal_res_get_blocked ||
476 (flags & JOURNAL_RES_GET_NONBLOCK));
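/*
 * Illustrative usage sketch (not a caller from this file; @u64s stands for
 * the caller's key size, and the helpers for copying keys into a
 * reservation live in journal.h):
 *
 *	struct journal_res res = { 0 };
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *
 *	if (!ret) {
 *		... copy keys into the space the reservation points at ...
 *		bch2_journal_res_put(j, &res);
 *	}
 */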
480 /* journal_preres: */
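/*
 * Pre-reservations let paths that must be able to make forward progress
 * (e.g. journal reclaim itself) set aside journal space ahead of time, so
 * that converting the pre-reservation into a real reservation later can't
 * deadlock against a full journal:
 */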
482 static bool journal_preres_available(struct journal *j,
483 struct journal_preres *res,
487 bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);
489 if (!ret && mutex_trylock(&j->reclaim_lock)) {
490 bch2_journal_reclaim(j);
491 mutex_unlock(&j->reclaim_lock);
497 int __bch2_journal_preres_get(struct journal *j,
498 struct journal_preres *res,
504 closure_wait_event(&j->preres_wait,
505 (ret = bch2_journal_error(j)) ||
506 journal_preres_available(j, res, new_u64s, flags));
510 /* journal_entry_res: */
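/*
 * A journal_entry_res reserves space in every journal entry, for entries
 * that must always fit (e.g. btree roots); resizing it adjusts both the
 * global reservation and, when there's room, the currently open entry:
 */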
512 void bch2_journal_entry_res_resize(struct journal *j,
513 struct journal_entry_res *res,
516 union journal_res_state state;
517 int d = new_u64s - res->u64s;
521 j->entry_u64s_reserved += d;
525 j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
527 state = READ_ONCE(j->reservations);
529 if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
530 state.cur_entry_offset > j->cur_entry_u64s) {
531 j->cur_entry_u64s += d;
533 * Not enough room in current journal entry, have to flush it:
535 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
537 journal_cur_buf(j)->u64s_reserved += d;
540 spin_unlock(&j->lock);
544 /* journal flushing: */
547 * bch2_journal_flush_seq_async - wait for a journal entry to be written
549 * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
552 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
553 struct closure *parent)
555 struct journal_buf *buf;
558 if (seq <= j->flushed_seq_ondisk)
563 if (WARN_ONCE(seq > journal_cur_seq(j),
564 "requested to flush journal seq %llu, but currently at %llu",
565 seq, journal_cur_seq(j)))
568 /* Recheck under lock: */
569 if (j->err_seq && seq >= j->err_seq) {
574 if (seq <= j->flushed_seq_ondisk) {
579 /* if seq was written, but not flushed - flush a newer one instead */
580 seq = max(seq, journal_last_unwritten_seq(j));
583 if (seq > journal_cur_seq(j)) {
584 struct journal_res res = { 0 };
586 if (journal_entry_is_open(j))
587 __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
589 spin_unlock(&j->lock);
591 ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
596 buf = j->buf + (seq & JOURNAL_BUF_MASK);
597 buf->must_flush = true;
599 if (!buf->flush_time) {
600 buf->flush_time = local_clock() ?: 1;
601 buf->expires = jiffies;
604 if (parent && !closure_wait(&buf->wait, parent))
607 bch2_journal_res_put(j, &res);
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
617 buf = journal_seq_to_buf(j, seq);
620 goto recheck_need_open;
623 buf->must_flush = true;
625 if (parent && !closure_wait(&buf->wait, parent))
628 if (seq == journal_cur_seq(j))
629 journal_entry_want_write(j);
631 spin_unlock(&j->lock);
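/*
 * Sketch of waiting synchronously on a flush via the @parent closure
 * (assumes process context):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	bch2_journal_flush_seq_async(j, seq, &cl);
 *	closure_sync(&cl);
 *
 * bch2_journal_flush_seq() below does the same with a waitqueue instead.
 */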
635 int bch2_journal_flush_seq(struct journal *j, u64 seq)
637 u64 start_time = local_clock();
641 * Don't update time_stats when @seq is already flushed:
643 if (seq <= j->flushed_seq_ondisk)
646 ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
649 bch2_time_stats_update(j->flush_seq_time, start_time);
651 return ret ?: ret2 < 0 ? ret2 : 0;
655 * bch2_journal_flush_async - if there is an open journal entry, or a journal
656 * still being written, write it and wait for the write to complete
658 void bch2_journal_flush_async(struct journal *j, struct closure *parent)
660 bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
663 int bch2_journal_flush(struct journal *j)
665 return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
672 bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
674 struct bch_fs *c = container_of(j, struct bch_fs, journal);
678 if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
681 if (seq <= c->journal.flushed_seq_ondisk)
685 if (seq <= c->journal.flushed_seq_ondisk)
688 for (unwritten_seq = journal_last_unwritten_seq(j);
691 struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);
693 /* journal write is already in flight, and was a flush write: */
694 if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
702 spin_unlock(&j->lock);
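/*
 * bch2_journal_meta() - write and flush a journal entry with no new keys,
 * used when we just need the current sequence number to be persisted:
 */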
706 int bch2_journal_meta(struct journal *j)
708 struct journal_buf *buf;
709 struct journal_res res;
712 memset(&res, 0, sizeof(res));
714 ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
718 buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
719 buf->must_flush = true;
721 if (!buf->flush_time) {
722 buf->flush_time = local_clock() ?: 1;
723 buf->expires = jiffies;
726 bch2_journal_res_put(j, &res);
728 return bch2_journal_flush_seq(j, res.seq);
/* block/unblock the journal: */
733 void bch2_journal_unblock(struct journal *j)
737 spin_unlock(&j->lock);
742 void bch2_journal_block(struct journal *j)
746 spin_unlock(&j->lock);
751 /* allocate journal on a device: */
753 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
754 bool new_fs, struct closure *cl)
756 struct bch_fs *c = ca->fs;
757 struct journal_device *ja = &ca->journal;
758 u64 *new_bucket_seq = NULL, *new_buckets = NULL;
759 struct open_bucket **ob = NULL;
761 unsigned i, nr_got = 0, nr_want = nr - ja->nr;
762 unsigned old_nr = ja->nr;
763 unsigned old_discard_idx = ja->discard_idx;
764 unsigned old_dirty_idx_ondisk = ja->dirty_idx_ondisk;
765 unsigned old_dirty_idx = ja->dirty_idx;
766 unsigned old_cur_idx = ja->cur_idx;
770 bch2_journal_flush_all_pins(&c->journal);
771 bch2_journal_block(&c->journal);
772 mutex_lock(&c->sb_lock);
775 bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
776 ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
777 new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
778 new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
779 if (!bu || !ob || !new_buckets || !new_bucket_seq) {
784 for (nr_got = 0; nr_got < nr_want; nr_got++) {
786 bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
787 if (bu[nr_got] < 0) {
788 ret = -BCH_ERR_ENOSPC_bucket_alloc;
792 ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
793 ret = PTR_ERR_OR_ZERO(ob[nr_got]);
797 bu[nr_got] = ob[nr_got]->bucket;
805 * We may be called from the device add path, before the new device has
806 * actually been added to the running filesystem:
809 spin_lock(&c->journal.lock);
811 memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
812 memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
813 swap(new_buckets, ja->buckets);
814 swap(new_bucket_seq, ja->bucket_seq);
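	/*
	 * Insert the new buckets at the current discard position, then bump
	 * any ring indices at or after the insertion point so they keep
	 * referring to the same buckets:
	 */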
816 for (i = 0; i < nr_got; i++) {
817 unsigned pos = ja->discard_idx ?: ja->nr;
820 __array_insert_item(ja->buckets, ja->nr, pos);
821 __array_insert_item(ja->bucket_seq, ja->nr, pos);
824 ja->buckets[pos] = b;
825 ja->bucket_seq[pos] = 0;
827 if (pos <= ja->discard_idx)
828 ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
829 if (pos <= ja->dirty_idx_ondisk)
830 ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
831 if (pos <= ja->dirty_idx)
832 ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
833 if (pos <= ja->cur_idx)
834 ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
837 ret = bch2_journal_buckets_to_sb(c, ca);
840 swap(new_buckets, ja->buckets);
841 swap(new_bucket_seq, ja->bucket_seq);
843 ja->discard_idx = old_discard_idx;
844 ja->dirty_idx_ondisk = old_dirty_idx_ondisk;
845 ja->dirty_idx = old_dirty_idx;
846 ja->cur_idx = old_cur_idx;
850 spin_unlock(&c->journal.lock);
852 if (ja->nr != old_nr && !new_fs)
856 bch2_journal_unblock(&c->journal);
862 for (i = 0; i < nr_got; i++) {
863 ret = bch2_trans_run(c,
864 bch2_trans_mark_metadata_bucket(&trans, ca,
865 bu[i], BCH_DATA_journal,
866 ca->mi.bucket_size));
868 bch2_fs_inconsistent(c, "error marking new journal buckets: %i", ret);
875 mutex_unlock(&c->sb_lock);
878 for (i = 0; i < nr_got; i++)
879 bch2_open_bucket_put(c, ob[i]);
881 kfree(new_bucket_seq);
889 bch2_journal_unblock(&c->journal);
 * Allocate more journal space at runtime - not currently making use of it, but
897 int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
900 struct journal_device *ja = &ca->journal;
904 /* don't handle reducing nr of buckets yet: */
908 closure_init_stack(&cl);
910 while (ja->nr != nr) {
911 struct disk_reservation disk_res = { 0, 0 };
914 * note: journal buckets aren't really counted as _sectors_ used yet, so
915 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
916 * when space used goes up without a reservation - but we do need the
917 * reservation to ensure we'll actually be able to allocate:
919 * XXX: that's not right, disk reservations only ensure a
920 * filesystem-wide allocation will succeed, this is a device
921 * specific allocation - we can hang here:
924 ret = bch2_disk_reservation_get(c, &disk_res,
925 bucket_to_sector(ca, nr - ja->nr), 1, 0);
929 ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
931 bch2_disk_reservation_put(c, &disk_res);
935 if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
940 bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
945 int bch2_dev_journal_alloc(struct bch_dev *ca)
949 if (dynamic_fault("bcachefs:add:journal_alloc"))
952 /* 1/128th of the device by default: */
953 nr = ca->mi.nbuckets >> 7;
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));
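	/*
	 * Worked example (illustrative): a 1TB device with 256k buckets has
	 * ~4M buckets, so nr = 4M >> 7 = 32768; the clamp above reduces that
	 * to 8192 buckets, i.e. a 2GB journal.
	 */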
964 return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
967 /* startup/shutdown: */
969 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
975 for (seq = journal_last_unwritten_seq(j);
976 seq <= journal_cur_seq(j) && !ret;
978 struct journal_buf *buf = journal_seq_to_buf(j, seq);
980 if (bch2_bkey_has_device(bkey_i_to_s_c(&buf->key), dev_idx))
983 spin_unlock(&j->lock);
988 void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
990 wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
993 void bch2_fs_journal_stop(struct journal *j)
995 bch2_journal_reclaim_stop(j);
996 bch2_journal_flush_all_pins(j);
998 wait_event(j->wait, journal_entry_close(j));
1001 * Always write a new journal entry, to make sure the clock hands are up
1002 * to date (and match the superblock)
1004 bch2_journal_meta(j);
1008 BUG_ON(!bch2_journal_error(j) &&
1009 test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
1010 j->last_empty_seq != journal_cur_seq(j));
1012 cancel_delayed_work_sync(&j->write_work);
1015 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
1017 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1018 struct journal_entry_pin_list *p;
1019 struct journal_replay *i, **_i;
1020 struct genradix_iter iter;
1021 bool had_entries = false;
1023 u64 last_seq = cur_seq, nr, seq;
1025 genradix_for_each_reverse(&c->journal_entries, iter, _i) {
1028 if (!i || i->ignore)
1031 last_seq = le64_to_cpu(i->j.last_seq);
1035 nr = cur_seq - last_seq;
1037 if (nr + 1 > j->pin.size) {
1039 init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
1041 bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
1046 j->replay_journal_seq = last_seq;
1047 j->replay_journal_seq_end = cur_seq;
1048 j->last_seq_ondisk = last_seq;
1049 j->flushed_seq_ondisk = cur_seq - 1;
1050 j->seq_ondisk = cur_seq - 1;
1051 j->pin.front = last_seq;
1052 j->pin.back = cur_seq;
1053 atomic64_set(&j->seq, cur_seq - 1);
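	/*
	 * The pin fifo covers sequence numbers [last_seq, cur_seq); pins for
	 * entries we read from disk start out with a count of 1:
	 */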
1055 fifo_for_each_entry_ptr(p, &j->pin, seq)
1056 journal_pin_list_init(p, 1);
1058 genradix_for_each(&c->journal_entries, iter, _i) {
1061 if (!i || i->ignore)
1064 seq = le64_to_cpu(i->j.seq);
1065 BUG_ON(seq >= cur_seq);
1070 if (journal_entry_empty(&i->j))
1071 j->last_empty_seq = le64_to_cpu(i->j.seq);
1073 p = journal_seq_pin(j, seq);
1076 for (ptr = 0; ptr < i->nr_ptrs; ptr++)
1077 bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
1083 j->last_empty_seq = cur_seq;
1085 spin_lock(&j->lock);
1087 set_bit(JOURNAL_STARTED, &j->flags);
1088 j->last_flush_write = jiffies;
1090 j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
1091 j->reservations.unwritten_idx++;
1093 c->last_bucket_seq_cleanup = journal_cur_seq(j);
1095 bch2_journal_space_available(j);
1096 spin_unlock(&j->lock);
1098 return bch2_journal_reclaim_start(j);
1103 void bch2_dev_journal_exit(struct bch_dev *ca)
1105 kfree(ca->journal.bio);
1106 kfree(ca->journal.buckets);
1107 kfree(ca->journal.bucket_seq);
1109 ca->journal.bio = NULL;
1110 ca->journal.buckets = NULL;
1111 ca->journal.bucket_seq = NULL;
1114 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
1116 struct journal_device *ja = &ca->journal;
1117 struct bch_sb_field_journal *journal_buckets =
1118 bch2_sb_get_journal(sb);
1119 struct bch_sb_field_journal_v2 *journal_buckets_v2 =
1120 bch2_sb_get_journal_v2(sb);
1121 unsigned i, nr_bvecs;
1125 if (journal_buckets_v2) {
1126 unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1128 for (i = 0; i < nr; i++)
1129 ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
1130 } else if (journal_buckets) {
1131 ja->nr = bch2_nr_journal_buckets(journal_buckets);
1134 ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1135 if (!ja->bucket_seq)
1138 nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
1140 ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
1141 if (!ca->journal.bio)
1144 bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
1146 ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
1150 if (journal_buckets_v2) {
1151 unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
1152 unsigned j, dst = 0;
1154 for (i = 0; i < nr; i++)
1155 for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
1156 ja->buckets[dst++] =
1157 le64_to_cpu(journal_buckets_v2->d[i].start) + j;
1158 } else if (journal_buckets) {
1159 for (i = 0; i < ja->nr; i++)
1160 ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
1166 void bch2_fs_journal_exit(struct journal *j)
1170 darray_exit(&j->early_journal_entries);
1172 for (i = 0; i < ARRAY_SIZE(j->buf); i++)
1173 kvpfree(j->buf[i].data, j->buf[i].buf_size);
1177 int bch2_fs_journal_init(struct journal *j)
1179 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1180 static struct lock_class_key res_key;
1184 pr_verbose_init(c->opts, "");
1186 spin_lock_init(&j->lock);
1187 spin_lock_init(&j->err_lock);
1188 init_waitqueue_head(&j->wait);
1189 INIT_DELAYED_WORK(&j->write_work, journal_write_work);
1190 init_waitqueue_head(&j->reclaim_wait);
1191 init_waitqueue_head(&j->pin_flush_wait);
1192 mutex_init(&j->reclaim_lock);
1193 mutex_init(&j->discard_lock);
1195 lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
1197 atomic64_set(&j->reservations.counter,
1198 ((union journal_res_state)
1199 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
1201 if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
1206 for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
1207 j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
1208 j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
1209 if (!j->buf[i].data) {
1215 j->pin.front = j->pin.back = 1;
1217 pr_verbose_init(c->opts, "ret %i", ret);
1223 void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1225 struct bch_fs *c = container_of(j, struct bch_fs, journal);
1226 union journal_res_state s;
1228 unsigned long now = jiffies;
1232 if (!out->nr_tabstops)
1233 printbuf_tabstop_push(out, 24);
1237 s = READ_ONCE(j->reservations);
1239 prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
1240 prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
1241 prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
1242 prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
1243 prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
1244 prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
1245 prt_printf(out, "prereserved:\t\t%u/%u\n", j->prereserved.reserved, j->prereserved.remaining);
1246 prt_printf(out, "watermark:\t\t%s\n", bch2_journal_watermarks[j->watermark]);
1247 prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
1248 prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
1249 prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
1250 prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
1251 prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
1252 prt_printf(out, "reclaim kicked:\t\t%u\n", j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
	       ? jiffies_to_msecs(j->next_reclaim - now) : 0);
1255 prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
1256 prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
1257 prt_printf(out, "current entry:\t\t");
1259 switch (s.cur_entry_offset) {
1260 case JOURNAL_ENTRY_ERROR_VAL:
1261 prt_printf(out, "error");
1263 case JOURNAL_ENTRY_CLOSED_VAL:
1264 prt_printf(out, "closed");
1267 prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
1273 for (seq = journal_cur_seq(j);
1274 seq >= journal_last_unwritten_seq(j);
1276 i = seq & JOURNAL_BUF_MASK;
1278 prt_printf(out, "unwritten entry:");
1280 prt_printf(out, "%llu", seq);
1282 printbuf_indent_add(out, 2);
1284 prt_printf(out, "refcount:");
1286 prt_printf(out, "%u", journal_state_count(s, i));
1289 prt_printf(out, "sectors:");
1291 prt_printf(out, "%u", j->buf[i].sectors);
1294 prt_printf(out, "expires");
1296 prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
1299 printbuf_indent_sub(out, 2);
1303 "replay done:\t\t%i\n",
1304 test_bit(JOURNAL_REPLAY_DONE, &j->flags));
1306 prt_printf(out, "space:\n");
1307 prt_printf(out, "\tdiscarded\t%u:%u\n",
1308 j->space[journal_space_discarded].next_entry,
1309 j->space[journal_space_discarded].total);
1310 prt_printf(out, "\tclean ondisk\t%u:%u\n",
1311 j->space[journal_space_clean_ondisk].next_entry,
1312 j->space[journal_space_clean_ondisk].total);
1313 prt_printf(out, "\tclean\t\t%u:%u\n",
1314 j->space[journal_space_clean].next_entry,
1315 j->space[journal_space_clean].total);
1316 prt_printf(out, "\ttotal\t\t%u:%u\n",
1317 j->space[journal_space_total].next_entry,
1318 j->space[journal_space_total].total);
1320 for_each_member_device_rcu(ca, c, i,
1321 &c->rw_devs[BCH_DATA_journal]) {
1322 struct journal_device *ja = &ca->journal;
1324 if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
1330 prt_printf(out, "dev %u:\n", i);
1331 prt_printf(out, "\tnr\t\t%u\n", ja->nr);
1332 prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
1333 prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
1334 prt_printf(out, "\tdiscard_idx\t%u\n", ja->discard_idx);
1335 prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
1336 prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
1337 prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
1345 void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
1347 spin_lock(&j->lock);
1348 __bch2_journal_debug_to_text(out, j);
1349 spin_unlock(&j->lock);
1352 bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
1354 struct journal_entry_pin_list *pin_list;
1355 struct journal_entry_pin *pin;
1357 spin_lock(&j->lock);
1358 *seq = max(*seq, j->pin.front);
1360 if (*seq >= j->pin.back) {
1361 spin_unlock(&j->lock);
1367 pin_list = journal_seq_pin(j, *seq);
1369 prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
1371 printbuf_indent_add(out, 2);
1373 list_for_each_entry(pin, &pin_list->list, list) {
1374 prt_printf(out, "\t%px %ps", pin, pin->flush);
1378 list_for_each_entry(pin, &pin_list->key_cache_list, list) {
1379 prt_printf(out, "\t%px %ps", pin, pin->flush);
1383 if (!list_empty(&pin_list->flushed)) {
1384 prt_printf(out, "flushed:");
1388 list_for_each_entry(pin, &pin_list->flushed, list) {
1389 prt_printf(out, "\t%px %ps", pin, pin->flush);
1393 printbuf_indent_sub(out, 2);
1396 spin_unlock(&j->lock);
1401 void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
1405 while (!bch2_journal_seq_pins_to_text(out, j, &seq))