// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "btree_write_buffer.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
#include "trace.h"

static const char * const bch2_journal_errors[] = {
#define x(n)	#n,
	JOURNAL_ERRORS()
#undef x
	NULL
};

static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
	return seq > j->seq_ondisk;
}

static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}
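
/*
 * Note: cur_entry_offset doubles as state encoding. Any value below
 * JOURNAL_ENTRY_CLOSED_VAL is a real offset into an open entry;
 * JOURNAL_ENTRY_CLOSED_VAL and JOURNAL_ENTRY_ERROR_VAL are sentinel
 * values above all valid offsets, meaning "closed" and "journal error"
 * respectively (see journal_types.h).
 */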

static inline unsigned nr_unwritten_journal_entries(struct journal *j)
{
	return atomic64_read(&j->seq) - j->seq_ondisk;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}

static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
{
	union journal_res_state s = READ_ONCE(j->reservations);
	unsigned i = seq & JOURNAL_BUF_MASK;
	struct journal_buf *buf = j->buf + i;

	prt_printf(out, "seq:");
	prt_tab(out);
	prt_printf(out, "%llu", seq);
	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "refcount:");
	prt_tab(out);
	prt_printf(out, "%u", journal_state_count(s, i));
	prt_newline(out);

	prt_printf(out, "size:");
	prt_tab(out);
	prt_human_readable_u64(out, vstruct_bytes(buf->data));
	prt_newline(out);

	prt_printf(out, "expires:");
	prt_tab(out);
	prt_printf(out, "%li jiffies", buf->expires - jiffies);
	prt_newline(out);

	if (buf->write_done)
		prt_printf(out, "write done\n");
	else if (buf->write_allocated)
		prt_printf(out, "write allocated\n");
	else if (buf->write_started)
		prt_printf(out, "write started\n");

	printbuf_indent_sub(out, 2);
}

static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
{
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++)
		bch2_journal_buf_to_text(out, j, seq);
	prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
}

static inline struct journal_buf *
journal_seq_to_buf(struct journal *j, u64 seq)
{
	struct journal_buf *buf = NULL;

	EBUG_ON(seq > journal_cur_seq(j));

	if (journal_seq_unwritten(j, seq)) {
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
	}
	return buf;
}

static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(p->list); i++)
		INIT_LIST_HEAD(&p->list[i]);
	INIT_LIST_HEAD(&p->flushed);
	atomic_set(&p->count, count);
	p->devs.nr = 0;
}

/*
 * Detect stuck journal conditions and trigger shutdown. Technically the journal
 * can end up stuck for a variety of reasons, such as blocked I/O, journal
 * reservation lockups, etc. Since this is a fatal error with potentially
 * unpredictable characteristics, we want to be fairly conservative before we
 * decide to shut things down.
 *
 * Consider the journal stuck when it appears full with no ability to commit
 * btree transactions, to discard journal buckets, or to acquire priority
 * (reserved watermark) reservation.
 */
static bool
journal_error_check_stuck(struct journal *j, int error, unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool stuck = false;
	struct printbuf buf = PRINTBUF;

	if (!(error == JOURNAL_ERR_journal_full ||
	      error == JOURNAL_ERR_journal_pin_full) ||
	    nr_unwritten_journal_entries(j) ||
	    (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
		return stuck;

	spin_lock(&j->lock);

	if (j->can_discard) {
		spin_unlock(&j->lock);
		return stuck;
	}

	stuck = true;

	/*
	 * The journal shutdown path will set ->err_seq, but do it here first to
	 * serialize against concurrent failures and avoid duplicate error
	 * reports:
	 */
	if (j->err_seq) {
		spin_unlock(&j->lock);
		return stuck;
	}
	j->err_seq = journal_cur_seq(j);
	spin_unlock(&j->lock);

	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
		bch2_journal_errors[error]);
	bch2_journal_debug_to_text(&buf, j);
	bch_err(c, "%s", buf.buf);

	printbuf_reset(&buf);
	bch2_journal_pins_to_text(&buf, j);
	bch_err(c, "Journal pins:\n%s", buf.buf);

	printbuf_exit(&buf);
	bch2_fatal_error(c);
	dump_stack();

	return stuck;
}

void bch2_journal_do_writes(struct journal *j)
{
	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *w = j->buf + idx;

		if (w->write_started && !w->write_allocated)
			break;
		if (w->write_started)
			continue;

		if (!journal_state_count(j->reservations, idx)) {
			w->write_started = true;
			closure_call(&w->io, bch2_journal_write, j->wq, NULL);
		}

		break;
	}
}

/*
 * Final processing when the last reference of a journal buffer has been
 * dropped. Drop the pin list reference acquired at journal entry open and write
 * the buffer, if requested.
 */
void bch2_journal_buf_put_final(struct journal *j, u64 seq)
{
	lockdep_assert_held(&j->lock);

	if (__bch2_journal_pin_put(j, seq))
		bch2_journal_reclaim_fast(j);
	bch2_journal_do_writes(j);
}
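
/*
 * Hedged sketch of the caller side (the inline put helper lives in
 * journal.h, not this file): the buffer refcount is dropped atomically,
 * and whoever observes the count hit zero - while holding j->lock -
 * calls bch2_journal_buf_put_final() to release the pin and kick off
 * any now-unblocked writes.
 */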

/*
 * Returns true if journal entry is now closed:
 *
 * We don't close a journal_buf until the next journal_buf is finished writing,
 * and can be opened again - this also initializes the next journal_buf:
 */
static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = journal_cur_buf(j);
	union journal_res_state old, new;
	u64 v = atomic64_read(&j->reservations.counter);
	unsigned sectors;

	BUG_ON(closed_val != JOURNAL_ENTRY_CLOSED_VAL &&
	       closed_val != JOURNAL_ENTRY_ERROR_VAL);

	lockdep_assert_held(&j->lock);

	do {
		old.v = new.v = v;
		new.cur_entry_offset = closed_val;

		if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL ||
		    old.cur_entry_offset == new.cur_entry_offset)
			return;
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (!__journal_entry_is_open(old))
		return;

	/* Close out old buffer: */
	buf->data->u64s = cpu_to_le32(old.cur_entry_offset);

	if (trace_journal_entry_close_enabled() && trace) {
		struct printbuf pbuf = PRINTBUF;
		pbuf.atomic++;

		prt_str(&pbuf, "entry size: ");
		prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
		prt_newline(&pbuf);
		bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
		trace_journal_entry_close(c, pbuf.buf);
		printbuf_exit(&pbuf);
	}

	sectors = vstruct_blocks_plus(buf->data, c->block_bits,
				      buf->u64s_reserved) << c->block_bits;
	BUG_ON(sectors > buf->sectors);
	buf->sectors = sectors;

	/*
	 * We have to set last_seq here, _before_ opening a new journal entry:
	 *
	 * A thread may replace an old pin with a new pin on its current
	 * journal reservation - the expectation being that the journal will
	 * contain either what the old pin protected or what the new pin
	 * protects.
	 *
	 * After the old pin is dropped journal_last_seq() won't include the old
	 * pin, so we can only write the updated last_seq on the entry that
	 * contains whatever the new pin protects.
	 *
	 * Restated, we can _not_ update last_seq for a given entry if there
	 * could be a newer entry open with reservations/pins that have been
	 * taken against it.
	 *
	 * Hence, we want to update/set last_seq on the current journal entry
	 * right before we open a new one:
	 */
	buf->last_seq = journal_last_seq(j);
	buf->data->last_seq = cpu_to_le64(buf->last_seq);
	BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));

	cancel_delayed_work(&j->write_work);

	bch2_journal_space_available(j);

	__bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
}
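
/*
 * Worked example of the last_seq rule above (illustrative, not from the
 * original source): suppose a thread holds a pin on seq 10 and, while
 * entry 12 is open, re-pins at seq 12 and drops the pin on 10. last_seq
 * computed after the drop no longer covers seq 10, so it may only be
 * stamped on entry 12 - the entry holding the re-pinned data. Stamping it
 * on an earlier entry would let replay start past seq 10 before entry 12
 * is durable. Setting last_seq only on the entry being closed, before the
 * next one opens, avoids that window.
 */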

void bch2_journal_halt(struct journal *j)
{
	spin_lock(&j->lock);
	__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
	if (!j->err_seq)
		j->err_seq = journal_cur_seq(j);
	journal_wake(j);
	spin_unlock(&j->lock);
}

static bool journal_entry_want_write(struct journal *j)
{
	bool ret = !journal_entry_is_open(j) ||
		journal_cur_seq(j) == journal_last_unwritten_seq(j);

	/* Don't close it yet if we already have a write in flight: */
	if (ret)
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	else if (nr_unwritten_journal_entries(j)) {
		struct journal_buf *buf = journal_cur_buf(j);

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}
	}

	return ret;
}

bool bch2_journal_entry_close(struct journal *j)
{
	bool ret;

	spin_lock(&j->lock);
	ret = journal_entry_want_write(j);
	spin_unlock(&j->lock);

	return ret;
}

/*
 * should _only_ be called from journal_res_get() - when we actually want a
 * journal reservation - journal entry is open means journal is dirty:
 */
static int journal_entry_open(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf = j->buf +
		((journal_cur_seq(j) + 1) & JOURNAL_BUF_MASK);
	union journal_res_state old, new;
	int u64s;
	u64 v;

	lockdep_assert_held(&j->lock);
	BUG_ON(journal_entry_is_open(j));
	BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));

	if (j->blocked)
		return JOURNAL_ERR_blocked;

	if (j->cur_entry_error)
		return j->cur_entry_error;

	if (bch2_journal_error(j))
		return JOURNAL_ERR_insufficient_devices; /* -EROFS */

	if (!fifo_free(&j->pin))
		return JOURNAL_ERR_journal_pin_full;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
		return JOURNAL_ERR_max_in_flight;

	BUG_ON(!j->cur_entry_sectors);

	buf->expires		=
		(journal_cur_seq(j) == j->flushed_seq_ondisk
		 ? jiffies
		 : j->last_flush_write) +
		msecs_to_jiffies(c->opts.journal_flush_delay);

	buf->u64s_reserved	= j->entry_u64s_reserved;
	buf->disk_sectors	= j->cur_entry_sectors;
	buf->sectors		= min(buf->disk_sectors, buf->buf_size >> 9);

	u64s = (int) (buf->sectors << 9) / sizeof(u64) -
		journal_entry_overhead(j);
	u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);

	if (u64s <= (ssize_t) j->early_journal_entries.nr)
		return JOURNAL_ERR_journal_full;

	if (fifo_empty(&j->pin) && j->reclaim_thread)
		wake_up_process(j->reclaim_thread);

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for journal_last_seq() to be calculated correctly
	 */
	atomic64_inc(&j->seq);
	journal_pin_list_init(fifo_push_ref(&j->pin), 1);

	BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));

	BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);

	bkey_extent_init(&buf->key);
	buf->noflush		= false;
	buf->must_flush		= false;
	buf->separate_flush	= false;
	buf->flush_time		= 0;
	buf->need_flush_to_write_buffer = true;
	buf->write_started	= false;
	buf->write_allocated	= false;
	buf->write_done		= false;

	memset(buf->data, 0, sizeof(*buf->data));
	buf->data->seq	= cpu_to_le64(journal_cur_seq(j));
	buf->data->u64s	= 0;

	if (j->early_journal_entries.nr) {
		memcpy(buf->data->_data, j->early_journal_entries.data,
		       j->early_journal_entries.nr * sizeof(u64));
		le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
	}

	/*
	 * Must be set before marking the journal entry as open:
	 */
	j->cur_entry_u64s = u64s;

	v = atomic64_read(&j->reservations.counter);
	do {
		old.v = new.v = v;

		BUG_ON(old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL);

		new.idx++;
		BUG_ON(journal_state_count(new, new.idx));
		BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));

		journal_state_inc(&new);

		/* Handle any already added entries */
		new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
	} while ((v = atomic64_cmpxchg(&j->reservations.counter,
				       old.v, new.v)) != old.v);

	if (nr_unwritten_journal_entries(j) == 1)
		mod_delayed_work(j->wq,
				 &j->write_work,
				 msecs_to_jiffies(c->opts.journal_flush_delay));

	if (j->early_journal_entries.nr)
		darray_exit(&j->early_journal_entries);
	return 0;
}
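
/*
 * Lifecycle summary (descriptive, inferred from the functions above):
 * journal_res_get_fast() grabs space in the currently open entry; when
 * that fails, __journal_res_get() closes the current entry via
 * __journal_entry_close() and opens the next one here, with
 * bch2_journal_space_available() having refreshed j->cur_entry_sectors
 * in between. An open entry always implies a dirty journal.
 */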

static bool journal_quiesced(struct journal *j)
{
	bool ret = atomic64_read(&j->seq) == j->seq_ondisk;

	if (!ret)
		bch2_journal_entry_close(j);
	return ret;
}

static void journal_quiesce(struct journal *j)
{
	wait_event(j->wait, journal_quiesced(j));
}

static void journal_write_work(struct work_struct *work)
{
	struct journal *j = container_of(work, struct journal, write_work.work);

	spin_lock(&j->lock);
	if (__journal_entry_is_open(j->reservations)) {
		long delta = journal_cur_buf(j)->expires - jiffies;

		if (delta > 0)
			mod_delayed_work(j->wq, &j->write_work, delta);
		else
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	}
	spin_unlock(&j->lock);
}

static int __journal_res_get(struct journal *j, struct journal_res *res,
			     unsigned flags)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_buf *buf;
	bool can_discard;
	int ret;
retry:
	if (journal_res_get_fast(j, res, flags))
		return 0;

	if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
		ret = JOURNAL_ERR_journal_full;
		can_discard = j->can_discard;
		goto out;
	}

	if (j->blocked)
		return -BCH_ERR_journal_res_get_blocked;

	if (bch2_journal_error(j))
		return -BCH_ERR_erofs_journal_err;

	if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
		ret = JOURNAL_ERR_max_in_flight;
		goto out;
	}

	spin_lock(&j->lock);

	/*
	 * Recheck after taking the lock, so we don't race with another thread
	 * that just did journal_entry_open() and call bch2_journal_entry_close()
	 * unnecessarily
	 */
	if (journal_res_get_fast(j, res, flags)) {
		ret = 0;
		goto unlock;
	}

	/*
	 * If we couldn't get a reservation because the current buf filled up,
	 * and we had room for a bigger entry on disk, signal that we want to
	 * realloc the journal bufs:
	 */
	buf = journal_cur_buf(j);
	if (journal_entry_is_open(j) &&
	    buf->buf_size >> 9 < buf->disk_sectors &&
	    buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
		j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);

	__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
	ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
unlock:
	can_discard = j->can_discard;
	spin_unlock(&j->lock);
out:
	if (ret == JOURNAL_ERR_retry)
		goto retry;
	if (!ret)
		return 0;

	if (journal_error_check_stuck(j, ret, flags))
		ret = -BCH_ERR_journal_res_get_blocked;

	if (ret == JOURNAL_ERR_max_in_flight &&
	    track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
		bch2_journal_bufs_to_text(&buf, j);
		trace_journal_entry_full(c, buf.buf);
		printbuf_exit(&buf);
		count_event(c, journal_entry_full);
	}

	/*
	 * Journal is full - can't rely on reclaim from work item due to
	 * freezing:
	 */
	if ((ret == JOURNAL_ERR_journal_full ||
	     ret == JOURNAL_ERR_journal_pin_full) &&
	    !(flags & JOURNAL_RES_GET_NONBLOCK)) {
		if (can_discard) {
			bch2_journal_do_discards(j);
			goto retry;
		}

		if (mutex_trylock(&j->reclaim_lock)) {
			bch2_journal_reclaim(j);
			mutex_unlock(&j->reclaim_lock);
			goto retry;
		}
	}

	return ret == JOURNAL_ERR_insufficient_devices
		? -BCH_ERR_erofs_journal_err
		: -BCH_ERR_journal_res_get_blocked;
}

/*
 * Essentially the entry function to the journaling code. When bcachefs is doing
 * a btree insert, it calls this function to get the current journal write.
 * The journal write is the structure used to set up journal writes. The calling
 * function will then add its keys to the structure, queuing them for the next
 * write.
 *
 * To ensure forward progress, the current task must not be holding any
 * btree node write locks.
 */
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
	int ret;

	closure_wait_event(&j->async_wait,
		   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
		   (flags & JOURNAL_RES_GET_NONBLOCK));
	return ret;
}
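
/*
 * Hedged usage sketch (an illustrative caller, not copied from this
 * tree): reserve space, copy jset entries into the open buffer, then
 * release the reservation:
 *
 *	struct journal_res res = {};
 *	int ret = bch2_journal_res_get(j, &res, jset_u64s(u64s), 0);
 *	if (ret)
 *		return ret;
 *
 *	// res points into the current entry; write jset entries there
 *	// (via the journal_res_entry()-style helpers in journal.h), then:
 *	bch2_journal_res_put(j, &res);
 */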

/* journal_entry_res: */

void bch2_journal_entry_res_resize(struct journal *j,
				   struct journal_entry_res *res,
				   unsigned new_u64s)
{
	union journal_res_state state;
	int d = new_u64s - res->u64s;

	spin_lock(&j->lock);

	j->entry_u64s_reserved += d;
	if (d <= 0)
		goto out;

	j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
	smp_mb();
	state = READ_ONCE(j->reservations);

	if (state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL &&
	    state.cur_entry_offset > j->cur_entry_u64s) {
		j->cur_entry_u64s += d;
		/*
		 * Not enough room in current journal entry, have to flush it:
		 */
		__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
	} else {
		journal_cur_buf(j)->u64s_reserved += d;
	}
out:
	spin_unlock(&j->lock);
	res->u64s += d;
}

/* journal flushing: */

/**
 * bch2_journal_flush_seq_async - wait for a journal entry to be written
 * @j:		journal object
 * @seq:	seq to flush
 * @parent:	closure object to wait with
 * Returns: 1 if @seq has already been flushed, 0 if @seq is being flushed,
 * -EIO if @seq will never be flushed
 *
 * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
 * necessary
 */
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
				 struct closure *parent)
{
	struct journal_buf *buf;
	int ret = 0;

	if (seq <= j->flushed_seq_ondisk)
		return 1;

	spin_lock(&j->lock);

	if (WARN_ONCE(seq > journal_cur_seq(j),
		      "requested to flush journal seq %llu, but currently at %llu",
		      seq, journal_cur_seq(j)))
		goto out;

	/* Recheck under lock: */
	if (j->err_seq && seq >= j->err_seq) {
		ret = -EIO;
		goto out;
	}

	if (seq <= j->flushed_seq_ondisk) {
		ret = 1;
		goto out;
	}

	/* if seq was written, but not flushed - flush a newer one instead */
	seq = max(seq, journal_last_unwritten_seq(j));

recheck_need_open:
	if (seq > journal_cur_seq(j)) {
		struct journal_res res = { 0 };

		if (journal_entry_is_open(j))
			__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

		spin_unlock(&j->lock);

		ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
		if (ret)
			return ret;

		seq = res.seq;
		buf = j->buf + (seq & JOURNAL_BUF_MASK);
		buf->must_flush = true;

		if (!buf->flush_time) {
			buf->flush_time	= local_clock() ?: 1;
			buf->expires = jiffies;
		}

		if (parent && !closure_wait(&buf->wait, parent))
			BUG();

		bch2_journal_res_put(j, &res);

		spin_lock(&j->lock);
		goto want_write;
	}

	/*
	 * if write was kicked off without a flush, flush the next sequence
	 * number instead
	 */
	buf = journal_seq_to_buf(j, seq);
	if (buf->noflush) {
		seq++;
		goto recheck_need_open;
	}

	buf->must_flush = true;

	if (parent && !closure_wait(&buf->wait, parent))
		BUG();
want_write:
	if (seq == journal_cur_seq(j))
		journal_entry_want_write(j);
out:
	spin_unlock(&j->lock);
	return ret;
}
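
/*
 * Hedged note on the return convention above: a caller that passes a
 * closure should only wait when 0 is returned (flush in flight); 1 means
 * @seq was already durable and the closure was never enqueued, and a
 * negative return means @seq is past j->err_seq and will never be
 * flushed. bch2_journal_flush_seq() below is the synchronous wrapper
 * built on exactly this convention.
 */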

int bch2_journal_flush_seq(struct journal *j, u64 seq)
{
	u64 start_time = local_clock();
	int ret, ret2;

	/*
	 * Don't update time_stats when @seq is already flushed:
	 */
	if (seq <= j->flushed_seq_ondisk)
		return 0;

	ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));

	if (!ret)
		time_stats_update(j->flush_seq_time, start_time);

	return ret ?: ret2 < 0 ? ret2 : 0;
}

/*
 * bch2_journal_flush_async - if there is an open journal entry, or a journal
 * still being written, write it and wait for the write to complete
 */
void bch2_journal_flush_async(struct journal *j, struct closure *parent)
{
	bch2_journal_flush_seq_async(j, atomic64_read(&j->seq), parent);
}

int bch2_journal_flush(struct journal *j)
{
	return bch2_journal_flush_seq(j, atomic64_read(&j->seq));
}

/*
 * bch2_journal_noflush_seq - tell the journal not to issue any flushes before
 * @seq
 */
bool bch2_journal_noflush_seq(struct journal *j, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	u64 unwritten_seq;
	bool ret = false;

	if (!(c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush)))
		return false;

	if (seq <= c->journal.flushed_seq_ondisk)
		return false;

	spin_lock(&j->lock);
	if (seq <= c->journal.flushed_seq_ondisk)
		goto out;

	for (unwritten_seq = journal_last_unwritten_seq(j);
	     unwritten_seq < seq;
	     unwritten_seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, unwritten_seq);

		/* journal write is already in flight, and was a flush write: */
		if (unwritten_seq == journal_last_unwritten_seq(j) && !buf->noflush)
			goto out;

		buf->noflush = true;
	}

	ret = true;
out:
	spin_unlock(&j->lock);
	return ret;
}

int bch2_journal_meta(struct journal *j)
{
	struct journal_buf *buf;
	struct journal_res res;
	int ret;

	memset(&res, 0, sizeof(res));

	ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
	if (ret)
		return ret;

	buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
	buf->must_flush = true;

	if (!buf->flush_time) {
		buf->flush_time	= local_clock() ?: 1;
		buf->expires = jiffies;
	}

	bch2_journal_res_put(j, &res);

	return bch2_journal_flush_seq(j, res.seq);
}

/* block/unlock the journal: */

void bch2_journal_unblock(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked--;
	spin_unlock(&j->lock);

	journal_wake(j);
}

void bch2_journal_block(struct journal *j)
{
	spin_lock(&j->lock);
	j->blocked++;
	spin_unlock(&j->lock);

	journal_quiesce(j);
}

static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret = NULL;

	mutex_lock(&j->buf_lock);
	spin_lock(&j->lock);
	max_seq = min(max_seq, journal_cur_seq(j));

	for (u64 seq = journal_last_unwritten_seq(j);
	     seq <= max_seq;
	     seq++) {
		unsigned idx = seq & JOURNAL_BUF_MASK;
		struct journal_buf *buf = j->buf + idx;

		if (buf->need_flush_to_write_buffer) {
			if (seq == journal_cur_seq(j))
				__journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);

			union journal_res_state s;
			s.v = atomic64_read_acquire(&j->reservations.counter);

			ret = journal_state_count(s, idx)
				? ERR_PTR(-EAGAIN)
				: buf;
			break;
		}
	}

	spin_unlock(&j->lock);
	if (IS_ERR_OR_NULL(ret))
		mutex_unlock(&j->buf_lock);
	return ret;
}

struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
{
	struct journal_buf *ret;

	wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));

	return ret;
}

/* allocate journal on a device: */

static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
					 bool new_fs, struct closure *cl)
{
	struct bch_fs *c = ca->fs;
	struct journal_device *ja = &ca->journal;
	u64 *new_bucket_seq = NULL, *new_buckets = NULL;
	struct open_bucket **ob = NULL;
	long *bu = NULL;
	unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
	int ret = 0;

	BUG_ON(nr <= ja->nr);

	bu		= kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
	ob		= kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
	new_buckets	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	new_bucket_seq	= kcalloc(nr, sizeof(u64), GFP_KERNEL);
	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err_free;
	}

	for (nr_got = 0; nr_got < nr_want; nr_got++) {
		if (new_fs) {
			bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
			if (bu[nr_got] < 0) {
				ret = -BCH_ERR_ENOSPC_bucket_alloc;
				break;
			}
		} else {
			ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
			ret = PTR_ERR_OR_ZERO(ob[nr_got]);
			if (ret)
				break;

			ret = bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						ob[nr_got]->bucket, BCH_DATA_journal,
						ca->mi.bucket_size));
			if (ret) {
				bch2_open_bucket_put(c, ob[nr_got]);
				bch_err_msg(c, ret, "marking new journal buckets");
				break;
			}

			bu[nr_got] = ob[nr_got]->bucket;
		}
	}

	if (!nr_got)
		goto err_free;

	/* Don't return an error if we successfully allocated some buckets: */
	ret = 0;

	if (!new_fs) {
		bch2_journal_flush_all_pins(&c->journal);
		bch2_journal_block(&c->journal);
		mutex_lock(&c->sb_lock);
	}

	memcpy(new_buckets,	ja->buckets,	ja->nr * sizeof(u64));
	memcpy(new_bucket_seq,	ja->bucket_seq,	ja->nr * sizeof(u64));

	BUG_ON(ja->discard_idx > ja->nr);

	pos = ja->discard_idx ?: ja->nr;

	memmove(new_buckets + pos + nr_got,
		new_buckets + pos,
		sizeof(new_buckets[0]) * (ja->nr - pos));
	memmove(new_bucket_seq + pos + nr_got,
		new_bucket_seq + pos,
		sizeof(new_bucket_seq[0]) * (ja->nr - pos));

	for (i = 0; i < nr_got; i++) {
		new_buckets[pos + i] = bu[i];
		new_bucket_seq[pos + i] = 0;
	}

	nr = ja->nr + nr_got;

	ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
	if (ret)
		goto err_unblock;

	if (!new_fs)
		bch2_write_super(c);

	/* Commit: */
	if (!new_fs)
		spin_lock(&c->journal.lock);

	swap(new_buckets,	ja->buckets);
	swap(new_bucket_seq,	ja->bucket_seq);
	ja->nr = nr;

	if (pos <= ja->discard_idx)
		ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx_ondisk)
		ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
	if (pos <= ja->dirty_idx)
		ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
	if (pos <= ja->cur_idx)
		ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
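
	/*
	 * Worked example of the splice above (hypothetical numbers): with
	 * ja->nr = 4, discard_idx = 2 and nr_got = 2, the new buckets are
	 * inserted at pos = 2, so old entries 2..3 shift to 4..5 and every
	 * ring index >= pos is rotated forward by nr_got, modulo the new
	 * ja->nr.
	 */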

	if (!new_fs)
		spin_unlock(&c->journal.lock);
err_unblock:
	if (!new_fs) {
		bch2_journal_unblock(&c->journal);
		mutex_unlock(&c->sb_lock);
	}

	if (ret && !new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_trans_run(c,
				bch2_trans_mark_metadata_bucket(trans, ca,
						bu[i], BCH_DATA_free, 0));
err_free:
	if (!new_fs)
		for (i = 0; i < nr_got; i++)
			bch2_open_bucket_put(c, ob[i]);

	kfree(new_bucket_seq);
	kfree(new_buckets);
	kfree(ob);
	kfree(bu);
	return ret;
}

/*
 * Allocate more journal space at runtime - not currently making use of it, but
 * the code works:
 */
int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
				unsigned nr)
{
	struct journal_device *ja = &ca->journal;
	struct closure cl;
	int ret = 0;

	closure_init_stack(&cl);

	down_write(&c->state_lock);

	/* don't handle reducing nr of buckets yet: */
	if (nr < ja->nr)
		goto unlock;

	while (ja->nr < nr) {
		struct disk_reservation disk_res = { 0, 0, 0 };

		/*
		 * note: journal buckets aren't really counted as _sectors_ used yet, so
		 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
		 * when space used goes up without a reservation - but we do need the
		 * reservation to ensure we'll actually be able to allocate:
		 *
		 * XXX: that's not right, disk reservations only ensure a
		 * filesystem-wide allocation will succeed, this is a device
		 * specific allocation - we can hang here:
		 */

		ret = bch2_disk_reservation_get(c, &disk_res,
						bucket_to_sector(ca, nr - ja->nr), 1, 0);
		if (ret)
			break;

		ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);

		bch2_disk_reservation_put(c, &disk_res);

		closure_sync(&cl);

		if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
			break;
	}

	bch_err_fn(c, ret);
unlock:
	up_write(&c->state_lock);
	return ret;
}

int bch2_dev_journal_alloc(struct bch_dev *ca)
{
	unsigned nr;
	int ret;

	if (dynamic_fault("bcachefs:add:journal_alloc")) {
		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
		goto err;
	}

	/* 1/128th of the device by default: */
	nr = ca->mi.nbuckets >> 7;

	/*
	 * clamp journal size to 8192 buckets or 8GB (in sectors), whichever
	 * is smaller:
	 */
	nr = clamp_t(unsigned, nr,
		     BCH_JOURNAL_BUCKETS_MIN,
		     min(1 << 13,
			 (1 << 24) / ca->mi.bucket_size));
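
	/*
	 * Worked example (hypothetical device): with 512 KiB buckets
	 * (mi.bucket_size = 1024 sectors), (1 << 24) / 1024 = 16384, so the
	 * 1 << 13 = 8192 bucket cap wins; with 2 MiB buckets the sector cap
	 * of (1 << 24) / 4096 = 4096 buckets wins instead.
	 */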

	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
	bch_err_fn(ca, ret);
	return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
	for_each_online_member(c, ca) {
		if (ca->journal.nr)
			continue;

		int ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			return ret;
		}
	}

	return 0;
}

/* startup/shutdown: */

static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
{
	bool ret = false;
	u64 seq;

	spin_lock(&j->lock);
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j) && !ret;
	     seq++) {
		struct journal_buf *buf = journal_seq_to_buf(j, seq);

		if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
			ret = true;
	}
	spin_unlock(&j->lock);

	return ret;
}

void bch2_dev_journal_stop(struct journal *j, struct bch_dev *ca)
{
	wait_event(j->wait, !bch2_journal_writing_to_device(j, ca->dev_idx));
}

void bch2_fs_journal_stop(struct journal *j)
{
	bch2_journal_reclaim_stop(j);
	bch2_journal_flush_all_pins(j);

	wait_event(j->wait, bch2_journal_entry_close(j));

	/*
	 * Always write a new journal entry, to make sure the clock hands are up
	 * to date (and match the superblock)
	 */
	bch2_journal_meta(j);

	journal_quiesce(j);

	BUG_ON(!bch2_journal_error(j) &&
	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
	       j->last_empty_seq != journal_cur_seq(j));

	cancel_delayed_work_sync(&j->write_work);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	struct journal_replay *i, **_i;
	struct genradix_iter iter;
	bool had_entries = false;
	u64 last_seq = cur_seq, nr, seq;

	genradix_for_each_reverse(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		last_seq = le64_to_cpu(i->j.last_seq);
		break;
	}

	nr = cur_seq - last_seq;

	if (nr + 1 > j->pin.size) {
		free_fifo(&j->pin);
		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
		if (!j->pin.data) {
			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
			return -BCH_ERR_ENOMEM_journal_pin_fifo;
		}
	}

	j->replay_journal_seq	= last_seq;
	j->replay_journal_seq_end = cur_seq;
	j->last_seq_ondisk	= last_seq;
	j->flushed_seq_ondisk	= cur_seq - 1;
	j->seq_ondisk		= cur_seq - 1;
	j->pin.front		= last_seq;
	j->pin.back		= cur_seq;
	atomic64_set(&j->seq, cur_seq - 1);

	fifo_for_each_entry_ptr(p, &j->pin, seq)
		journal_pin_list_init(p, 1);

	genradix_for_each(&c->journal_entries, iter, _i) {
		i = *_i;

		if (!i || i->ignore)
			continue;

		seq = le64_to_cpu(i->j.seq);
		BUG_ON(seq >= cur_seq);

		if (seq < last_seq)
			continue;

		if (journal_entry_empty(&i->j))
			j->last_empty_seq = le64_to_cpu(i->j.seq);

		p = journal_seq_pin(j, seq);

		p->devs.nr = 0;
		darray_for_each(i->ptrs, ptr)
			bch2_dev_list_add_dev(&p->devs, ptr->dev);

		had_entries = true;
	}

	if (!had_entries)
		j->last_empty_seq = cur_seq;

	spin_lock(&j->lock);

	set_bit(JOURNAL_STARTED, &j->flags);
	j->last_flush_write = jiffies;

	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
	j->reservations.unwritten_idx++;

	c->last_bucket_seq_cleanup = journal_cur_seq(j);

	bch2_journal_space_available(j);
	spin_unlock(&j->lock);

	return bch2_journal_reclaim_start(j);
}

void bch2_dev_journal_exit(struct bch_dev *ca)
{
	struct journal_device *ja = &ca->journal;

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		kfree(ja->bio[i]);
		ja->bio[i] = NULL;
	}

	kfree(ja->buckets);
	kfree(ja->bucket_seq);
	ja->buckets	= NULL;
	ja->bucket_seq	= NULL;
}

int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
	struct journal_device *ja = &ca->journal;
	struct bch_sb_field_journal *journal_buckets =
		bch2_sb_field_get(sb, journal);
	struct bch_sb_field_journal_v2 *journal_buckets_v2 =
		bch2_sb_field_get(sb, journal_v2);

	ja->nr = 0;

	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);

		for (unsigned i = 0; i < nr; i++)
			ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
	} else if (journal_buckets) {
		ja->nr = bch2_nr_journal_buckets(journal_buckets);
	}

	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->bucket_seq)
		return -BCH_ERR_ENOMEM_dev_journal_init;

	unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);

	for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
		ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
				     nr_bvecs), GFP_KERNEL);
		if (!ja->bio[i])
			return -BCH_ERR_ENOMEM_dev_journal_init;

		ja->bio[i]->ca = ca;
		ja->bio[i]->buf_idx = i;
		bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
	}

	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
	if (!ja->buckets)
		return -BCH_ERR_ENOMEM_dev_journal_init;
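
	/*
	 * Decode the superblock's bucket list. A hypothetical v2 entry
	 * { .start = 100, .nr = 3 } expands to buckets 100, 101, 102:
	 * journal_v2 stores runs of contiguous buckets as (start, nr)
	 * pairs, where the v1 field stores each bucket number individually.
	 */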
	if (journal_buckets_v2) {
		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
		unsigned dst = 0;

		for (unsigned i = 0; i < nr; i++)
			for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
				ja->buckets[dst++] =
					le64_to_cpu(journal_buckets_v2->d[i].start) + j;
	} else if (journal_buckets) {
		for (unsigned i = 0; i < ja->nr; i++)
			ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
	}

	return 0;
}

void bch2_fs_journal_exit(struct journal *j)
{
	if (j->wq)
		destroy_workqueue(j->wq);

	darray_exit(&j->early_journal_entries);

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
		kvfree(j->buf[i].data);
	free_fifo(&j->pin);
}

int bch2_fs_journal_init(struct journal *j)
{
	static struct lock_class_key res_key;

	mutex_init(&j->buf_lock);
	spin_lock_init(&j->lock);
	spin_lock_init(&j->err_lock);
	init_waitqueue_head(&j->wait);
	INIT_DELAYED_WORK(&j->write_work, journal_write_work);
	init_waitqueue_head(&j->reclaim_wait);
	init_waitqueue_head(&j->pin_flush_wait);
	mutex_init(&j->reclaim_lock);
	mutex_init(&j->discard_lock);

	lockdep_init_map(&j->res_map, "journal res", &res_key, 0);

	atomic64_set(&j->reservations.counter,
		((union journal_res_state)
		 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);

	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
		return -BCH_ERR_ENOMEM_journal_pin_fifo;

	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
		if (!j->buf[i].data)
			return -BCH_ERR_ENOMEM_journal_buf;
		j->buf[i].idx = i;
	}

	j->pin.front = j->pin.back = 1;

	j->wq = alloc_workqueue("bcachefs_journal",
				WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
	if (!j->wq)
		return -BCH_ERR_ENOMEM_fs_other_alloc;
	return 0;
}

/* debug: */

void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	union journal_res_state s;
	unsigned long now = jiffies;
	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;

	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 24);
	out->atomic++;

	rcu_read_lock();
	s = READ_ONCE(j->reservations);

	prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
	prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
	prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
	prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
	prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
	prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
	prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
	prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
	prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
	prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
	prt_printf(out, "average write size:\t");
	prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
	prt_newline(out);
	prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
	prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
	prt_printf(out, "reclaim kicked:\t\t%u\n", j->reclaim_kicked);
	prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
		   ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
	prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
	prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
	prt_printf(out, "current entry:\t\t");

	switch (s.cur_entry_offset) {
	case JOURNAL_ENTRY_ERROR_VAL:
		prt_printf(out, "error");
		break;
	case JOURNAL_ENTRY_CLOSED_VAL:
		prt_printf(out, "closed");
		break;
	default:
		prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
		break;
	}
	prt_newline(out);

	prt_printf(out, "unwritten entries:");
	prt_newline(out);
	bch2_journal_bufs_to_text(out, j);

	prt_printf(out,
		   "replay done:\t\t%i\n",
		   test_bit(JOURNAL_REPLAY_DONE, &j->flags));

	prt_printf(out, "space:\n");
	prt_printf(out, "\tdiscarded\t%u:%u\n",
		   j->space[journal_space_discarded].next_entry,
		   j->space[journal_space_discarded].total);
	prt_printf(out, "\tclean ondisk\t%u:%u\n",
		   j->space[journal_space_clean_ondisk].next_entry,
		   j->space[journal_space_clean_ondisk].total);
	prt_printf(out, "\tclean\t\t%u:%u\n",
		   j->space[journal_space_clean].next_entry,
		   j->space[journal_space_clean].total);
	prt_printf(out, "\ttotal\t\t%u:%u\n",
		   j->space[journal_space_total].next_entry,
		   j->space[journal_space_total].total);

	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
			continue;

		if (!ja->nr)
			continue;

		prt_printf(out, "dev %u:\n", ca->dev_idx);
		prt_printf(out, "\tnr\t\t%u\n", ja->nr);
		prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
		prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
		prt_printf(out, "\tdiscard_idx\t%u\n", ja->discard_idx);
		prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
		prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
		prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
	}

	rcu_read_unlock();

	--out->atomic;
}

void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
	spin_lock(&j->lock);
	__bch2_journal_debug_to_text(out, j);
	spin_unlock(&j->lock);
}

bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *pin;

	spin_lock(&j->lock);
	*seq = max(*seq, j->pin.front);

	if (*seq >= j->pin.back) {
		spin_unlock(&j->lock);
		return true;
	}

	out->atomic++;

	pin_list = journal_seq_pin(j, *seq);

	prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
	prt_newline(out);
	printbuf_indent_add(out, 2);

	for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
		list_for_each_entry(pin, &pin_list->list[i], list) {
			prt_printf(out, "\t%px %ps", pin, pin->flush);
			prt_newline(out);
		}

	if (!list_empty(&pin_list->flushed)) {
		prt_printf(out, "flushed:");
		prt_newline(out);
	}

	list_for_each_entry(pin, &pin_list->flushed, list) {
		prt_printf(out, "\t%px %ps", pin, pin->flush);
		prt_newline(out);
	}

	printbuf_indent_sub(out, 2);

	--out->atomic;
	spin_unlock(&j->lock);

	return false;
}

void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
	u64 seq = 0;

	while (!bch2_journal_seq_pins_to_text(out, j, &seq))
		seq++;
}