// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
/* Free space calculations: */
static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}
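/*
 * Number of journal buckets on @ja between the current write position and the
 * first bucket we may not reuse yet, treating ja->buckets as a ring.
 *
 * Worked example: with ja->nr == 8, ja->cur_idx == 2 and journal_space_from()
 * returning 6, (6 - 2 - 1 + 8) % 8 == 3 buckets are available.
 */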
unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}
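/*
 * Publish the amount of space still available for pre-reservations, in u64s;
 * done with a cmpxchg loop so it can race with reservations being taken:
 */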
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
	union journal_preres_state old, new;
	u64 v = atomic64_read(&j->prereserved.counter);

	do {
		old.v = new.v = v;
		new.remaining = u64s_remaining;
	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
				       old.v, new.v)) != old.v);
}
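/*
 * Per device accounting: .next_entry is the space available for the next
 * journal entry on this device, .total the total usable journal space on it,
 * both in 512 byte sectors:
 */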
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}
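/*
 * Gather per device space, sorted largest to smallest; the next entry must fit
 * on @nr_devs_want devices, so what we can use is the @nr_devs_want'th largest:
 */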
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
			    enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned i, pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr)
			continue;

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	return dev_space[nr_devs_want - 1];
}
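/*
 * Recalculate j->space[], the size and error state for the next journal entry,
 * and the pre-reservation headroom; expects j->lock to be held:
 */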
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned clean, clean_ondisk, total;
	s64 u64s_remaining = 0;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned i, nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < c->opts.metadata_replicas_required) {
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!clean_ondisk &&
	    journal_cur_seq(j) == j->seq_ondisk) {
		struct printbuf buf = PRINTBUF;

		__bch2_journal_debug_to_text(&buf, j);
		bch_err(c, "journal stuck\n%s", buf.buf);
		printbuf_exit(&buf);

		bch2_fatal_error(c);
		ret = JOURNAL_ERR_journal_stuck;
	} else if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
	else
		clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

	u64s_remaining  = (u64) clean << 6;
	u64s_remaining -= (u64) total << 3;
	u64s_remaining  = max(0LL, u64s_remaining);
	u64s_remaining /= 4;
	u64s_remaining  = min_t(u64, u64s_remaining, U32_MAX);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;
	journal_set_remaining(j, u64s_remaining);
	journal_set_watermark(j);

	if (!ret)
		journal_wake(j);
}
/* Discards - last part of journal reclaim: */
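/*
 * A journal bucket may be discarded once the entries it held are no longer
 * dirty and the updated last_seq reflecting that has been written out, i.e.
 * once discard_idx lags behind dirty_idx_ondisk:
 */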
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}
/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned iter;

	mutex_lock(&j->discard_lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOIO, 0);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

static void bch2_journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
		BUG_ON(!fifo_pop(&j->pin, temp));
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}
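/*
 * Drop a reference on journal entry @seq; __bch2_journal_pin_put() expects
 * j->lock to be held, bch2_journal_pin_put() takes it as needed:
 */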
void __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count))
		bch2_journal_reclaim_fast(j);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}
static inline void __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	if (atomic_dec_and_test(&pin_list->count) &&
	    pin_list == &fifo_peek_front(&j->pin))
		bch2_journal_reclaim_fast(j);
}
void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	__journal_pin_drop(j, pin);
	spin_unlock(&j->lock);
}
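/*
 * Add @pin to the pin list for journal entry @seq (re-arming it if it was
 * already active); which list it goes on depends on @flush_fn, so that key
 * cache pins can be flushed separately from btree node pins:
 */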
void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list;

	spin_lock(&j->lock);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	pin_list = journal_seq_pin(j, seq);

	__journal_pin_drop(j, pin);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (flush_fn == bch2_btree_key_cache_journal_flush)
		list_add(&pin->list, &pin_list->key_cache_list);
	else if (flush_fn)
		list_add(&pin->list, &pin_list->list);
	else
		list_add(&pin->list, &pin_list->flushed);

	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}
/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}
/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     bool get_any,
		     bool get_key_cache,
		     u64 max_seq, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > max_seq && !get_any && !get_key_cache)
			break;

		if (*seq <= max_seq || get_any) {
			ret = list_first_entry_or_null(&pin_list->list,
				struct journal_entry_pin, list);
			if (ret)
				return ret;
		}

		if (*seq <= max_seq || get_any || get_key_cache) {
			ret = list_first_entry_or_null(&pin_list->key_cache_list,
				struct journal_entry_pin, list);
			if (ret)
				return ret;
		}
	}

	return NULL;
}
/* Returns the number of journal pins flushed: */
static size_t journal_flush_pins(struct journal *j, u64 seq_to_flush,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j,
					   min_any != 0,
					   min_key_cache != 0,
					   seq_to_flush, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}
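/*
 * ca->mi.bucket_size is in 512 byte sectors, so a bucket holds roughly
 * bucket_size * 512 / 8 == bucket_size << 6 u64s of journal entries; that is
 * what the pre-reservation term below divides by to convert u64s to buckets.
 */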
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	u64 seq_to_flush = 0;
	unsigned iter;

	spin_lock(&j->lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		/* And include pre-reservations: */
		nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
					   (ca->mi.bucket_size << 6) -
					   journal_entry_overhead(j));

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}
/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than the journal reclaim delay since we
		 * last flushed, make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->prereserved.reserved * 4 > j->prereserved.remaining)
			min_nr = 1;

		if (fifo_free(&j->pin) <= 32)
			min_nr = 1;

		if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
			min_nr = 1;

		trace_journal_reclaim_start(c,
				min_nr,
				j->prereserved.reserved,
				j->prereserved.remaining,
				atomic_read(&c->btree_cache.dirty),
				c->btree_cache.used,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_journal_reclaim_finish(c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}
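/*
 * Direct reclaim, run synchronously by a caller that needs journal space; the
 * caller must hold j->reclaim_lock:
 */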
int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true);
}
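/*
 * Background reclaim thread: runs reclaim periodically (or when kicked via
 * j->reclaim_kicked) and sleeps while the pin fifo is empty:
 */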
static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			if (journal_empty)
				freezable_schedule();
			else if (time_after(j->next_reclaim, jiffies))
				freezable_schedule_timeout(j->next_reclaim - jiffies);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}
void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}
int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	if (IS_ERR(p)) {
		bch_err(c, "error creating journal reclaim thread: %li", PTR_ERR(p));
		return PTR_ERR(p);
	}

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}
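/*
 * Wait condition for bch2_journal_flush_pins(): returns true (done) once
 * everything up to @seq_to_flush has been flushed, or on journal error:
 */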
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	if (journal_flush_pins(j, seq_to_flush, 0, 0))
		*did_work = true;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		(fifo_used(&j->pin) == 1 &&
		 atomic_read(&fifo_peek_front(&j->pin).count) == 1);
	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	bool did_work = false;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
		journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}
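/*
 * Flush all journal pins for entries that reference @dev_idx (or, if @dev_idx
 * is negative, entries with fewer than metadata_replicas copies), then rebuild
 * the journal replicas entries, e.g. prior to removing a device:
 */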
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	seq = 0;

	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, &replicas.e);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}