// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "buckets.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "sb-members.h"
#include "trace.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */
static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}
unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}
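/*
 * Worked example of the calculation above (illustrative numbers, not
 * from the code): with ja->nr = 8 buckets, ja->cur_idx = 6 and
 * journal_space_from() returning 2, available =
 * (2 - 6 - 1 + 8) % 8 = 3 buckets. Adding ja->nr before the modulo
 * keeps the unsigned subtraction from wrapping.
 */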
static inline void journal_set_watermark(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool low_on_space = j->space[journal_space_clean].total * 4 <=
		j->space[journal_space_total].total;
	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
	unsigned watermark = low_on_space || low_on_pin
		? BCH_WATERMARK_reclaim
		: BCH_WATERMARK_stripe;

	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space],
			       &j->low_on_space_start, low_on_space) ||
	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin],
			       &j->low_on_pin_start, low_on_pin))
		trace_and_count(c, journal_full, c);

	swap(watermark, j->watermark);
	if (watermark > j->watermark)
		journal_wake(j);
}
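/*
 * Example of the thresholds above (illustrative numbers): with a total
 * journal of 100 buckets, low_on_space becomes true once clean space
 * drops to 25 buckets (25 * 4 <= 100); low_on_pin fires when less than
 * a quarter of the pin FIFO is free. Either condition raises the
 * watermark to BCH_WATERMARK_reclaim, which roughly means only
 * reclaim-priority operations may take journal reservations until
 * space recovers.
 */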
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}
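/*
 * Worked example (illustrative numbers): bucket_size = 512 sectors,
 * buckets = 3, sectors_free = 100, one unwritten entry of 200 sectors.
 * 200 >= 100, so the entry spills to a fresh bucket (buckets = 2,
 * sectors = 512), then sectors -= 200 leaves 312. The final fixup sees
 * 312 < 512 with buckets remaining, so the next entry gets a whole
 * bucket: .next_entry = 512, .total = 512 + 1 * 512 = 1024.
 */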
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
						      enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned i, pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr)
			continue;

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	return dev_space[nr_devs_want - 1];
}
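/*
 * Example (illustrative): with nr_devs_want = 2 and three devices
 * whose .total is 100, 80 and 60, array_insert_item() keeps dev_space
 * sorted descending, and dev_space[1] (total 80) is returned: the
 * smallest of the two largest devices, i.e. the most space usable
 * while still writing every entry to two devices.
 */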
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned clean, clean_ondisk, total;
	unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned i, nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < c->opts.metadata_replicas_required) {
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
	else
		clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

	journal_set_watermark(j);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;

	if (!ret)
		journal_wake(j);
}
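/*
 * On the JOURNAL_MAY_SKIP_FLUSH condition above (as read from the
 * code, not a quote from it): noflush journal writes stay allowed only
 * while the gap between clean and clean-on-disk space is small (at
 * most total / 8) and at least half the clean space is already clean
 * on disk - i.e. while skipping flushes cannot let unflushed entries
 * pin too much of the journal.
 */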
/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned iter;

	mutex_lock(&j->discard_lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */
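/*
 * Typical usage, sketched (bch2_journal_pin_add() is the helper from
 * journal_reclaim.h; the flush callback and the object holding the pin
 * here are hypothetical):
 *
 *	static int my_flush(struct journal *j,
 *			    struct journal_entry_pin *pin, u64 seq)
 *	{
 *		// write out whatever is keeping @seq dirty, then:
 *		bch2_journal_pin_drop(j, pin);
 *		return 0;
 *	}
 *
 *	bch2_journal_pin_add(j, seq, &obj->journal_pin, my_flush);
 *
 * While the pin is held, journal_last_seq() cannot advance past @seq;
 * reclaim calls the flush function when it needs the entry released.
 */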
void bch2_journal_reclaim_fast(struct journal *j)
{
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		j->pin.front++;
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}
bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	return atomic_dec_and_test(&pin_list->count);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	if (__bch2_journal_pin_put(j, seq)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}
static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}
void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}
static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1)
		return JOURNAL_PIN_btree;
	else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_key_cache;
	else
		return JOURNAL_PIN_other;
}
static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
					       struct journal_entry_pin *pin,
					       journal_pin_flush_fn flush_fn,
					       enum journal_pin_type type)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	/*
	 * flush_fn is how we identify journal pins in debugfs, so must always
	 * exist, even if it doesn't do anything:
	 */
	BUG_ON(!flush_fn);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;
	list_add(&pin->list, &pin_list->list[type]);
}
void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	bool reclaim;

	spin_lock(&j->lock);

	u64 seq = READ_ONCE(src->seq);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	reclaim = __journal_pin_drop(j, dst);

	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}
void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	bool reclaim;

	spin_lock(&j->lock);

	BUG_ON(seq < journal_last_seq(j));

	reclaim = __journal_pin_drop(j, pin);

	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}
/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 * @j:		journal object
 * @pin:	pin to flush
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}
/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;
	unsigned i;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (i = 0; i < JOURNAL_PIN_NR; i++)
			if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    ((1U << i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->list[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}
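/*
 * The allowed_below_seq/allowed_above_seq arguments are bitmasks over
 * enum journal_pin_type, not sequence numbers. An assumed call, for
 * illustration:
 *
 *	pin = journal_get_next_pin(j, seq_to_flush,
 *				   (1U << JOURNAL_PIN_key_cache) |
 *				   (1U << JOURNAL_PIN_other),
 *				   0, &seq);
 *
 * returns key cache and "other" pins at sequence numbers up to
 * seq_to_flush, and nothing above it.
 */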
/* returns true if we did work */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= 1U << JOURNAL_PIN_key_cache;
			allowed_below |= 1U << JOURNAL_PIN_key_cache;
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;
		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	u64 seq_to_flush = 0;
	unsigned iter;

	spin_lock(&j->lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}
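/*
 * Worked example of the half-full target (illustrative numbers): with
 * ja->nr = 64 and ja->cur_idx = 10, nr_buckets = 32 and
 * bucket_to_flush = (10 + 32) % 64 = 42, so we flush up to
 * ja->bucket_seq[42] - enough that at most half of the device's
 * journal buckets stay dirty. The pin-FIFO term works the same way:
 * flush down to journal_cur_seq(j) minus half the FIFO size.
 */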
/**
 * __bch2_journal_reclaim - free up journal buckets
 * @j:		journal object
 * @direct:	direct or background reclaim?
 * @kicked:	requested to run since we last ran?
 * Returns:	0 on success, or -EIO if the journal has been shutdown
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than j->reclaim_delay_ms since we last flushed,
		 * make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->watermark != BCH_WATERMARK_stripe)
			min_nr = 1;

		if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				atomic_read(&c->btree_cache.dirty),
				c->btree_cache.used,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}
int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}
static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			if (journal_empty)
				schedule();
			else if (time_after(j->next_reclaim, jiffies))
				schedule_timeout(j->next_reclaim - jiffies);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}
void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}
int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	if (ret) {
		bch_err_msg(c, ret, "creating journal reclaim thread");
		return ret;
	}

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	if (journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_key_cache)|
			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
	    journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
		*did_work = true;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}
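/*
 * Why journal_flush_done() makes two journal_flush_pins() passes (as
 * read from the code, not stated by it): flushing key cache and
 * "other" pins dirties btree nodes, which take new journal pins of
 * their own - so btree pins are flushed in a second pass, after
 * everything that feeds them.
 */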
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	/* time_stats this */
	bool did_work = false;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
			   journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal might
	 * be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	seq = 0;
	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, &replicas.e);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}
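/*
 * Usage note (sketch): dev_idx >= 0 flushes every journal entry still
 * referencing that device, e.g. before removing it:
 *
 *	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
 *
 * dev_idx < 0 instead flushes entries whose replica list is degraded
 * (fewer than opts.metadata_replicas devices).
 */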