// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

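/*
 * Publish how much journal space remains for pre-reservations: updated with a
 * cmpxchg loop so j->prereserved can be read without holding j->lock:
 */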
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
	union journal_preres_state old, new;
	u64 v = atomic64_read(&j->prereserved.counter);

	do {
		old.v = new.v = v;
		new.remaining = u64s_remaining;
	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
				       old.v, new.v)) != old.v);
}

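/*
 * Return the journal space available on a single device: how many sectors the
 * next entry can use, and how much total space is free, according to @from:
 */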
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}

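/*
 * Collect per-device space, sorted largest to smallest: what we can write with
 * the wanted degree of replication is the space on the @nr_devs_want'th
 * largest device:
 */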
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
			    enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned i, pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr)
			continue;

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	return dev_space[nr_devs_want - 1];
}

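/*
 * Recalculate j->space[], the current entry size and the journal error state;
 * must be called with j->lock held whenever journal space may have changed:
 */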
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned clean, clean_ondisk, total;
	s64 u64s_remaining = 0;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned i, nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < c->opts.metadata_replicas_required) {
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry &&
	    journal_cur_seq(j) == j->seq_ondisk) {
		struct printbuf buf = PRINTBUF;

		__bch2_journal_debug_to_text(&buf, j);
		bch_err(c, "journal stuck\n%s", buf.buf);
		printbuf_exit(&buf);

		/*
		 * Hack: bch2_fatal_error() calls bch2_journal_halt() which
		 * takes journal lock:
		 */
		spin_unlock(&j->lock);
		bch2_fatal_error(c);
		spin_lock(&j->lock);

		ret = JOURNAL_ERR_journal_stuck;
	} else if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
	else
		clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

	u64s_remaining  = (u64) clean << 6;
	u64s_remaining -= (u64) total << 3;
	u64s_remaining  = max(0LL, u64s_remaining);
	u64s_remaining /= (u64) c->opts.btree_node_size >> 9;
	u64s_remaining  = min_t(u64, u64s_remaining, U32_MAX);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;
	journal_set_remaining(j, u64s_remaining);
	journal_set_watermark(j);

	if (!ret)
		journal_wake(j);
}

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned iter;

	mutex_lock(&j->discard_lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOIO);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

static void bch2_journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		fifo_pop(&j->pin, temp);
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}

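/* Drop a ref on journal sequence number @seq; caller must hold j->lock: */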
void __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count))
		bch2_journal_reclaim_fast(j);
}

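/* As above, but takes j->lock itself: */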
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}

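/*
 * Classify a pin by its flush callback, so btree node, key cache and other
 * pins can be flushed selectively:
 */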
enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1)
		return JOURNAL_PIN_btree;
	else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_key_cache;
	else
		return JOURNAL_PIN_other;
}

void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list;
	bool reclaim;

	spin_lock(&j->lock);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	pin_list = journal_seq_pin(j, seq);

	reclaim = __journal_pin_drop(j, pin);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (flush_fn)
		list_add(&pin->list, &pin_list->list[journal_pin_type(flush_fn)]);
	else
		list_add(&pin->list, &pin_list->flushed);

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal.
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

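/*
 * Walk the pin fifo in sequence number order and return the first pin of an
 * allowed type, stopping at @seq_to_flush unless a type is allowed above it:
 */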
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;
	unsigned i;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (i = 0; i < JOURNAL_PIN_NR; i++)
			if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    ((1U << i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->list[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}

/* Returns the number of journal pins flushed: */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= 1U << JOURNAL_PIN_key_cache;
			allowed_below |= 1U << JOURNAL_PIN_key_cache;
		}

		cond_resched();
		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;
		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}

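/*
 * Pick a target sequence number for reclaim: far enough that each journal
 * device is at most half full (including pre-reservations) and the pin fifo
 * is at most half full:
 */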
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	u64 seq_to_flush = 0;
	unsigned iter;

	spin_lock(&j->lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		/* And include pre-reservations: */
		nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
					   (ca->mi.bucket_size << 6) -
					   journal_entry_overhead(j));

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}

/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than journal_reclaim_delay since we last
		 * flushed, make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->prereserved.reserved * 4 > j->prereserved.remaining)
			min_nr = 1;

		if (fifo_free(&j->pin) <= 32)
			min_nr = 1;

		if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				j->prereserved.reserved,
				j->prereserved.remaining,
				atomic_read(&c->btree_cache.dirty),
				c->btree_cache.used,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}

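/* Direct reclaim, called from outside the background reclaim thread: */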
int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}

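/*
 * Background reclaim thread: runs reclaim on a timer, sleeping until it's
 * kicked or the next reclaim is due; if the pin fifo is empty, sleeps until
 * woken:
 */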
static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			if (journal_empty)
				schedule();
			else if (time_after(j->next_reclaim, jiffies))
				schedule_timeout(j->next_reclaim - jiffies);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	if (ret) {
		bch_err(c, "error creating journal reclaim thread: %s", bch2_err_str(ret));
		return ret;
	}

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

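/*
 * Condition for closure_wait_event() below: flushes pins up to @seq_to_flush
 * and returns true once there's nothing left to wait for:
 */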
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	if (journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_key_cache)|
			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
	    journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
		*did_work = true;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	bool did_work = false;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
			   journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

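/*
 * Flush pins for journal entries that have a copy on @dev_idx (or, if
 * @dev_idx is negative, entries with fewer than metadata_replicas copies),
 * then update the journal replicas entries to match what's still pinned:
 */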
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	seq = 0;

	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, &replicas.e);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}