// SPDX-License-Identifier: GPL-2.0

#include "btree_key_cache.h"
#include "btree_update.h"
#include "journal_io.h"
#include "journal_reclaim.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

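/*
 * E.g. (illustrative numbers): with ja->nr == 8, ja->cur_idx == 6 and
 * journal_space_from() returning 2, (2 - 6 - 1 + 8) % 8 == 3 buckets
 * available - the ring-buffer distance from the bucket we're currently
 * writing to up to, but not including, the oldest bucket we still need.
 */
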
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
	union journal_preres_state old, new;
	u64 v = atomic64_read(&j->prereserved.counter);

	do {
		old.v = new.v = v;
		new.remaining = u64s_remaining;
	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
				       old.v, new.v)) != old.v);
}

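/*
 * Per-device free space, as a (next_entry, total) pair: how many
 * contiguous sectors the next journal entry can use on this device, and
 * how much free space there is overall - accounting for entries that
 * are open but not yet written:
 */
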
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
			    enum journal_space_from from)
{
	struct journal_device *ja = &ca->journal;
	unsigned sectors, buckets, unwritten;
	u64 seq;

	if (from == journal_space_total)
		return (struct journal_space) {
			.next_entry	= ca->mi.bucket_size,
			.total		= ca->mi.bucket_size * ja->nr,
		};

	buckets = bch2_journal_dev_buckets_available(j, ja, from);
	sectors = ja->sectors_free;

	/*
	 * Note that we don't allocate the space for a journal entry
	 * until we write it out - thus, account for it here:
	 */
	for (seq = journal_last_unwritten_seq(j);
	     seq <= journal_cur_seq(j);
	     seq++) {
		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;

		if (!unwritten)
			continue;

		/* entry won't fit on this device, skip: */
		if (unwritten > ca->mi.bucket_size)
			continue;

		if (unwritten >= sectors) {
			if (!buckets) {
				sectors = 0;
				break;
			}

			buckets--;
			sectors = ca->mi.bucket_size;
		}

		sectors -= unwritten;
	}

	if (sectors < ca->mi.bucket_size && buckets) {
		buckets--;
		sectors = ca->mi.bucket_size;
	}

	return (struct journal_space) {
		.next_entry	= sectors,
		.total		= sectors + buckets * ca->mi.bucket_size,
	};
}

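/*
 * E.g., in the unwritten-entry loop above (illustrative numbers): with
 * bucket_size == 512, sectors_free == 100 and one unwritten entry of
 * 200 sectors, the entry can't go in the 100 sectors left, so it's
 * charged a fresh bucket: buckets--, then sectors = 512 - 200 = 312.
 */
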
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
			    enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned i, pos, nr_devs = 0;
	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		if (!ca->journal.nr)
			continue;

		space = journal_dev_space_available(j, ca, from);
		if (!space.next_entry)
			continue;

		for (pos = 0; pos < nr_devs; pos++)
			if (space.total > dev_space[pos].total)
				break;

		array_insert_item(dev_space, nr_devs, pos, space);
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	/*
	 * We sorted largest to smallest, and we want the smallest out of the
	 * @nr_devs_want largest devices:
	 */
	return dev_space[nr_devs_want - 1];
}

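/*
 * E.g. wanting 2 replicas with per-device totals of {100, 80, 60}
 * sectors, we return the entry with 80 - the most space we can use
 * while still writing every entry at the required replication.
 */
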
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned clean, clean_ondisk, total;
	s64 u64s_remaining = 0;
	unsigned max_entry_size	= min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned i, nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < c->opts.metadata_replicas_required) {
		ret = JOURNAL_ERR_insufficient_devices;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	for (i = 0; i < journal_space_nr; i++)
		j->space[i] = __journal_space_available(j, nr_devs_want, i);

	clean_ondisk	= j->space[journal_space_clean_ondisk].total;
	clean		= j->space[journal_space_clean].total;
	total		= j->space[journal_space_total].total;

	if (!j->space[journal_space_discarded].next_entry)
		ret = JOURNAL_ERR_journal_full;

	if ((j->space[journal_space_clean_ondisk].next_entry <
	     j->space[journal_space_clean_ondisk].total) &&
	    (clean - clean_ondisk <= total / 8) &&
	    (clean_ondisk * 2 > clean))
		set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
	else
		clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);

	u64s_remaining  = (u64) clean << 6;
	u64s_remaining -= (u64) total << 3;
	u64s_remaining = max(0LL, u64s_remaining);
	u64s_remaining /= 4;
	u64s_remaining = min_t(u64, u64s_remaining, U32_MAX);
out:
	j->cur_entry_sectors	= !ret ? j->space[journal_space_discarded].next_entry : 0;
	j->cur_entry_error	= ret;
	journal_set_remaining(j, u64s_remaining);
	journal_set_watermark(j);

	if (!ret)
		journal_wake(j);
}

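/*
 * One reading of the u64s_remaining arithmetic above (an
 * interpretation, not from the original comments): clean << 6 converts
 * 512-byte sectors to u64s, << 3 knocks off 8 u64s of slack per sector
 * of total journal size, and /= 4 caps prereservations at a quarter of
 * what remains.
 */
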
/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned iter;

	mutex_lock(&j->discard_lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (!c->opts.nochanges &&
			    ca->mi.discard &&
			    bdev_max_discard_sectors(ca->disk_sb.bdev))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOFS);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

static void bch2_journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		fifo_pop(&j->pin, temp);
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}

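/*
 * Two flavors of pin_put below: __bch2_journal_pin_put() is for callers
 * that already hold j->lock (which bch2_journal_reclaim_fast() asserts),
 * while bch2_journal_pin_put() takes the lock itself when the count
 * hits zero:
 */
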
void __bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count))
		bch2_journal_reclaim_fast(j);
}

void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline bool __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return false;

	if (j->flush_in_progress == pin)
		j->flush_in_progress_dropped = true;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	return atomic_dec_and_test(&pin_list->count) &&
		pin_list == &fifo_peek_front(&j->pin);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	if (__journal_pin_drop(j, pin))
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);
}

static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
	if (fn == bch2_btree_node_flush0 ||
	    fn == bch2_btree_node_flush1)
		return JOURNAL_PIN_btree;
	else if (fn == bch2_btree_key_cache_journal_flush)
		return JOURNAL_PIN_key_cache;
	else
		return JOURNAL_PIN_other;
}

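/*
 * Pins are segregated by what their flush callback will do - btree node
 * writes, key cache flushes, or everything else - so that reclaim can
 * flush one class of pins while skipping the others:
 */
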
void bch2_journal_pin_set(struct journal *j, u64 seq,
			  struct journal_entry_pin *pin,
			  journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list;
	bool reclaim;

	spin_lock(&j->lock);

	if (seq < journal_last_seq(j)) {
		/*
		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
		 * the src pin - with the pin dropped, the entry to pin might no
		 * longer exist, but that means there's no longer anything to
		 * copy and we can bail out here:
		 */
		spin_unlock(&j->lock);
		return;
	}

	pin_list = journal_seq_pin(j, seq);

	reclaim = __journal_pin_drop(j, pin);

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	if (flush_fn)
		list_add(&pin->list, &pin_list->list[journal_pin_type(flush_fn)]);
	else
		list_add(&pin->list, &pin_list->flushed);

	if (reclaim)
		bch2_journal_reclaim_fast(j);
	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space in
 * the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

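/*
 * Note: despite the _seq suffix, allowed_below_seq/allowed_above_seq in
 * journal_get_next_pin() below are bitmasks of journal_pin_type - which
 * pin classes may be flushed from entries at or below @seq_to_flush,
 * and which may be flushed regardless of sequence number.
 */
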
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
		     u64 seq_to_flush,
		     unsigned allowed_below_seq,
		     unsigned allowed_above_seq,
		     u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;
	unsigned i;

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
		if (*seq > seq_to_flush && !allowed_above_seq)
			break;

		for (i = 0; i < JOURNAL_PIN_NR; i++)
			if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
			    ((1U << i) & allowed_above_seq)) {
				ret = list_first_entry_or_null(&pin_list->list[i],
					struct journal_entry_pin, list);
				if (ret)
					return ret;
			}
	}

	return NULL;
}

/* returns true if we did work */
static size_t journal_flush_pins(struct journal *j,
				 u64 seq_to_flush,
				 unsigned allowed_below_seq,
				 unsigned allowed_above_seq,
				 unsigned min_any,
				 unsigned min_key_cache)
{
	struct journal_entry_pin *pin;
	size_t nr_flushed = 0;
	journal_pin_flush_fn flush_fn;
	u64 seq;
	int err;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		unsigned allowed_above = allowed_above_seq;
		unsigned allowed_below = allowed_below_seq;

		if (min_any) {
			allowed_above |= ~0;
			allowed_below |= ~0;
		}

		if (min_key_cache) {
			allowed_above |= 1U << JOURNAL_PIN_key_cache;
			allowed_below |= 1U << JOURNAL_PIN_key_cache;
		}

		cond_resched();

		j->last_flushed = jiffies;

		spin_lock(&j->lock);
		pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
		if (pin) {
			BUG_ON(j->flush_in_progress);
			j->flush_in_progress = pin;
			j->flush_in_progress_dropped = false;
			flush_fn = pin->flush;
		}
		spin_unlock(&j->lock);

		if (!pin)
			break;

		if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
			min_key_cache--;

		if (min_any)
			min_any--;

		err = flush_fn(j, pin, seq);

		spin_lock(&j->lock);
		/* Pin might have been dropped or rearmed: */
		if (likely(!err && !j->flush_in_progress_dropped))
			list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
		j->flush_in_progress = NULL;
		j->flush_in_progress_dropped = false;
		spin_unlock(&j->lock);

		wake_up(&j->pin_flush_wait);

		if (err)
			break;

		nr_flushed++;
	}

	return nr_flushed;
}

static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	u64 seq_to_flush = 0;
	unsigned iter;

	spin_lock(&j->lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		/* And include pre-reservations: */
		nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
					   (ca->mi.bucket_size << 6) -
					   journal_entry_overhead(j));

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}

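/*
 * (In the pre-reservation sizing above, bucket_size << 6 converts a
 * bucket size in 512-byte sectors into a capacity in u64s - 64 u64s per
 * sector - matching the units of j->prereserved.)
 */
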
/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */

static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush;
	size_t min_nr, min_key_cache, nr_flushed;
	unsigned flags;
	int ret = 0;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		if (bch2_journal_error(j)) {
			ret = -EIO;
			break;
		}

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than journal_reclaim_delay since we last
		 * flushed, make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
			min_nr = 1;

		if (j->prereserved.reserved * 4 > j->prereserved.remaining)
			min_nr = 1;

		if (fifo_free(&j->pin) <= 32)
			min_nr = 1;

		if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
			min_nr = 1;

		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

		trace_and_count(c, journal_reclaim_start, c,
				direct, kicked,
				min_nr, min_key_cache,
				j->prereserved.reserved,
				j->prereserved.remaining,
				atomic_read(&c->btree_cache.dirty),
				c->btree_cache.used,
				atomic_long_read(&c->btree_key_cache.nr_dirty),
				atomic_long_read(&c->btree_key_cache.nr_keys));

		nr_flushed = journal_flush_pins(j, seq_to_flush,
						~0, 0,
						min_nr, min_key_cache);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);

		if (nr_flushed)
			wake_up(&j->reclaim_wait);
	} while ((min_nr || min_key_cache) && nr_flushed && !direct);

	memalloc_noreclaim_restore(flags);

	return ret;
}

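/*
 * Direct reclaim - called when the journal needs space freed up
 * immediately, from the current context rather than the background
 * thread:
 */
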
int bch2_journal_reclaim(struct journal *j)
{
	return __bch2_journal_reclaim(j, true, true);
}

static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	unsigned long delay, now;
	bool journal_empty;
	int ret = 0;

	set_freezable();

	j->last_flushed = jiffies;

	while (!ret && !kthread_should_stop()) {
		bool kicked = j->reclaim_kicked;

		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		ret = __bch2_journal_reclaim(j, false, kicked);
		mutex_unlock(&j->reclaim_lock);

		now = jiffies;
		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
		j->next_reclaim = j->last_flushed + delay;

		if (!time_in_range(j->next_reclaim, now, now + delay))
			j->next_reclaim = now + delay;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;

			spin_lock(&j->lock);
			journal_empty = fifo_empty(&j->pin);
			spin_unlock(&j->lock);

			if (journal_empty)
				schedule();
			else if (time_after(j->next_reclaim, jiffies))
				schedule_timeout(j->next_reclaim - jiffies);
			else
				break;
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;
	int ret;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	ret = PTR_ERR_OR_ZERO(p);
	if (ret) {
		bch_err(c, "error creating journal reclaim thread: %s", bch2_err_str(ret));
		return ret;
	}

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

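/*
 * (bch2_journal_reclaim_start() holds a ref on the task via
 * get_task_struct() so that bch2_journal_reclaim_stop() can clear
 * j->reclaim_thread and still safely kthread_stop() it.)
 */
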
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	if (journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_key_cache)|
			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
	    journal_flush_pins(j, seq_to_flush,
			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
		*did_work = true;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		!fifo_used(&j->pin);

	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

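/*
 * (journal_flush_done() above flushes key cache and miscellaneous pins
 * before btree pins: flushing the former generates btree updates, which
 * the btree node flushes then write out.)
 */
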
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	bool did_work = false;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
		journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	/*
	 * Now that we've populated replicas_gc, write to the journal to mark
	 * active journal devices. This handles the case where the journal might
	 * be empty. Otherwise we could clear all journal replicas and
	 * temporarily put the fs into an unrecoverable state. Journal recovery
	 * expects to find devices marked for journal data on unclean mount.
	 */
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	seq = 0;

	spin_lock(&j->lock);
	while (!ret) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		if (seq >= j->pin.back)
			break;

		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, &replicas.e);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);
err:
	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}