// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
/* Free space calculations: */
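/*
 * journal_space_from specifies how conservatively we measure free space: a
 * journal device's buckets form a ring, with discard_idx, dirty_idx_ondisk,
 * dirty_idx and cur_idx chasing each other around it in that order, and free
 * space is measured from cur_idx back to one of those indices:
 */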
static unsigned journal_space_from(struct journal_device *ja,
                                   enum journal_space_from from)
{
        switch (from) {
        case journal_space_discarded:
                return ja->discard_idx;
        case journal_space_clean_ondisk:
                return ja->dirty_idx_ondisk;
        case journal_space_clean:
                return ja->dirty_idx;
        default:
                BUG();
        }
}
unsigned bch2_journal_dev_buckets_available(struct journal *j,
                                            struct journal_device *ja,
                                            enum journal_space_from from)
{
        unsigned available = !test_bit(JOURNAL_NOCHANGES, &j->flags)
                ? ((journal_space_from(ja, from) -
                    ja->cur_idx - 1 + ja->nr) % ja->nr)
                : ja->nr;

        /*
         * Don't use the last bucket unless writing the new last_seq
         * will make another bucket available:
         */
        if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
                --available;

        return available;
}
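/*
 * Publish how much space is available for prereservations:
 * j->prereserved.counter is also updated locklessly by the reservation paths,
 * hence the cmpxchg loop:
 */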
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
        union journal_preres_state old, new;
        u64 v = atomic64_read(&j->prereserved.counter);

        do {
                old.v = new.v = v;
                new.remaining = u64s_remaining;
        } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
                                       old.v, new.v)) != old.v);
}
static inline unsigned get_unwritten_sectors(struct journal *j, unsigned *idx)
{
        unsigned sectors = 0;

        while (!sectors && *idx != j->reservations.idx) {
                sectors = j->buf[*idx].sectors;
                *idx = (*idx + 1) & JOURNAL_BUF_MASK;
        }

        return sectors;
}
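/*
 * Calculate how much space is available on @ca, as measured from @from:
 * .next_entry is the contiguous space for the next journal entry, .total the
 * overall free space, accounting for entries that are reserved but not yet
 * written out:
 */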
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
                            enum journal_space_from from)
{
        struct journal_device *ja = &ca->journal;
        unsigned sectors, buckets, unwritten, idx = j->reservations.unwritten_idx;

        if (from == journal_space_total)
                return (struct journal_space) {
                        .next_entry     = ca->mi.bucket_size,
                        .total          = ca->mi.bucket_size * ja->nr,
                };

        buckets = bch2_journal_dev_buckets_available(j, ja, from);
        sectors = ja->sectors_free;

        /*
         * Note that we don't allocate the space for a journal entry
         * until we write it out - thus, account for it here:
         */
        while ((unwritten = get_unwritten_sectors(j, &idx))) {
                /* entry won't fit on this device, skip: */
                if (unwritten > ca->mi.bucket_size)
                        continue;

                if (unwritten >= sectors) {
                        if (!buckets) {
                                sectors = 0;
                                break;
                        }

                        buckets--;
                        sectors = ca->mi.bucket_size;
                }

                sectors -= unwritten;
        }

        if (sectors < ca->mi.bucket_size && buckets) {
                buckets--;
                sectors = ca->mi.bucket_size;
        }

        return (struct journal_space) {
                .next_entry     = sectors,
                .total          = sectors + buckets * ca->mi.bucket_size,
        };
}
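/*
 * Sort candidate devices by total free space, largest first, and return the
 * space on the @nr_devs_want'th largest device - the most we can actually use
 * if we want that many replicas:
 */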
static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
                            enum journal_space_from from)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned i, pos, nr_devs = 0;
        struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

        BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                if (!ca->journal.nr)
                        continue;

                space = journal_dev_space_available(j, ca, from);
                if (!space.next_entry)
                        continue;

                for (pos = 0; pos < nr_devs; pos++)
                        if (space.total > dev_space[pos].total)
                                break;

                array_insert_item(dev_space, nr_devs, pos, space);
        }
        rcu_read_unlock();

        if (nr_devs < nr_devs_want)
                return (struct journal_space) { 0, 0 };

        /*
         * We sorted largest to smallest, and we want the smallest out of the
         * @nr_devs_want largest devices:
         */
        return dev_space[nr_devs_want - 1];
}
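/*
 * Recalculate j->space[] and the size of the next journal entry
 * (j->cur_entry_sectors); called with j->lock held whenever the set of dirty
 * or discarded journal buckets may have changed:
 */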
void bch2_journal_space_available(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned clean, clean_ondisk, total;
        s64 u64s_remaining = 0;
        unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
                                      j->buf[1].buf_size >> 9);
        unsigned i, nr_online = 0, nr_devs_want;
        bool can_discard = false;
        int ret = 0;

        lockdep_assert_held(&j->lock);

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!ja->nr)
                        continue;

                while (ja->dirty_idx != ja->cur_idx &&
                       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

                while (ja->dirty_idx_ondisk != ja->dirty_idx &&
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

                if (ja->discard_idx != ja->dirty_idx_ondisk)
                        can_discard = true;

                max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
                nr_online++;
        }
        rcu_read_unlock();

        j->can_discard = can_discard;
        if (nr_online < c->opts.metadata_replicas_required) {
                ret = cur_entry_insufficient_devices;
                goto out;
        }

        nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

        for (i = 0; i < journal_space_nr; i++)
                j->space[i] = __journal_space_available(j, nr_devs_want, i);
        clean_ondisk    = j->space[journal_space_clean_ondisk].total;
        clean           = j->space[journal_space_clean].total;
        total           = j->space[journal_space_total].total;
        if (!clean_ondisk &&
            j->reservations.idx ==
            j->reservations.unwritten_idx) {
                char *buf = kmalloc(4096, GFP_ATOMIC);

                bch_err(c, "journal stuck");
                if (buf) {
                        __bch2_journal_debug_to_text(&_PBUF(buf, 4096), j);
                        pr_err("\n%s", buf);
                        kfree(buf);
                }

                bch2_fatal_error(c);
                ret = cur_entry_journal_stuck;
        } else if (!j->space[journal_space_discarded].next_entry)
                ret = cur_entry_journal_full;
        else if (!fifo_free(&j->pin))
                ret = cur_entry_journal_pin_full;
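        /*
         * Journal writes may only skip the flush/FUA (see
         * JOURNAL_MAY_SKIP_FLUSH) while we have a comfortable amount of
         * space that's already known to be clean on disk:
         */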
        if ((j->space[journal_space_clean_ondisk].next_entry <
             j->space[journal_space_clean_ondisk].total) &&
            (clean - clean_ondisk <= total / 8) &&
            (clean_ondisk * 2 > clean))
                set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
        else
                clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
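        /*
         * Prereservation space, in u64s: a sector holds 64 u64s, so
         * clean << 6 is the clean space in u64s; we keep back total << 3
         * (an eighth of the total space) and let prereservations use at most
         * a quarter of the remainder:
         */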
        u64s_remaining  = (u64) clean << 6;
        u64s_remaining -= (u64) total << 3;
        u64s_remaining  = max(0LL, u64s_remaining);
        u64s_remaining /= 4;
        u64s_remaining  = min_t(u64, u64s_remaining, U32_MAX);
out:
        j->cur_entry_sectors    = !ret ? j->space[journal_space_discarded].next_entry : 0;
        j->cur_entry_error      = ret;
        journal_set_remaining(j, u64s_remaining);
        journal_check_may_get_unreserved(j);

        if (!ret)
                journal_wake(j);
}
/* Discards - last part of journal reclaim: */
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
        bool ret;

        spin_lock(&j->lock);
        ret = ja->discard_idx != ja->dirty_idx_ondisk;
        spin_unlock(&j->lock);

        return ret;
}
/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned iter;

        mutex_lock(&j->discard_lock);

        for_each_rw_member(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (should_discard_bucket(j, ja)) {
                        if (ca->mi.discard &&
                            blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        bucket_to_sector(ca,
                                                ja->buckets[ja->discard_idx]),
                                        ca->mi.bucket_size, GFP_NOIO, 0);

                        spin_lock(&j->lock);
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

                        bch2_journal_space_available(j);
                        spin_unlock(&j->lock);
                }
        }

        mutex_unlock(&j->discard_lock);
}
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */
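/*
 * Each sequence number in the journal's pin FIFO has a
 * journal_entry_pin_list; anything that needs a journal entry kept around
 * (e.g. a dirty btree node, or a dirty key cache key) holds a pin on that
 * sequence number, and last_seq - and thus reclaim - can only advance past
 * entries whose pin count has hit zero:
 */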
static void bch2_journal_reclaim_fast(struct journal *j)
{
        struct journal_entry_pin_list temp;
        bool popped = false;

        lockdep_assert_held(&j->lock);

        /*
         * Unpin journal entries whose reference counts reached zero, meaning
         * all btree nodes got written out
         */
        while (!fifo_empty(&j->pin) &&
               !atomic_read(&fifo_peek_front(&j->pin).count)) {
                BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
                BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
                BUG_ON(!fifo_pop(&j->pin, temp));
                popped = true;
        }

        if (popped)
                bch2_journal_space_available(j);
}
void __bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        if (atomic_dec_and_test(&pin_list->count))
                bch2_journal_reclaim_fast(j);
}
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        if (atomic_dec_and_test(&pin_list->count)) {
                spin_lock(&j->lock);
                bch2_journal_reclaim_fast(j);
                spin_unlock(&j->lock);
        }
}
static inline void __journal_pin_drop(struct journal *j,
                                      struct journal_entry_pin *pin)
{
        struct journal_entry_pin_list *pin_list;

        if (!journal_pin_active(pin))
                return;

        if (j->flush_in_progress == pin)
                j->flush_in_progress_dropped = true;

        pin_list = journal_seq_pin(j, pin->seq);
        pin->seq = 0;
        list_del_init(&pin->list);

        /*
         * Unpinning a journal entry may make journal_next_bucket() succeed, if
         * writing a new last_seq will now make another bucket available:
         */
        if (atomic_dec_and_test(&pin_list->count) &&
            pin_list == &fifo_peek_front(&j->pin))
                bch2_journal_reclaim_fast(j);
        else if (fifo_used(&j->pin) == 1 &&
                 atomic_read(&pin_list->count) == 1)
                journal_wake(j);
}
void bch2_journal_pin_drop(struct journal *j,
                           struct journal_entry_pin *pin)
{
        spin_lock(&j->lock);
        __journal_pin_drop(j, pin);
        spin_unlock(&j->lock);
}
void bch2_journal_pin_set(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn)
{
        struct journal_entry_pin_list *pin_list;

        spin_lock(&j->lock);

        if (seq < journal_last_seq(j)) {
                /*
                 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
                 * the src pin - with the pin dropped, the entry to pin might no
                 * longer exist, but that means there's no longer anything to
                 * copy and we can bail out here:
                 */
                spin_unlock(&j->lock);
                return;
        }

        pin_list = journal_seq_pin(j, seq);

        __journal_pin_drop(j, pin);

        atomic_inc(&pin_list->count);
        pin->seq        = seq;
        pin->flush      = flush_fn;

        if (flush_fn == bch2_btree_key_cache_journal_flush)
                list_add(&pin->list, &pin_list->key_cache_list);
        else if (flush_fn)
                list_add(&pin->list, &pin_list->list);
        else
                list_add(&pin->list, &pin_list->flushed);

        spin_unlock(&j->lock);

        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        journal_wake(j);
}
/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
        BUG_ON(journal_pin_active(pin));

        wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}
/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */
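/*
 * Pick the next pin to flush: normally only pins on entries up to @max_seq,
 * but @get_any and @get_key_cache (set when reclaim urgently needs to make
 * progress, or the key cache needs shrinking) let us go past it:
 */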
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j,
                     bool get_any,
                     bool get_key_cache,
                     u64 max_seq, u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *ret = NULL;

        fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
                if (*seq > max_seq && !get_any && !get_key_cache)
                        break;

                if (*seq <= max_seq || get_any) {
                        ret = list_first_entry_or_null(&pin_list->list,
                                struct journal_entry_pin, list);
                        if (ret)
                                return ret;
                }

                if (*seq <= max_seq || get_any || get_key_cache) {
                        ret = list_first_entry_or_null(&pin_list->key_cache_list,
                                struct journal_entry_pin, list);
                        if (ret)
                                return ret;
                }
        }

        return NULL;
}
/* Returns the number of journal pins flushed: */
static size_t journal_flush_pins(struct journal *j, u64 seq_to_flush,
                                 unsigned min_nr,
                                 unsigned min_key_cache)
{
        struct journal_entry_pin *pin;
        size_t nr_flushed = 0;
        journal_pin_flush_fn flush_fn;
        u64 seq;
        int err;

        if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags))
                return 0;

        lockdep_assert_held(&j->reclaim_lock);

        while (1) {
                cond_resched();

                j->last_flushed = jiffies;

                spin_lock(&j->lock);
                pin = journal_get_next_pin(j,
                                           min_nr != 0,
                                           min_key_cache != 0,
                                           seq_to_flush, &seq);
                if (pin) {
                        BUG_ON(j->flush_in_progress);
                        j->flush_in_progress = pin;
                        j->flush_in_progress_dropped = false;
                        flush_fn = pin->flush;
                }
                spin_unlock(&j->lock);

                if (!pin)
                        break;

                if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
                        min_key_cache--;

                if (min_nr)
                        min_nr--;

                err = flush_fn(j, pin, seq);

                spin_lock(&j->lock);
                /* Pin might have been dropped or rearmed: */
                if (likely(!err && !j->flush_in_progress_dropped))
                        list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
                j->flush_in_progress = NULL;
                j->flush_in_progress_dropped = false;
                spin_unlock(&j->lock);

                wake_up(&j->pin_flush_wait);

                if (err)
                        break;

                nr_flushed++;
        }

        return nr_flushed;
}
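/*
 * Work out how far we need to flush to keep each device's journal at most
 * half full, with extra headroom for prereservations and the pin FIFO:
 */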
static u64 journal_seq_to_flush(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        u64 seq_to_flush = 0;
        unsigned iter;

        spin_lock(&j->lock);

        for_each_rw_member(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;

                if (!ja->nr)
                        continue;

                /* Try to keep the journal at most half full: */
                nr_buckets = ja->nr / 2;

                /* And include pre-reservations: */
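                /*
                 * (j->prereserved.reserved is in u64s, and a bucket of
                 * bucket_size sectors holds bucket_size << 6 of them, less
                 * the per-entry overhead)
                 */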
                nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
                                           (ca->mi.bucket_size << 6) -
                                           journal_entry_overhead(j));

                nr_buckets = min(nr_buckets, ja->nr);

                bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
                seq_to_flush = max(seq_to_flush,
                                   ja->bucket_seq[bucket_to_flush]);
        }

        /* Also flush if the pin fifo is more than half full */
        seq_to_flush = max_t(s64, seq_to_flush,
                             (s64) journal_cur_seq(j) -
                             (j->pin.size >> 1));
        spin_unlock(&j->lock);

        return seq_to_flush;
}
/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static int __bch2_journal_reclaim(struct journal *j, bool direct)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        u64 seq_to_flush;
        size_t min_nr, min_key_cache, nr_flushed;
        unsigned flags;
        int ret = 0;

        /*
         * We can't invoke memory reclaim while holding the reclaim_lock -
         * journal reclaim is required to make progress for memory reclaim
         * (cleaning the caches), so we can't get stuck in memory reclaim while
         * we're holding the reclaim lock:
         */
        lockdep_assert_held(&j->reclaim_lock);
        flags = memalloc_noreclaim_save();

        do {
                if (kthread && kthread_should_stop())
                        break;

                if (bch2_journal_error(j)) {
                        ret = -EIO;
                        break;
                }

                bch2_journal_do_discards(j);

                seq_to_flush = journal_seq_to_flush(j);
                min_nr = 0;

                /*
                 * If it's been longer than j->reclaim_delay_ms since we last flushed,
                 * make sure to flush at least one journal pin:
                 */
                if (time_after(jiffies, j->last_flushed +
                               msecs_to_jiffies(j->reclaim_delay_ms)))
                        min_nr = 1;

                if (j->prereserved.reserved * 4 > j->prereserved.remaining)
                        min_nr = 1;

                if (fifo_free(&j->pin) <= 32)
                        min_nr = 1;

                trace_journal_reclaim_start(c,
                                min_nr,
                                j->prereserved.reserved,
                                j->prereserved.remaining,
                                atomic_read(&c->btree_cache.dirty),
                                c->btree_cache.used,
                                atomic_long_read(&c->btree_key_cache.nr_dirty),
                                atomic_long_read(&c->btree_key_cache.nr_keys));

                min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);

                nr_flushed = journal_flush_pins(j, seq_to_flush,
                                                min_nr, min_key_cache);

                if (direct)
                        j->nr_direct_reclaim += nr_flushed;
                else
                        j->nr_background_reclaim += nr_flushed;
                trace_journal_reclaim_finish(c, nr_flushed);

                if (nr_flushed)
                        wake_up(&j->reclaim_wait);
        } while ((min_nr || min_key_cache) && !direct);

        memalloc_noreclaim_restore(flags);

        return ret;
}
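/*
 * Direct reclaim: called when the journal needs space freed up now; the
 * caller must hold j->reclaim_lock:
 */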
int bch2_journal_reclaim(struct journal *j)
{
        return __bch2_journal_reclaim(j, true);
}
static int bch2_journal_reclaim_thread(void *arg)
{
        struct journal *j = arg;
        unsigned long delay, now;
        int ret = 0;

        set_freezable();

        kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags));

        j->last_flushed = jiffies;

        while (!ret && !kthread_should_stop()) {
                j->reclaim_kicked = false;

                mutex_lock(&j->reclaim_lock);
                ret = __bch2_journal_reclaim(j, false);
                mutex_unlock(&j->reclaim_lock);

                now = jiffies;
                delay = msecs_to_jiffies(j->reclaim_delay_ms);
                j->next_reclaim = j->last_flushed + delay;

                if (!time_in_range(j->next_reclaim, now, now + delay))
                        j->next_reclaim = now + delay;

                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (kthread_should_stop())
                                break;
                        if (j->reclaim_kicked)
                                break;
                        if (time_after_eq(jiffies, j->next_reclaim))
                                break;
                        freezable_schedule_timeout(j->next_reclaim - jiffies);
                }
                __set_current_state(TASK_RUNNING);
        }

        return 0;
}
void bch2_journal_reclaim_stop(struct journal *j)
{
        struct task_struct *p = j->reclaim_thread;

        j->reclaim_thread = NULL;

        if (p)
                kthread_stop(p);
}
int bch2_journal_reclaim_start(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct task_struct *p;

        if (j->reclaim_thread)
                return 0;

        p = kthread_create(bch2_journal_reclaim_thread, j,
                           "bch-reclaim/%s", c->name);
        if (IS_ERR(p)) {
                bch_err(c, "error creating journal reclaim thread: %li", PTR_ERR(p));
                return PTR_ERR(p);
        }

        get_task_struct(p);
        j->reclaim_thread = p;
        wake_up_process(p);
        return 0;
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
                              bool *did_work)
{
        int ret;

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&j->reclaim_lock);

        *did_work = journal_flush_pins(j, seq_to_flush, 0, 0) != 0;

        spin_lock(&j->lock);
        /*
         * If journal replay hasn't completed, the unreplayed journal entries
         * hold refs on their corresponding sequence numbers
         */
        ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
                journal_last_seq(j) > seq_to_flush ||
                (fifo_used(&j->pin) == 1 &&
                 atomic_read(&fifo_peek_front(&j->pin).count) == 1);

        spin_unlock(&j->lock);
        mutex_unlock(&j->reclaim_lock);

        return ret;
}
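/*
 * Flush journal pins on entries up to @seq_to_flush and wait until they're
 * gone; returns true if we did any work:
 */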
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
        bool did_work = false;

        if (!test_bit(JOURNAL_STARTED, &j->flags))
                return false;

        closure_wait_event(&j->async_wait,
                journal_flush_done(j, seq_to_flush, &did_work));

        return did_work;
}
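/*
 * Used when migrating data off a device, or when metadata replication is
 * degraded: flush every journal entry that references @dev_idx (or, if
 * @dev_idx is negative, every entry with fewer than the required number of
 * replicas), then update the journal's replicas entries to match:
 */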
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        u64 iter, seq = 0;
        int ret = 0;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(p, &j->pin, iter)
                if (dev_idx >= 0
                    ? bch2_dev_list_has_dev(p->devs, dev_idx)
                    : p->devs.nr < c->opts.metadata_replicas)
                        seq = iter;
        spin_unlock(&j->lock);

        bch2_journal_flush_pins(j, seq);

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

        seq = 0;

        spin_lock(&j->lock);
        while (!ret && seq < j->pin.back) {
                struct bch_replicas_padded replicas;

                seq = max(seq, journal_last_seq(j));
                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);
                seq++;

                spin_unlock(&j->lock);
                ret = bch2_mark_replicas(c, &replicas.e);
                spin_lock(&j->lock);
        }
        spin_unlock(&j->lock);

        ret = bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);

        return ret;
}