// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>

/* Free space calculations: */

static unsigned journal_space_from(struct journal_device *ja,
				   enum journal_space_from from)
{
	switch (from) {
	case journal_space_discarded:
		return ja->discard_idx;
	case journal_space_clean_ondisk:
		return ja->dirty_idx_ondisk;
	case journal_space_clean:
		return ja->dirty_idx;
	default:
		BUG();
	}
}

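/*
 * Returns the number of buckets in @ja that are free for new journal writes,
 * relative to the watermark selected by @from (discarded, clean on disk, or
 * clean):
 */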
unsigned bch2_journal_dev_buckets_available(struct journal *j,
					    struct journal_device *ja,
					    enum journal_space_from from)
{
	unsigned available = (journal_space_from(ja, from) -
			      ja->cur_idx - 1 + ja->nr) % ja->nr;

	/*
	 * Don't use the last bucket unless writing the new last_seq
	 * will make another bucket available:
	 */
	if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
		--available;

	return available;
}

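/*
 * Publish the new number of prereserved u64s remaining, using a cmpxchg loop
 * on j->prereserved so concurrent updates aren't lost:
 */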
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
	union journal_preres_state old, new;
	u64 v = atomic64_read(&j->prereserved.counter);

	do {
		old.v = new.v = v;
		new.remaining = u64s_remaining;
	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
				       old.v, new.v)) != old.v);
}

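/*
 * Calculate how many sectors are available for the next journal entry, and
 * how many remain after that, across the devices we're writing the journal
 * to; returns zeroes if fewer than @nr_devs_want devices have room:
 */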
static struct journal_space {
	unsigned	next_entry;
	unsigned	remaining;
} __journal_space_available(struct journal *j, unsigned nr_devs_want,
			    enum journal_space_from from)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned sectors_next_entry = UINT_MAX;
	unsigned sectors_total = UINT_MAX;
	unsigned i, nr_devs = 0;
	unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
		? journal_prev_buf(j)->sectors
		: 0;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;
		unsigned buckets_this_device, sectors_this_device;

		if (!ja->nr)
			continue;

		buckets_this_device = bch2_journal_dev_buckets_available(j, ja, from);
		sectors_this_device = ja->sectors_free;

		/*
		 * We don't allocate the space for a journal entry
		 * until we write it out - thus, account for it here:
		 */
		if (unwritten_sectors >= sectors_this_device) {
			if (!buckets_this_device)
				continue;

			buckets_this_device--;
			sectors_this_device = ca->mi.bucket_size;
		}

		sectors_this_device -= unwritten_sectors;

		if (sectors_this_device < ca->mi.bucket_size &&
		    buckets_this_device) {
			buckets_this_device--;
			sectors_this_device = ca->mi.bucket_size;
		}

		if (!sectors_this_device)
			continue;

		sectors_next_entry = min(sectors_next_entry,
					 sectors_this_device);

		sectors_total = min(sectors_total,
				    buckets_this_device * ca->mi.bucket_size +
				    sectors_this_device);

		nr_devs++;
	}
	rcu_read_unlock();

	if (nr_devs < nr_devs_want)
		return (struct journal_space) { 0, 0 };

	return (struct journal_space) {
		.next_entry	= sectors_next_entry,
		.remaining	= max_t(int, 0, sectors_total - sectors_next_entry),
	};
}

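/*
 * Recalculate free space in the journal, updating j->cur_entry_sectors,
 * j->cur_entry_error and the prereserved space accordingly; must be called
 * with j->lock held:
 */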
void bch2_journal_space_available(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	struct journal_space discarded, clean_ondisk, clean;
	unsigned overhead, u64s_remaining = 0;
	unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
				      j->buf[1].buf_size >> 9);
	unsigned i, nr_online = 0, nr_devs_want;
	bool can_discard = false;
	int ret = 0;

	lockdep_assert_held(&j->lock);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i,
				   &c->rw_devs[BCH_DATA_journal]) {
		struct journal_device *ja = &ca->journal;

		if (!ja->nr)
			continue;

		while (ja->dirty_idx != ja->cur_idx &&
		       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
			ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

		while (ja->dirty_idx_ondisk != ja->dirty_idx &&
		       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
			ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

		if (ja->discard_idx != ja->dirty_idx_ondisk)
			can_discard = true;

		max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
		nr_online++;
	}
	rcu_read_unlock();

	j->can_discard = can_discard;

	if (nr_online < c->opts.metadata_replicas_required) {
		ret = cur_entry_insufficient_devices;
		goto out;
	}

	if (!fifo_free(&j->pin)) {
		ret = cur_entry_journal_pin_full;
		goto out;
	}

	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

	discarded	= __journal_space_available(j, nr_devs_want, journal_space_discarded);
	clean_ondisk	= __journal_space_available(j, nr_devs_want, journal_space_clean_ondisk);
	clean		= __journal_space_available(j, nr_devs_want, journal_space_clean);

	if (!discarded.next_entry)
		ret = cur_entry_journal_full;

	overhead = DIV_ROUND_UP(clean.remaining, max_entry_size) *
		journal_entry_overhead(j);
	u64s_remaining = clean.remaining << 6;
	u64s_remaining = max_t(int, 0, u64s_remaining - overhead);
out:
	j->cur_entry_sectors	= !ret ? discarded.next_entry : 0;
	j->cur_entry_error	= ret;
	journal_set_remaining(j, u64s_remaining);
	journal_check_may_get_unreserved(j);
}

/* Discards - last part of journal reclaim: */

static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
	bool ret;

	spin_lock(&j->lock);
	ret = ja->discard_idx != ja->dirty_idx_ondisk;
	spin_unlock(&j->lock);

	return ret;
}

/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	unsigned iter;

	mutex_lock(&j->discard_lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (should_discard_bucket(j, ja)) {
			if (ca->mi.discard &&
			    blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
				blkdev_issue_discard(ca->disk_sb.bdev,
					bucket_to_sector(ca,
						ja->buckets[ja->discard_idx]),
					ca->mi.bucket_size, GFP_NOIO, 0);

			spin_lock(&j->lock);
			ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

			bch2_journal_space_available(j);
			spin_unlock(&j->lock);
		}
	}

	mutex_unlock(&j->discard_lock);
}

/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */

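/*
 * Pop entries off the front of the pin fifo whose refcounts have reached
 * zero, then, if anything was freed, recalculate available journal space;
 * caller must hold j->lock:
 */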
static void bch2_journal_reclaim_fast(struct journal *j)
{
	struct journal_entry_pin_list temp;
	bool popped = false;

	lockdep_assert_held(&j->lock);

	/*
	 * Unpin journal entries whose reference counts reached zero, meaning
	 * all btree nodes got written out
	 */
	while (!fifo_empty(&j->pin) &&
	       !atomic_read(&fifo_peek_front(&j->pin).count)) {
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
		BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
		BUG_ON(!fifo_pop(&j->pin, temp));
		popped = true;
	}

	if (popped)
		bch2_journal_space_available(j);
}

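/*
 * Drop a reference on journal sequence number @seq, unpinning entries at the
 * front of the fifo if this was the last reference:
 */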
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	if (atomic_dec_and_test(&pin_list->count)) {
		spin_lock(&j->lock);
		bch2_journal_reclaim_fast(j);
		spin_unlock(&j->lock);
	}
}

static inline void __journal_pin_drop(struct journal *j,
				      struct journal_entry_pin *pin)
{
	struct journal_entry_pin_list *pin_list;

	if (!journal_pin_active(pin))
		return;

	pin_list = journal_seq_pin(j, pin->seq);
	pin->seq = 0;
	list_del_init(&pin->list);

	/*
	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
	 * writing a new last_seq will now make another bucket available:
	 */
	if (atomic_dec_and_test(&pin_list->count) &&
	    pin_list == &fifo_peek_front(&j->pin))
		bch2_journal_reclaim_fast(j);
	else if (fifo_used(&j->pin) == 1 &&
		 atomic_read(&pin_list->count) == 1)
		journal_wake(j);
}

void bch2_journal_pin_drop(struct journal *j,
			   struct journal_entry_pin *pin)
{
	spin_lock(&j->lock);
	__journal_pin_drop(j, pin);
	spin_unlock(&j->lock);
}

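/*
 * Pin journal sequence number @seq: take a ref on its pin list, point @pin at
 * it, and put @pin on the list journal reclaim will flush (or on the flushed
 * list if no @flush_fn was given); caller must hold j->lock:
 */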
static void bch2_journal_pin_add_locked(struct journal *j, u64 seq,
					struct journal_entry_pin *pin,
					journal_pin_flush_fn flush_fn)
{
	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

	__journal_pin_drop(j, pin);

	BUG_ON(!atomic_read(&pin_list->count) && seq == journal_last_seq(j));

	atomic_inc(&pin_list->count);
	pin->seq	= seq;
	pin->flush	= flush_fn;

	list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
}

void __bch2_journal_pin_add(struct journal *j, u64 seq,
			    struct journal_entry_pin *pin,
			    journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);
	bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}

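/*
 * Move an existing pin to sequence number @seq, unless it's already pinning
 * something older; if it already pins @seq, just put it back on the unflushed
 * list:
 */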
void bch2_journal_pin_update(struct journal *j, u64 seq,
			     struct journal_entry_pin *pin,
			     journal_pin_flush_fn flush_fn)
{
	if (journal_pin_active(pin) && pin->seq < seq)
		return;

	spin_lock(&j->lock);

	if (pin->seq != seq) {
		bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
	} else {
		struct journal_entry_pin_list *pin_list =
			journal_seq_pin(j, seq);

		/*
		 * If the pin is already pinning the right sequence number, it
		 * still might've already been flushed:
		 */
		list_move(&pin->list, &pin_list->list);
	}

	spin_unlock(&j->lock);

	/*
	 * If the journal is currently full, we might want to call flush_fn
	 * immediately:
	 */
	journal_wake(j);
}

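/*
 * Make @dst pin the same sequence number as @src, if that's older than
 * whatever @dst currently pins:
 */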
void bch2_journal_pin_copy(struct journal *j,
			   struct journal_entry_pin *dst,
			   struct journal_entry_pin *src,
			   journal_pin_flush_fn flush_fn)
{
	spin_lock(&j->lock);

	if (journal_pin_active(src) &&
	    (!journal_pin_active(dst) || src->seq < dst->seq))
		bch2_journal_pin_add_locked(j, src->seq, dst, flush_fn);

	spin_unlock(&j->lock);
}

/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
	BUG_ON(journal_pin_active(pin));

	wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}

/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */

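/*
 * Find the next pin to flush: the oldest pin with a flush callback at or
 * before @max_seq; mark it as the flush in progress and move it to the
 * flushed list:
 */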
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
{
	struct journal_entry_pin_list *pin_list;
	struct journal_entry_pin *ret = NULL;

	if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags))
		return NULL;

	spin_lock(&j->lock);

	fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
		if (*seq > max_seq ||
		    (ret = list_first_entry_or_null(&pin_list->list,
				struct journal_entry_pin, list)))
			break;

	if (ret) {
		list_move(&ret->list, &pin_list->flushed);
		BUG_ON(j->flush_in_progress);
		j->flush_in_progress = ret;
	}

	spin_unlock(&j->lock);

	return ret;
}

/* returns the number of journal pins flushed */
static u64 journal_flush_pins(struct journal *j, u64 seq_to_flush,
			      unsigned min_nr)
{
	struct journal_entry_pin *pin;
	u64 seq, ret = 0;

	lockdep_assert_held(&j->reclaim_lock);

	while (1) {
		cond_resched();

		j->last_flushed = jiffies;

		pin = journal_get_next_pin(j, min_nr
				? U64_MAX : seq_to_flush, &seq);
		if (!pin)
			break;

		if (min_nr)
			min_nr--;

		pin->flush(j, pin, seq);

		BUG_ON(j->flush_in_progress != pin);
		j->flush_in_progress = NULL;
		wake_up(&j->pin_flush_wait);
		ret++;
	}

	return ret;
}

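/*
 * Pick the flush target: the sequence number we need to flush up to in order
 * to keep each device's journal at most half full (including pre-reserved
 * space) and the pin fifo no more than half full:
 */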
static u64 journal_seq_to_flush(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bch_dev *ca;
	u64 seq_to_flush = 0;
	unsigned iter;

	spin_lock(&j->lock);

	for_each_rw_member(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned nr_buckets, bucket_to_flush;

		if (!ja->nr)
			continue;

		/* Try to keep the journal at most half full: */
		nr_buckets = ja->nr / 2;

		/* And include pre-reservations: */
		nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
					   (ca->mi.bucket_size << 6) -
					   journal_entry_overhead(j));

		nr_buckets = min(nr_buckets, ja->nr);

		bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
		seq_to_flush = max(seq_to_flush,
				   ja->bucket_seq[bucket_to_flush]);
	}

	/* Also flush if the pin fifo is more than half full */
	seq_to_flush = max_t(s64, seq_to_flush,
			     (s64) journal_cur_seq(j) -
			     (j->pin.size >> 1));
	spin_unlock(&j->lock);

	return seq_to_flush;
}

/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static void __bch2_journal_reclaim(struct journal *j, bool direct)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	bool kthread = (current->flags & PF_KTHREAD) != 0;
	u64 seq_to_flush, nr_flushed = 0;
	size_t min_nr;
	unsigned flags;

	/*
	 * We can't invoke memory reclaim while holding the reclaim_lock -
	 * journal reclaim is required to make progress for memory reclaim
	 * (cleaning the caches), so we can't get stuck in memory reclaim while
	 * we're holding the reclaim lock:
	 */
	lockdep_assert_held(&j->reclaim_lock);
	flags = memalloc_noreclaim_save();

	do {
		if (kthread && kthread_should_stop())
			break;

		bch2_journal_do_discards(j);

		seq_to_flush = journal_seq_to_flush(j);
		min_nr = 0;

		/*
		 * If it's been longer than j->reclaim_delay_ms since we last flushed,
		 * make sure to flush at least one journal pin:
		 */
		if (time_after(jiffies, j->last_flushed +
			       msecs_to_jiffies(j->reclaim_delay_ms)))
			min_nr = 1;

		if (j->prereserved.reserved * 2 > j->prereserved.remaining)
			min_nr = 1;

		if (atomic_read(&c->btree_cache.dirty) * 4 >
		    c->btree_cache.used * 3)
			min_nr = 1;

		min_nr = max(min_nr, bch2_nr_btree_keys_need_flush(c));

		trace_journal_reclaim_start(c,
				min_nr,
				j->prereserved.reserved,
				j->prereserved.remaining,
				atomic_read(&c->btree_cache.dirty),
				c->btree_cache.used,
				c->btree_key_cache.nr_dirty,
				c->btree_key_cache.nr_keys);

		nr_flushed = journal_flush_pins(j, seq_to_flush, min_nr);

		if (direct)
			j->nr_direct_reclaim += nr_flushed;
		else
			j->nr_background_reclaim += nr_flushed;
		trace_journal_reclaim_finish(c, nr_flushed);
	} while (min_nr);

	memalloc_noreclaim_restore(flags);
}

void bch2_journal_reclaim(struct journal *j)
{
	__bch2_journal_reclaim(j, true);
}

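/*
 * Background reclaim thread: runs reclaim whenever it's kicked via
 * j->reclaim_kicked, and otherwise every j->reclaim_delay_ms:
 */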
static int bch2_journal_reclaim_thread(void *arg)
{
	struct journal *j = arg;
	unsigned long next;

	set_freezable();

	kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags));

	while (!kthread_should_stop()) {
		j->reclaim_kicked = false;

		mutex_lock(&j->reclaim_lock);
		__bch2_journal_reclaim(j, false);
		mutex_unlock(&j->reclaim_lock);

		next = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (kthread_should_stop())
				break;
			if (j->reclaim_kicked)
				break;
			if (time_after_eq(jiffies, next))
				break;
			schedule_timeout(next - jiffies);
		}
		__set_current_state(TASK_RUNNING);
	}

	return 0;
}

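/* Tear down the background reclaim thread: */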
void bch2_journal_reclaim_stop(struct journal *j)
{
	struct task_struct *p = j->reclaim_thread;

	j->reclaim_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_journal_reclaim_start(struct journal *j)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct task_struct *p;

	if (j->reclaim_thread)
		return 0;

	p = kthread_create(bch2_journal_reclaim_thread, j,
			   "bch-reclaim/%s", c->name);
	if (IS_ERR(p))
		return PTR_ERR(p);

	get_task_struct(p);
	j->reclaim_thread = p;
	wake_up_process(p);
	return 0;
}

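/*
 * Helper for bch2_journal_flush_pins(): flush pins up to @seq_to_flush, then
 * report whether everything up to that point is now unpinned (or the journal
 * has hit an error):
 */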
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
			      bool *did_work)
{
	int ret;

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&j->reclaim_lock);

	*did_work = journal_flush_pins(j, seq_to_flush, 0) != 0;

	spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
	ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
		journal_last_seq(j) > seq_to_flush ||
		(fifo_used(&j->pin) == 1 &&
		 atomic_read(&fifo_peek_front(&j->pin).count) == 1);

	spin_unlock(&j->lock);
	mutex_unlock(&j->reclaim_lock);

	return ret;
}

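/*
 * Flush all pins with sequence numbers at or before @seq_to_flush, waiting
 * until they've been dropped; returns true if any work was done:
 */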
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
	bool did_work = false;

	if (!test_bit(JOURNAL_STARTED, &j->flags))
		return false;

	closure_wait_event(&j->async_wait,
		journal_flush_done(j, seq_to_flush, &did_work));

	return did_work;
}

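/*
 * Flush journal entries referencing device @dev_idx (or, if @dev_idx is
 * negative, entries with fewer than the required number of replicas), then
 * update the journal replicas entries accordingly:
 */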
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct journal_entry_pin_list *p;
	u64 iter, seq = 0;
	int ret = 0;

	spin_lock(&j->lock);
	fifo_for_each_entry_ptr(p, &j->pin, iter)
		if (dev_idx >= 0
		    ? bch2_dev_list_has_dev(p->devs, dev_idx)
		    : p->devs.nr < c->opts.metadata_replicas)
			seq = iter;
	spin_unlock(&j->lock);

	bch2_journal_flush_pins(j, seq);

	ret = bch2_journal_error(j);
	if (ret)
		return ret;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

	seq = 0;

	spin_lock(&j->lock);
	while (!ret && seq < j->pin.back) {
		struct bch_replicas_padded replicas;

		seq = max(seq, journal_last_seq(j));
		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
					 journal_seq_pin(j, seq)->devs);
		seq++;

		spin_unlock(&j->lock);
		ret = bch2_mark_replicas(c, &replicas.e);
		spin_lock(&j->lock);
	}
	spin_unlock(&j->lock);

	ret = bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}