// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_key_cache.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"

#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
/* Free space calculations: */
static unsigned journal_space_from(struct journal_device *ja,
                                   enum journal_space_from from)
{
        switch (from) {
        case journal_space_discarded:
                return ja->discard_idx;
        case journal_space_clean_ondisk:
                return ja->dirty_idx_ondisk;
        case journal_space_clean:
                return ja->dirty_idx;
        default:
                BUG();
        }
}
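/*
 * The three criteria differ in how conservatively free buckets are counted:
 * "discarded" counts only buckets that have already been discarded,
 * "clean_ondisk" also counts buckets no longer needed according to the
 * on-disk last_seq, and "clean" also counts buckets that become free once an
 * updated last_seq makes it to disk.
 */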
unsigned bch2_journal_dev_buckets_available(struct journal *j,
                                            struct journal_device *ja,
                                            enum journal_space_from from)
{
        unsigned available = (journal_space_from(ja, from) -
                              ja->cur_idx - 1 + ja->nr) % ja->nr;
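        /*
         * (Ring buffer arithmetic - with illustrative values: if ja->nr == 8,
         * ja->cur_idx == 2 and the criterion above resolves to index 6, then
         * (6 - 2 - 1 + 8) % 8 == 3 buckets are counted as available.)
         */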
        /*
         * Don't use the last bucket unless writing the new last_seq
         * will make another bucket available:
         */
        if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
                --available;

        return available;
}
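/*
 * Publish the amount of space (in u64s) available to journal pre-reservations;
 * a cmpxchg loop is used because other fields packed into the same
 * journal_preres_state word are updated concurrently:
 */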
static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
{
        union journal_preres_state old, new;
        u64 v = atomic64_read(&j->prereserved.counter);

        do {
                old.v = new.v = v;
                new.remaining = u64s_remaining;
        } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
                                       old.v, new.v)) != old.v);
}
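/*
 * Compute, for the given free-space criterion, how much room we have for the
 * next journal entry and how much total space remains, requiring at least
 * nr_devs_want devices to have space:
 */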
static struct journal_space {
        unsigned        next_entry;
        unsigned        remaining;
} __journal_space_available(struct journal *j, unsigned nr_devs_want,
                            enum journal_space_from from)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned sectors_next_entry     = UINT_MAX;
        unsigned sectors_total          = UINT_MAX;
        unsigned i, nr_devs = 0;
        unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
                ? journal_prev_buf(j)->sectors
                : 0;

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;
                unsigned buckets_this_device, sectors_this_device;

                if (!ja->nr)
                        continue;

                buckets_this_device = bch2_journal_dev_buckets_available(j, ja, from);
                sectors_this_device = ja->sectors_free;

                /*
                 * Note that we don't allocate the space for a journal entry
                 * until we write it out - thus, account for it here:
                 */
                if (unwritten_sectors >= sectors_this_device) {
                        if (!buckets_this_device)
                                continue;

                        buckets_this_device--;
                        sectors_this_device = ca->mi.bucket_size;
                }

                sectors_this_device -= unwritten_sectors;

                if (sectors_this_device < ca->mi.bucket_size &&
                    buckets_this_device) {
                        buckets_this_device--;
                        sectors_this_device = ca->mi.bucket_size;
                }

                if (!sectors_this_device)
                        continue;

                sectors_next_entry = min(sectors_next_entry,
                                         sectors_this_device);

                sectors_total = min(sectors_total,
                        buckets_this_device * ca->mi.bucket_size +
                        sectors_this_device);

                nr_devs++;
        }
        rcu_read_unlock();

        if (nr_devs < nr_devs_want)
                return (struct journal_space) { 0, 0 };

        return (struct journal_space) {
                .next_entry     = sectors_next_entry,
                .remaining      = max_t(int, 0, sectors_total - sectors_next_entry),
        };
}
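/*
 * Recompute how much space the journal has: updates j->cur_entry_sectors,
 * j->cur_entry_error and the space available to pre-reservations. Expects
 * j->lock to be held (see the lockdep assertion below):
 */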
void bch2_journal_space_available(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        struct journal_space discarded, clean_ondisk, clean;
        unsigned overhead, u64s_remaining = 0;
        unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
                                      j->buf[1].buf_size >> 9);
        unsigned i, nr_online = 0, nr_devs_want;
        bool can_discard = false;
        int ret = 0;

        lockdep_assert_held(&j->lock);

        rcu_read_lock();
        for_each_member_device_rcu(ca, c, i,
                                   &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;

                if (!ja->nr)
                        continue;

                while (ja->dirty_idx != ja->cur_idx &&
                       ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
                        ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;

                while (ja->dirty_idx_ondisk != ja->dirty_idx &&
                       ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
                        ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;

                if (ja->discard_idx != ja->dirty_idx_ondisk)
                        can_discard = true;

                max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
                nr_online++;
        }
        rcu_read_unlock();

        j->can_discard = can_discard;

        if (nr_online < c->opts.metadata_replicas_required) {
                ret = cur_entry_insufficient_devices;
                goto out;
        }

        if (!fifo_free(&j->pin)) {
                ret = cur_entry_journal_pin_full;
                goto out;
        }

        nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

        discarded       = __journal_space_available(j, nr_devs_want, journal_space_discarded);
        clean_ondisk    = __journal_space_available(j, nr_devs_want, journal_space_clean_ondisk);
        clean           = __journal_space_available(j, nr_devs_want, journal_space_clean);

        if (!discarded.next_entry)
                ret = cur_entry_journal_full;
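        /*
         * clean.remaining is in 512-byte sectors; shifting by 6 converts it to
         * u64s (512 / sizeof(u64) == 64), from which we subtract the header
         * overhead of the journal entries it could hold:
         */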
        overhead = DIV_ROUND_UP(clean.remaining, max_entry_size) *
                journal_entry_overhead(j);
        u64s_remaining = clean.remaining << 6;
        u64s_remaining = max_t(int, 0, u64s_remaining - overhead);
out:
        j->cur_entry_sectors    = !ret ? discarded.next_entry : 0;
        j->cur_entry_error      = ret;
        journal_set_remaining(j, u64s_remaining);
        journal_check_may_get_unreserved(j);
}
/* Discards - last part of journal reclaim: */
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
        spin_lock(&j->lock);
        bool ret = ja->discard_idx != ja->dirty_idx_ondisk;
        spin_unlock(&j->lock);

        return ret;
}
/*
 * Advance ja->discard_idx as long as it points to buckets that are no longer
 * dirty, issuing discards if necessary:
 */
void bch2_journal_do_discards(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        unsigned iter;

        mutex_lock(&j->discard_lock);

        for_each_rw_member(ca, c, iter) {
                struct journal_device *ja = &ca->journal;

                while (should_discard_bucket(j, ja)) {
                        if (ca->mi.discard &&
                            blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
                                blkdev_issue_discard(ca->disk_sb.bdev,
                                        bucket_to_sector(ca,
                                                ja->buckets[ja->discard_idx]),
                                        ca->mi.bucket_size, GFP_NOIO, 0);

                        spin_lock(&j->lock);
                        ja->discard_idx = (ja->discard_idx + 1) % ja->nr;

                        bch2_journal_space_available(j);
                        spin_unlock(&j->lock);
                }
        }

        mutex_unlock(&j->discard_lock);
}
/*
 * Journal entry pinning - machinery for holding a reference on a given journal
 * entry, holding it open to ensure it gets replayed during recovery:
 */
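/*
 * In outline: a pin is taken with bch2_journal_pin_set(), which records a
 * flush callback; journal reclaim invokes that callback for the oldest pins
 * (via journal_flush_pins()) so that the pinned updates reach the btree and
 * last_seq can advance; the owner releases the pin with
 * bch2_journal_pin_drop().
 */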
static void bch2_journal_reclaim_fast(struct journal *j)
{
        struct journal_entry_pin_list temp;
        bool popped = false;

        lockdep_assert_held(&j->lock);

        /*
         * Unpin journal entries whose reference counts reached zero, meaning
         * all btree nodes got written out
         */
        while (!fifo_empty(&j->pin) &&
               !atomic_read(&fifo_peek_front(&j->pin).count)) {
                BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
                BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
                BUG_ON(!fifo_pop(&j->pin, temp));
                popped = true;
        }

        if (popped)
                bch2_journal_space_available(j);
}
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
        struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);

        if (atomic_dec_and_test(&pin_list->count)) {
                spin_lock(&j->lock);
                bch2_journal_reclaim_fast(j);
                spin_unlock(&j->lock);
        }
}
static inline void __journal_pin_drop(struct journal *j,
                                      struct journal_entry_pin *pin)
{
        struct journal_entry_pin_list *pin_list;

        if (!journal_pin_active(pin))
                return;

        pin_list = journal_seq_pin(j, pin->seq);
        pin->seq = 0;
        list_del_init(&pin->list);

        /*
         * Unpinning a journal entry may make journal_next_bucket() succeed if
         * writing a new last_seq will now make another bucket available:
         */
        if (atomic_dec_and_test(&pin_list->count) &&
            pin_list == &fifo_peek_front(&j->pin))
                bch2_journal_reclaim_fast(j);
        else if (fifo_used(&j->pin) == 1 &&
                 atomic_read(&pin_list->count) == 1)
                journal_wake(j);
}
void bch2_journal_pin_drop(struct journal *j,
                           struct journal_entry_pin *pin)
{
        spin_lock(&j->lock);
        __journal_pin_drop(j, pin);
        spin_unlock(&j->lock);
}
void bch2_journal_pin_set(struct journal *j, u64 seq,
                          struct journal_entry_pin *pin,
                          journal_pin_flush_fn flush_fn)
{
        struct journal_entry_pin_list *pin_list;

        spin_lock(&j->lock);
        pin_list = journal_seq_pin(j, seq);

        __journal_pin_drop(j, pin);

        BUG_ON(!atomic_read(&pin_list->count) && seq == journal_last_seq(j));

        atomic_inc(&pin_list->count);
        pin->seq        = seq;
        pin->flush      = flush_fn;

        list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
        spin_unlock(&j->lock);

        /*
         * If the journal is currently full, we might want to call flush_fn
         * immediately:
         */
        journal_wake(j);
}
/**
 * bch2_journal_pin_flush: ensure journal pin callback is no longer running
 */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
        BUG_ON(journal_pin_active(pin));

        wait_event(j->pin_flush_wait, j->flush_in_progress != pin);
}
/*
 * Journal reclaim: flush references to open journal entries to reclaim space
 * in the journal.
 *
 * May be done by the journal code in the background as needed to free up space
 * for more journal entries, or as part of doing a clean shutdown, or to migrate
 * data off of a specific device:
 */
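/*
 * In outline, reclaim (__bch2_journal_reclaim() below) proceeds as: advance
 * discard_idx and issue discards, work out how far we want to flush
 * (journal_seq_to_flush()), then flush pins up to that point
 * (journal_flush_pins()), calling each pin's flush callback.
 */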
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
{
        struct journal_entry_pin_list *pin_list;
        struct journal_entry_pin *ret = NULL;

        if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags))
                return NULL;

        spin_lock(&j->lock);

        fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
                if (*seq > max_seq ||
                    (ret = list_first_entry_or_null(&pin_list->list,
                                struct journal_entry_pin, list)))
                        break;

        if (ret) {
                list_move(&ret->list, &pin_list->flushed);
                BUG_ON(j->flush_in_progress);
                j->flush_in_progress = ret;
        }

        spin_unlock(&j->lock);

        return ret;
}
/* returns the number of journal pins flushed */
static u64 journal_flush_pins(struct journal *j, u64 seq_to_flush,
                              unsigned min_nr)
{
        struct journal_entry_pin *pin;
        u64 seq, ret = 0;

        lockdep_assert_held(&j->reclaim_lock);

        while (1) {
                j->last_flushed = jiffies;

                pin = journal_get_next_pin(j, min_nr
                                ? U64_MAX : seq_to_flush, &seq);
                if (!pin)
                        break;

                if (min_nr)
                        min_nr--;

                pin->flush(j, pin, seq);

                BUG_ON(j->flush_in_progress != pin);
                j->flush_in_progress = NULL;
                wake_up(&j->pin_flush_wait);
                ret++;
        }

        return ret;
}
static u64 journal_seq_to_flush(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_dev *ca;
        u64 seq_to_flush = 0;
        unsigned iter;

        spin_lock(&j->lock);

        for_each_rw_member(ca, c, iter) {
                struct journal_device *ja = &ca->journal;
                unsigned nr_buckets, bucket_to_flush;

                if (!ja->nr)
                        continue;

                /* Try to keep the journal at most half full: */
                nr_buckets = ja->nr / 2;

                /* And include pre-reservations: */
                nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
                                           (ca->mi.bucket_size << 6) -
                                           journal_entry_overhead(j));

                nr_buckets = min(nr_buckets, ja->nr);

                bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
                seq_to_flush = max(seq_to_flush,
                                   ja->bucket_seq[bucket_to_flush]);
        }
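        /*
         * (Illustration of the calculation above: ja->nr == 8,
         * ja->cur_idx == 5 and nr_buckets == 4 gives
         * bucket_to_flush == (5 + 4) % 8 == 1, i.e. we aim to flush everything
         * older than the entry stored nr_buckets ahead of the current write
         * position.)
         */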
        /* Also flush if the pin fifo is more than half full */
        seq_to_flush = max_t(s64, seq_to_flush,
                             (s64) journal_cur_seq(j) -
                             (j->pin.size >> 1));
        spin_unlock(&j->lock);

        return seq_to_flush;
}
/**
 * bch2_journal_reclaim - free up journal buckets
 *
 * Background journal reclaim writes out btree nodes. It should be run
 * early enough so that we never completely run out of journal buckets.
 *
 * High watermarks for triggering background reclaim:
 * - FIFO has fewer than 512 entries left
 * - fewer than 25% journal buckets free
 *
 * Background reclaim runs until low watermarks are reached:
 * - FIFO has more than 1024 entries left
 * - more than 50% journal buckets free
 *
 * As long as a reclaim can complete in the time it takes to fill up
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
static void __bch2_journal_reclaim(struct journal *j, bool direct)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        u64 seq_to_flush, nr_flushed = 0;
        unsigned flags, min_nr;

        /*
         * We can't invoke memory reclaim while holding the reclaim_lock -
         * journal reclaim is required to make progress for memory reclaim
         * (cleaning the caches), so we can't get stuck in memory reclaim while
         * we're holding the reclaim lock:
         */
        lockdep_assert_held(&j->reclaim_lock);
        flags = memalloc_noreclaim_save();

        do {
                if (kthread && kthread_should_stop())
                        break;

                bch2_journal_do_discards(j);

                seq_to_flush = journal_seq_to_flush(j);
                min_nr = 0;

                /*
                 * If it's been longer than j->reclaim_delay_ms since we last flushed,
                 * make sure to flush at least one journal pin:
                 */
                if (time_after(jiffies, j->last_flushed +
                               msecs_to_jiffies(j->reclaim_delay_ms)))
                        min_nr = 1;

                if (j->prereserved.reserved * 2 > j->prereserved.remaining)
                        min_nr = 1;

                if (atomic_read(&c->btree_cache.dirty) * 4 >
                    c->btree_cache.used * 3)
                        min_nr = 1;

                min_nr = max(min_nr, bch2_nr_btree_keys_need_flush(c));

                trace_journal_reclaim_start(c,
                                j->prereserved.reserved,
                                j->prereserved.remaining,
                                atomic_read(&c->btree_cache.dirty),
                                c->btree_key_cache.nr_dirty,
                                c->btree_key_cache.nr_keys);

                nr_flushed = journal_flush_pins(j, seq_to_flush, min_nr);

                if (direct)
                        j->nr_direct_reclaim += nr_flushed;
                else
                        j->nr_background_reclaim += nr_flushed;
                trace_journal_reclaim_finish(c, nr_flushed);
        } while (min_nr);

        memalloc_noreclaim_restore(flags);
}
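/*
 * Direct reclaim entry point, for when the journal code needs space freed up
 * synchronously rather than waiting for the background thread; the caller
 * must hold j->reclaim_lock (asserted in __bch2_journal_reclaim() above).
 */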
void bch2_journal_reclaim(struct journal *j)
{
        __bch2_journal_reclaim(j, true);
}
static int bch2_journal_reclaim_thread(void *arg)
{
        struct journal *j = arg;
        unsigned long next;

        kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags));

        while (!kthread_should_stop()) {
                j->reclaim_kicked = false;

                mutex_lock(&j->reclaim_lock);
                __bch2_journal_reclaim(j, false);
                mutex_unlock(&j->reclaim_lock);

                next = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);

                while (1) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (kthread_should_stop())
                                break;
                        if (j->reclaim_kicked)
                                break;
                        if (time_after_eq(jiffies, next))
                                break;
                        schedule_timeout(next - jiffies);
                }
                __set_current_state(TASK_RUNNING);
        }

        return 0;
}
void bch2_journal_reclaim_stop(struct journal *j)
{
        struct task_struct *p = j->reclaim_thread;

        j->reclaim_thread = NULL;
        if (p)
                kthread_stop(p);
}

int bch2_journal_reclaim_start(struct journal *j)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct task_struct *p;

        if (j->reclaim_thread)
                return 0;

        p = kthread_create(bch2_journal_reclaim_thread, j,
                           "bch-reclaim/%s", c->name);
        if (IS_ERR(p))
                return PTR_ERR(p);

        j->reclaim_thread = p;
        wake_up_process(p);
        return 0;
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
                              bool *did_work)
{
        int ret;

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&j->reclaim_lock);

        *did_work = journal_flush_pins(j, seq_to_flush, 0) != 0;

        spin_lock(&j->lock);
        /*
         * If journal replay hasn't completed, the unreplayed journal entries
         * hold refs on their corresponding sequence numbers
         */
        ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
                journal_last_seq(j) > seq_to_flush ||
                (fifo_used(&j->pin) == 1 &&
                 atomic_read(&fifo_peek_front(&j->pin).count) == 1);
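        /*
         * (fifo_used() == 1 with a count of 1 means only the currently open
         * journal entry is still pinned, and only by its own reference, so
         * there is nothing left for us to flush.)
         */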
        spin_unlock(&j->lock);
        mutex_unlock(&j->reclaim_lock);

        return ret;
}
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
        bool did_work = false;

        if (!test_bit(JOURNAL_STARTED, &j->flags))
                return false;

        closure_wait_event(&j->async_wait,
                journal_flush_done(j, seq_to_flush, &did_work));

        return did_work;
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_entry_pin_list *p;
        u64 iter, seq = 0;
        int ret = 0;

        spin_lock(&j->lock);
        fifo_for_each_entry_ptr(p, &j->pin, iter)
                if (dev_idx >= 0
                    ? bch2_dev_list_has_dev(p->devs, dev_idx)
                    : p->devs.nr < c->opts.metadata_replicas)
                        seq = iter;
        spin_unlock(&j->lock);

        bch2_journal_flush_pins(j, seq);

        ret = bch2_journal_error(j);
        if (ret)
                return ret;

        mutex_lock(&c->replicas_gc_lock);
        bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);

        seq = 0;

        spin_lock(&j->lock);
        while (!ret && seq < j->pin.back) {
                struct bch_replicas_padded replicas;

                seq = max(seq, journal_last_seq(j));
                bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
                                         journal_seq_pin(j, seq)->devs);
                seq++;

                spin_unlock(&j->lock);
                ret = bch2_mark_replicas(c, &replicas.e);
                spin_lock(&j->lock);
        }
        spin_unlock(&j->lock);

        ret = bch2_replicas_gc_end(c, ret);
        mutex_unlock(&c->replicas_gc_lock);

        return ret;
}