+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "btree_key_cache.h"
+#include "error.h"
#include "journal.h"
+#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"
-/*
- * Journal entry pinning - machinery for holding a reference on a given journal
- * entry, holding it open to ensure it gets replayed during recovery:
- */
+#include <linux/kthread.h>
+#include <linux/sched/mm.h>
+#include <trace/events/bcachefs.h>
+
+/* Free space calculations: */
-static inline void __journal_pin_add(struct journal *j,
- u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+static unsigned journal_space_from(struct journal_device *ja,
+ enum journal_space_from from)
{
- struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+ switch (from) {
+ case journal_space_discarded:
+ return ja->discard_idx;
+ case journal_space_clean_ondisk:
+ return ja->dirty_idx_ondisk;
+ case journal_space_clean:
+ return ja->dirty_idx;
+ default:
+ BUG();
+ }
+}
- BUG_ON(journal_pin_active(pin));
- BUG_ON(!atomic_read(&pin_list->count));
+unsigned bch2_journal_dev_buckets_available(struct journal *j,
+ struct journal_device *ja,
+ enum journal_space_from from)
+{
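+	/*
+	 * The journal device is a ring of ja->nr buckets: this counts the
+	 * buckets strictly between cur_idx and the index we're measuring
+	 * from, modulo the ring size:
+	 */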
+ unsigned available = (journal_space_from(ja, from) -
+ ja->cur_idx - 1 + ja->nr) % ja->nr;
- atomic_inc(&pin_list->count);
- pin->seq = seq;
- pin->flush = flush_fn;
+ /*
+ * Don't use the last bucket unless writing the new last_seq
+ * will make another bucket available:
+ */
+ if (available && ja->dirty_idx_ondisk == ja->dirty_idx)
+ --available;
- if (flush_fn)
- list_add(&pin->list, &pin_list->list);
- else
- INIT_LIST_HEAD(&pin->list);
+ return available;
+}
+
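+/*
+ * Lockless update of j->prereserved.remaining: retry the cmpxchg until no
+ * other thread has changed the counter underneath us, so that concurrent
+ * updates to the reserved field are never lost:
+ */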
+static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
+{
+ union journal_preres_state old, new;
+ u64 v = atomic64_read(&j->prereserved.counter);
+
+ do {
+ old.v = new.v = v;
+ new.remaining = u64s_remaining;
+ } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
+ old.v, new.v)) != old.v);
+}
+
+static struct journal_space
+journal_dev_space_available(struct journal *j, struct bch_dev *ca,
+ enum journal_space_from from)
+{
+ struct journal_device *ja = &ca->journal;
+ unsigned sectors, buckets, unwritten;
+ u64 seq;
+
+ if (from == journal_space_total)
+ return (struct journal_space) {
+ .next_entry = ca->mi.bucket_size,
+ .total = ca->mi.bucket_size * ja->nr,
+ };
+
+ buckets = bch2_journal_dev_buckets_available(j, ja, from);
+ sectors = ja->sectors_free;
/*
- * If the journal is currently full, we might want to call flush_fn
- * immediately:
+	 * Note that we don't allocate the space for a journal entry
+	 * until we write it out - thus, account for it here:
*/
- journal_wake(j);
+ for (seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j);
+ seq++) {
+ unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
+
+ if (!unwritten)
+ continue;
+
+ /* entry won't fit on this device, skip: */
+ if (unwritten > ca->mi.bucket_size)
+ continue;
+
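+		/*
+		 * This entry doesn't fit in what's left of the current
+		 * bucket, so it will consume a fresh bucket:
+		 */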
+ if (unwritten >= sectors) {
+ if (!buckets) {
+ sectors = 0;
+ break;
+ }
+
+ buckets--;
+ sectors = ca->mi.bucket_size;
+ }
+
+ sectors -= unwritten;
+ }
+
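+	/*
+	 * If a spare bucket remains, report a full bucket for the next
+	 * entry rather than the tail of the current one:
+	 */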
+ if (sectors < ca->mi.bucket_size && buckets) {
+ buckets--;
+ sectors = ca->mi.bucket_size;
+ }
+
+ return (struct journal_space) {
+ .next_entry = sectors,
+ .total = sectors + buckets * ca->mi.bucket_size,
+ };
}
-void bch2_journal_pin_add(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+static struct journal_space
+__journal_space_available(struct journal *j, unsigned nr_devs_want,
+			    enum journal_space_from from)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned i, pos, nr_devs = 0;
+ struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
+
+ BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ if (!ca->journal.nr)
+ continue;
+
+ space = journal_dev_space_available(j, ca, from);
+ if (!space.next_entry)
+ continue;
+
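+		/* insertion sort, keeping dev_space sorted by total, largest first: */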
+ for (pos = 0; pos < nr_devs; pos++)
+ if (space.total > dev_space[pos].total)
+ break;
+
+ array_insert_item(dev_space, nr_devs, pos, space);
+ }
+ rcu_read_unlock();
+
+ if (nr_devs < nr_devs_want)
+ return (struct journal_space) { 0, 0 };
+
+ /*
+ * We sorted largest to smallest, and we want the smallest out of the
+ * @nr_devs_want largest devices:
+ */
+ return dev_space[nr_devs_want - 1];
+}
+
+void bch2_journal_space_available(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned clean, clean_ondisk, total;
+ s64 u64s_remaining = 0;
+ unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
+ j->buf[1].buf_size >> 9);
+ unsigned i, nr_online = 0, nr_devs_want;
+ bool can_discard = false;
+ int ret = 0;
+
+ lockdep_assert_held(&j->lock);
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ struct journal_device *ja = &ca->journal;
+
+ if (!ja->nr)
+ continue;
+
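+		/*
+		 * Advance dirty_idx past buckets whose entries have all been
+		 * reclaimed in memory, and dirty_idx_ondisk past buckets the
+		 * last journal write marked clean on disk:
+		 */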
+ while (ja->dirty_idx != ja->cur_idx &&
+ ja->bucket_seq[ja->dirty_idx] < journal_last_seq(j))
+ ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
+
+ while (ja->dirty_idx_ondisk != ja->dirty_idx &&
+ ja->bucket_seq[ja->dirty_idx_ondisk] < j->last_seq_ondisk)
+ ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
+
+ if (ja->discard_idx != ja->dirty_idx_ondisk)
+ can_discard = true;
+
+ max_entry_size = min_t(unsigned, max_entry_size, ca->mi.bucket_size);
+ nr_online++;
+ }
+ rcu_read_unlock();
+
+ j->can_discard = can_discard;
+
+ if (nr_online < c->opts.metadata_replicas_required) {
+ ret = JOURNAL_ERR_insufficient_devices;
+ goto out;
+ }
+
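+	/* how many devices each journal write should be replicated across: */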
+ nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
+
+ for (i = 0; i < journal_space_nr; i++)
+ j->space[i] = __journal_space_available(j, nr_devs_want, i);
+
+ clean_ondisk = j->space[journal_space_clean_ondisk].total;
+ clean = j->space[journal_space_clean].total;
+ total = j->space[journal_space_total].total;
+
+ if (!clean_ondisk &&
+ journal_cur_seq(j) == j->seq_ondisk) {
+ struct printbuf buf = PRINTBUF;
+
+ __bch2_journal_debug_to_text(&buf, j);
+ bch_err(c, "journal stuck\n%s", buf.buf);
+ printbuf_exit(&buf);
+
+ bch2_fatal_error(c);
+ ret = JOURNAL_ERR_journal_stuck;
+ } else if (!j->space[journal_space_discarded].next_entry)
+ ret = JOURNAL_ERR_journal_full;
+
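+	/*
+	 * Journal writes may skip the flush (JOURNAL_MAY_SKIP_FLUSH) only
+	 * while the space that flushing would free up - clean in memory but
+	 * not yet clean on disk - remains a small fraction of the total:
+	 */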
+ if ((j->space[journal_space_clean_ondisk].next_entry <
+ j->space[journal_space_clean_ondisk].total) &&
+ (clean - clean_ondisk <= total / 8) &&
+	    (clean_ondisk * 2 > clean))
+ set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+ else
+ clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+
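+	/*
+	 * Space for prereservations, in u64s: a sector holds 64 u64s
+	 * (clean << 6); keep 1/8 of the total in reserve (total << 3 u64s),
+	 * then divide by 4 as a safety margin:
+	 */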
+ u64s_remaining = (u64) clean << 6;
+ u64s_remaining -= (u64) total << 3;
+ u64s_remaining = max(0LL, u64s_remaining);
+ u64s_remaining /= 4;
+ u64s_remaining = min_t(u64, u64s_remaining, U32_MAX);
+out:
+ j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
+ j->cur_entry_error = ret;
+ journal_set_remaining(j, u64s_remaining);
+ journal_set_watermark(j);
+
+ if (!ret)
+ journal_wake(j);
+}
+
+/* Discards - last part of journal reclaim: */
+
+static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
+ bool ret;
+
spin_lock(&j->lock);
- __journal_pin_add(j, seq, pin, flush_fn);
+ ret = ja->discard_idx != ja->dirty_idx_ondisk;
spin_unlock(&j->lock);
+
+ return ret;
+}
+
+/*
+ * Advance ja->discard_idx as long as it points to buckets that are no longer
+ * dirty, issuing discards if necessary:
+ */
+void bch2_journal_do_discards(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned iter;
+
+ mutex_lock(&j->discard_lock);
+
+ for_each_rw_member(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+
+ while (should_discard_bucket(j, ja)) {
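+			/*
+			 * Only issue the discard if enabled and supported;
+			 * the bucket is reused either way:
+			 */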
+ if (!c->opts.nochanges &&
+ ca->mi.discard &&
+ blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
+ blkdev_issue_discard(ca->disk_sb.bdev,
+ bucket_to_sector(ca,
+ ja->buckets[ja->discard_idx]),
+ ca->mi.bucket_size, GFP_NOIO, 0);
+
+ spin_lock(&j->lock);
+ ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
+
+ bch2_journal_space_available(j);
+ spin_unlock(&j->lock);
+ }
+ }
+
+ mutex_unlock(&j->discard_lock);
+}
+
+/*
+ * Journal entry pinning - machinery for holding a reference on a given journal
+ * entry, holding it open to ensure it gets replayed during recovery:
+ */
+
+static void bch2_journal_reclaim_fast(struct journal *j)
+{
+ struct journal_entry_pin_list temp;
+ bool popped = false;
+
+ lockdep_assert_held(&j->lock);
+
+ /*
+ * Unpin journal entries whose reference counts reached zero, meaning
+ * all btree nodes got written out
+ */
+ while (!fifo_empty(&j->pin) &&
+ !atomic_read(&fifo_peek_front(&j->pin).count)) {
+ BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
+ BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
+ BUG_ON(!fifo_pop(&j->pin, temp));
+ popped = true;
+ }
+
+ if (popped)
+ bch2_journal_space_available(j);
+}
+
+void __bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+ if (atomic_dec_and_test(&pin_list->count))
+ bch2_journal_reclaim_fast(j);
+}
+
+void bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+ if (atomic_dec_and_test(&pin_list->count)) {
+ spin_lock(&j->lock);
+ bch2_journal_reclaim_fast(j);
+ spin_unlock(&j->lock);
+ }
}
static inline void __journal_pin_drop(struct journal *j,
if (!journal_pin_active(pin))
return;
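+	/*
+	 * If this pin is currently being flushed, flag it so the flusher
+	 * doesn't move it to the flushed list when the callback returns:
+	 */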
+ if (j->flush_in_progress == pin)
+ j->flush_in_progress_dropped = true;
+
pin_list = journal_seq_pin(j, pin->seq);
pin->seq = 0;
list_del_init(&pin->list);
spin_unlock(&j->lock);
}
-void bch2_journal_pin_add_if_older(struct journal *j,
- struct journal_entry_pin *src_pin,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
{
+ struct journal_entry_pin_list *pin_list;
+
spin_lock(&j->lock);
- if (journal_pin_active(src_pin) &&
- (!journal_pin_active(pin) ||
- src_pin->seq < pin->seq)) {
- __journal_pin_drop(j, pin);
- __journal_pin_add(j, src_pin->seq, pin, flush_fn);
+ if (seq < journal_last_seq(j)) {
+ /*
+ * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
+ * the src pin - with the pin dropped, the entry to pin might no
+		 * longer exist, but that means there's no longer anything to
+ * copy and we can bail out here:
+ */
+ spin_unlock(&j->lock);
+ return;
}
+ pin_list = journal_seq_pin(j, seq);
+
+ __journal_pin_drop(j, pin);
+
+ atomic_inc(&pin_list->count);
+ pin->seq = seq;
+ pin->flush = flush_fn;
+
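+	/*
+	 * Key cache pins get their own list so reclaim can flush them
+	 * separately; pins without a flush_fn go straight to the flushed
+	 * list:
+	 */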
+ if (flush_fn == bch2_btree_key_cache_journal_flush)
+ list_add(&pin->list, &pin_list->key_cache_list);
+ else if (flush_fn)
+ list_add(&pin->list, &pin_list->list);
+ else
+ list_add(&pin->list, &pin_list->flushed);
spin_unlock(&j->lock);
+
+ /*
+ * If the journal is currently full, we might want to call flush_fn
+ * immediately:
+ */
+ journal_wake(j);
}
+/**
+ * bch2_journal_pin_flush - ensure journal pin callback is no longer running
+ */
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
BUG_ON(journal_pin_active(pin));
* data off of a specific device:
*/
-/**
- * bch2_journal_reclaim_fast - do the fast part of journal reclaim
- *
- * Called from IO submission context, does not block. Cleans up after btree
- * write completions by advancing the journal pin and each cache's last_idx,
- * kicking off discards and background reclaim as necessary.
- */
-void bch2_journal_reclaim_fast(struct journal *j)
+static struct journal_entry_pin *
+journal_get_next_pin(struct journal *j,
+ bool get_any,
+ bool get_key_cache,
+ u64 max_seq, u64 *seq)
{
- struct journal_entry_pin_list temp;
- bool popped = false;
+ struct journal_entry_pin_list *pin_list;
+ struct journal_entry_pin *ret = NULL;
- lockdep_assert_held(&j->lock);
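+	/*
+	 * Regular pins are only returned up to @max_seq; @get_any overrides
+	 * that for all pins, @get_key_cache for key cache pins only:
+	 */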
+ fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
+ if (*seq > max_seq && !get_any && !get_key_cache)
+ break;
- /*
- * Unpin journal entries whose reference counts reached zero, meaning
- * all btree nodes got written out
- */
- while (!atomic_read(&fifo_peek_front(&j->pin).count)) {
- BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
- BUG_ON(!fifo_pop(&j->pin, temp));
- popped = true;
+ if (*seq <= max_seq || get_any) {
+ ret = list_first_entry_or_null(&pin_list->list,
+ struct journal_entry_pin, list);
+ if (ret)
+ return ret;
+ }
+
+ if (*seq <= max_seq || get_any || get_key_cache) {
+ ret = list_first_entry_or_null(&pin_list->key_cache_list,
+ struct journal_entry_pin, list);
+ if (ret)
+ return ret;
+ }
}
- if (popped)
- journal_wake(j);
+ return NULL;
}
-static void journal_pin_mark_flushing(struct journal *j,
- struct journal_entry_pin *pin,
- u64 seq)
+/* returns the number of pins flushed: */
+static size_t journal_flush_pins(struct journal *j, u64 seq_to_flush,
+ unsigned min_any,
+ unsigned min_key_cache)
{
+ struct journal_entry_pin *pin;
+ size_t nr_flushed = 0;
+ journal_pin_flush_fn flush_fn;
+ u64 seq;
+ int err;
+
lockdep_assert_held(&j->reclaim_lock);
- list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
- BUG_ON(j->flush_in_progress);
- j->flush_in_progress = pin;
-}
+ while (1) {
+ cond_resched();
-static void journal_pin_flush(struct journal *j,
- struct journal_entry_pin *pin,
- u64 seq)
-{
- pin->flush(j, pin, seq);
+ j->last_flushed = jiffies;
- BUG_ON(j->flush_in_progress != pin);
- j->flush_in_progress = NULL;
- wake_up(&j->pin_flush_wait);
-}
+ spin_lock(&j->lock);
+ pin = journal_get_next_pin(j,
+ min_any != 0,
+ min_key_cache != 0,
+ seq_to_flush, &seq);
+ if (pin) {
+ BUG_ON(j->flush_in_progress);
+ j->flush_in_progress = pin;
+ j->flush_in_progress_dropped = false;
+ flush_fn = pin->flush;
+ }
+ spin_unlock(&j->lock);
-static struct journal_entry_pin *
-journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
-{
- struct journal_entry_pin_list *pin_list;
- struct journal_entry_pin *ret = NULL;
+ if (!pin)
+ break;
+
+ if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
+ min_key_cache--;
+
+ if (min_any)
+ min_any--;
+
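+		/* call the flush callback without holding j->lock: */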
+ err = flush_fn(j, pin, seq);
+
+ spin_lock(&j->lock);
+ /* Pin might have been dropped or rearmed: */
+ if (likely(!err && !j->flush_in_progress_dropped))
+ list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
+ j->flush_in_progress = NULL;
+ j->flush_in_progress_dropped = false;
+ spin_unlock(&j->lock);
- /* no need to iterate over empty fifo entries: */
- bch2_journal_reclaim_fast(j);
+ wake_up(&j->pin_flush_wait);
- fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
- if (*seq > seq_to_flush ||
- (ret = list_first_entry_or_null(&pin_list->list,
- struct journal_entry_pin, list)))
+ if (err)
break;
- return ret;
+ nr_flushed++;
+ }
+
+ return nr_flushed;
}
-static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
+static u64 journal_seq_to_flush(struct journal *j)
{
- bool ret;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ u64 seq_to_flush = 0;
+ unsigned iter;
spin_lock(&j->lock);
- ret = ja->nr &&
- (ja->last_idx != ja->cur_idx &&
- ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
+
+ for_each_rw_member(ca, c, iter) {
+ struct journal_device *ja = &ca->journal;
+ unsigned nr_buckets, bucket_to_flush;
+
+ if (!ja->nr)
+ continue;
+
+ /* Try to keep the journal at most half full: */
+ nr_buckets = ja->nr / 2;
+
+ /* And include pre-reservations: */
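+		/* (reserved is in u64s; a bucket holds bucket_size << 6 of them) */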
+ nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
+ (ca->mi.bucket_size << 6) -
+ journal_entry_overhead(j));
+
+ nr_buckets = min(nr_buckets, ja->nr);
+
+ bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
+ seq_to_flush = max(seq_to_flush,
+ ja->bucket_seq[bucket_to_flush]);
+ }
+
+ /* Also flush if the pin fifo is more than half full */
+ seq_to_flush = max_t(s64, seq_to_flush,
+ (s64) journal_cur_seq(j) -
+ (j->pin.size >> 1));
spin_unlock(&j->lock);
- return ret;
+ return seq_to_flush;
}
/**
- * bch2_journal_reclaim_work - free up journal buckets
+ * bch2_journal_reclaim - free up journal buckets
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
-void bch2_journal_reclaim_work(struct work_struct *work)
+static int __bch2_journal_reclaim(struct journal *j, bool direct)
{
- struct bch_fs *c = container_of(to_delayed_work(work),
- struct bch_fs, journal.reclaim_work);
- struct journal *j = &c->journal;
- struct bch_dev *ca;
- struct journal_entry_pin *pin;
- u64 seq, seq_to_flush = 0;
- unsigned iter, bucket_to_flush;
- unsigned long next_flush;
- bool reclaim_lock_held = false, need_flush;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
+ u64 seq_to_flush;
+ size_t min_nr, min_key_cache, nr_flushed;
+ unsigned flags;
+ int ret = 0;
/*
- * Advance last_idx to point to the oldest journal entry containing
- * btree node updates that have not yet been written out
+ * We can't invoke memory reclaim while holding the reclaim_lock -
+ * journal reclaim is required to make progress for memory reclaim
+ * (cleaning the caches), so we can't get stuck in memory reclaim while
+ * we're holding the reclaim lock:
*/
- for_each_rw_member(ca, c, iter) {
- struct journal_device *ja = &ca->journal;
-
- if (!ja->nr)
- continue;
+ lockdep_assert_held(&j->reclaim_lock);
+ flags = memalloc_noreclaim_save();
- while (should_discard_bucket(j, ja)) {
- if (!reclaim_lock_held) {
- /*
- * ugh:
- * might be called from __journal_res_get()
- * under wait_event() - have to go back to
- * TASK_RUNNING before doing something that
- * would block, but only if we're doing work:
- */
- __set_current_state(TASK_RUNNING);
-
- mutex_lock(&j->reclaim_lock);
- reclaim_lock_held = true;
- /* recheck under reclaim_lock: */
- continue;
- }
+ do {
+ if (kthread && kthread_should_stop())
+ break;
- if (ca->mi.discard &&
- blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
- blkdev_issue_discard(ca->disk_sb.bdev,
- bucket_to_sector(ca,
- ja->buckets[ja->last_idx]),
- ca->mi.bucket_size, GFP_NOIO, 0);
+ if (bch2_journal_error(j)) {
+ ret = -EIO;
+ break;
+ }
- spin_lock(&j->lock);
- ja->last_idx = (ja->last_idx + 1) % ja->nr;
- spin_unlock(&j->lock);
+ bch2_journal_do_discards(j);
- journal_wake(j);
- }
+ seq_to_flush = journal_seq_to_flush(j);
+ min_nr = 0;
/*
- * Write out enough btree nodes to free up 50% journal
- * buckets
+	 * If it's been longer than journal_reclaim_delay since we last flushed,
+ * make sure to flush at least one journal pin:
*/
- spin_lock(&j->lock);
- bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
- seq_to_flush = max_t(u64, seq_to_flush,
- ja->bucket_seq[bucket_to_flush]);
- spin_unlock(&j->lock);
- }
+ if (time_after(jiffies, j->last_flushed +
+ msecs_to_jiffies(c->opts.journal_reclaim_delay)))
+ min_nr = 1;
- /* Also flush if the pin fifo is more than half full */
- spin_lock(&j->lock);
- seq_to_flush = max_t(s64, seq_to_flush,
- (s64) journal_cur_seq(j) -
- (j->pin.size >> 1));
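+		/* Flush if outstanding prereservations exceed 1/4 of remaining space: */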
+ if (j->prereserved.reserved * 4 > j->prereserved.remaining)
+ min_nr = 1;
- /*
- * If it's been longer than j->reclaim_delay_ms since we last flushed,
- * make sure to flush at least one journal pin:
- */
- next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
- need_flush = time_after(jiffies, next_flush);
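+		/* The pin fifo is nearly full - flush to make room: */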
+ if (fifo_free(&j->pin) <= 32)
+ min_nr = 1;
+
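+		/* Over half the btree node cache is dirty - flush to clean it: */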
+ if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+ min_nr = 1;
+
+ trace_journal_reclaim_start(c,
+ min_nr,
+ j->prereserved.reserved,
+ j->prereserved.remaining,
+ atomic_read(&c->btree_cache.dirty),
+ c->btree_cache.used,
+ atomic_long_read(&c->btree_key_cache.nr_dirty),
+ atomic_long_read(&c->btree_key_cache.nr_keys));
+
+ min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
+
+ nr_flushed = journal_flush_pins(j, seq_to_flush,
+ min_nr, min_key_cache);
+
+ if (direct)
+ j->nr_direct_reclaim += nr_flushed;
+ else
+ j->nr_background_reclaim += nr_flushed;
+ trace_journal_reclaim_finish(c, nr_flushed);
+
+ if (nr_flushed)
+ wake_up(&j->reclaim_wait);
+ } while ((min_nr || min_key_cache) && nr_flushed && !direct);
+
+ memalloc_noreclaim_restore(flags);
+
+ return ret;
+}
+
+int bch2_journal_reclaim(struct journal *j)
+{
+ return __bch2_journal_reclaim(j, true);
+}
+
+static int bch2_journal_reclaim_thread(void *arg)
+{
+ struct journal *j = arg;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ unsigned long delay, now;
+ bool journal_empty;
+ int ret = 0;
+
+ set_freezable();
+
+ j->last_flushed = jiffies;
+
+ while (!ret && !kthread_should_stop()) {
+ j->reclaim_kicked = false;
+
+ mutex_lock(&j->reclaim_lock);
+ ret = __bch2_journal_reclaim(j, false);
+ mutex_unlock(&j->reclaim_lock);
+
+ now = jiffies;
+ delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
+ j->next_reclaim = j->last_flushed + delay;
+
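+		/* Clamp the next reclaim time to within one delay period of now: */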
+ if (!time_in_range(j->next_reclaim, now, now + delay))
+ j->next_reclaim = now + delay;
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ if (j->reclaim_kicked)
+ break;
- while ((pin = journal_get_next_pin(j, need_flush
- ? U64_MAX
- : seq_to_flush, &seq))) {
- if (!reclaim_lock_held) {
- spin_unlock(&j->lock);
- __set_current_state(TASK_RUNNING);
- mutex_lock(&j->reclaim_lock);
- reclaim_lock_held = true;
spin_lock(&j->lock);
- continue;
+ journal_empty = fifo_empty(&j->pin);
+ spin_unlock(&j->lock);
+
+ if (journal_empty)
+ freezable_schedule();
+ else if (time_after(j->next_reclaim, jiffies))
+ freezable_schedule_timeout(j->next_reclaim - jiffies);
+ else
+ break;
}
+ __set_current_state(TASK_RUNNING);
+ }
- journal_pin_mark_flushing(j, pin, seq);
- spin_unlock(&j->lock);
+ return 0;
+}
- journal_pin_flush(j, pin, seq);
+void bch2_journal_reclaim_stop(struct journal *j)
+{
+ struct task_struct *p = j->reclaim_thread;
- need_flush = false;
- j->last_flushed = jiffies;
+ j->reclaim_thread = NULL;
- spin_lock(&j->lock);
+ if (p) {
+ kthread_stop(p);
+ put_task_struct(p);
}
+}
- spin_unlock(&j->lock);
+int bch2_journal_reclaim_start(struct journal *j)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct task_struct *p;
- if (reclaim_lock_held)
- mutex_unlock(&j->reclaim_lock);
+ if (j->reclaim_thread)
+ return 0;
+
+ p = kthread_create(bch2_journal_reclaim_thread, j,
+ "bch-reclaim/%s", c->name);
+ if (IS_ERR(p)) {
+ bch_err(c, "error creating journal reclaim thread: %li", PTR_ERR(p));
+ return PTR_ERR(p);
+ }
- if (!test_bit(BCH_FS_RO, &c->flags))
- queue_delayed_work(system_freezable_wq, &j->reclaim_work,
- msecs_to_jiffies(j->reclaim_delay_ms));
+ get_task_struct(p);
+ j->reclaim_thread = p;
+ wake_up_process(p);
+ return 0;
}
static int journal_flush_done(struct journal *j, u64 seq_to_flush,
- struct journal_entry_pin **pin,
- u64 *pin_seq)
+ bool *did_work)
{
int ret;
- *pin = NULL;
-
ret = bch2_journal_error(j);
if (ret)
return ret;
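+	/* Do the flushing ourselves, so that waiters make forward progress: */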
+ mutex_lock(&j->reclaim_lock);
+
+ if (journal_flush_pins(j, seq_to_flush, 0, 0))
+ *did_work = true;
+
spin_lock(&j->lock);
/*
* If journal replay hasn't completed, the unreplayed journal entries
* hold refs on their corresponding sequence numbers
*/
- ret = (*pin = journal_get_next_pin(j, seq_to_flush, pin_seq)) != NULL ||
- !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
+ ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
journal_last_seq(j) > seq_to_flush ||
- (fifo_used(&j->pin) == 1 &&
- atomic_read(&fifo_peek_front(&j->pin).count) == 1);
- if (*pin)
- journal_pin_mark_flushing(j, *pin, *pin_seq);
+ !fifo_used(&j->pin);
spin_unlock(&j->lock);
+ mutex_unlock(&j->reclaim_lock);
return ret;
}
-void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
+bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
- struct journal_entry_pin *pin;
- u64 pin_seq;
+ bool did_work = false;
if (!test_bit(JOURNAL_STARTED, &j->flags))
- return;
+ return false;
- mutex_lock(&j->reclaim_lock);
-
- while (1) {
- wait_event(j->wait, journal_flush_done(j, seq_to_flush,
- &pin, &pin_seq));
- if (!pin)
- break;
+ closure_wait_event(&j->async_wait,
+ journal_flush_done(j, seq_to_flush, &did_work));
- journal_pin_flush(j, pin, pin_seq);
- }
-
- mutex_unlock(&j->reclaim_lock);
+ return did_work;
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_entry_pin_list *p;
- struct bch_devs_list devs;
u64 iter, seq = 0;
int ret = 0;
return ret;
mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);
+ bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
seq = 0;
spin_lock(&j->lock);
- while (!ret && seq < j->pin.back) {
+ while (!ret) {
+ struct bch_replicas_padded replicas;
+
seq = max(seq, journal_last_seq(j));
- devs = journal_seq_pin(j, seq)->devs;
+ if (seq >= j->pin.back)
+ break;
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
+ journal_seq_pin(j, seq)->devs);
seq++;
spin_unlock(&j->lock);
- ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs);
+ ret = bch2_mark_replicas(c, &replicas.e);
spin_lock(&j->lock);
}
spin_unlock(&j->lock);