// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
+#include "btree_key_cache.h"
+#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"
+#include <linux/kthread.h>
+#include <linux/sched/mm.h>
+#include <trace/events/bcachefs.h>
+
/* Free space calculations: */
unsigned bch2_journal_dev_buckets_available(struct journal *j,
struct journal_device *ja,
enum journal_space_from from)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned available = (journal_space_from(ja, from) -
ja->cur_idx - 1 + ja->nr) % ja->nr;
- /*
- * Allocator startup needs some journal space before we can do journal
- * replay:
- */
- if (available && test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags))
- --available;
-
/*
* Don't use the last bucket unless writing the new last_seq
* will make another bucket available:
old.v, new.v)) != old.v);
}
-static struct journal_space {
- unsigned next_entry;
- unsigned remaining;
-} __journal_space_available(struct journal *j, unsigned nr_devs_want,
+static struct journal_space
+journal_dev_space_available(struct journal *j, struct bch_dev *ca,
enum journal_space_from from)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_dev *ca;
- unsigned sectors_next_entry = UINT_MAX;
- unsigned sectors_total = UINT_MAX;
- unsigned i, nr_devs = 0;
- unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
- ? journal_prev_buf(j)->sectors
- : 0;
+ struct journal_device *ja = &ca->journal;
+ unsigned sectors, buckets, unwritten;
+ u64 seq;
- rcu_read_lock();
- for_each_member_device_rcu(ca, c, i,
- &c->rw_devs[BCH_DATA_JOURNAL]) {
- struct journal_device *ja = &ca->journal;
- unsigned buckets_this_device, sectors_this_device;
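+ /* journal_space_total: the device's full journal capacity, in use or not: */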
+ if (from == journal_space_total)
+ return (struct journal_space) {
+ .next_entry = ca->mi.bucket_size,
+ .total = ca->mi.bucket_size * ja->nr,
+ };
- if (!ja->nr)
+ buckets = bch2_journal_dev_buckets_available(j, ja, from);
+ sectors = ja->sectors_free;
+
+ /*
+ * Note that we don't allocate the space for a journal entry
+ * until we write it out - thus, account for it here:
+ */
+ for (seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j);
+ seq++) {
+ unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
+
+ if (!unwritten)
continue;
- buckets_this_device = bch2_journal_dev_buckets_available(j, ja, from);
- sectors_this_device = ja->sectors_free;
+ /* entry won't fit on this device, skip: */
+ if (unwritten > ca->mi.bucket_size)
+ continue;
- /*
- * We that we don't allocate the space for a journal entry
- * until we write it out - thus, account for it here:
- */
- if (unwritten_sectors >= sectors_this_device) {
- if (!buckets_this_device)
- continue;
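+ /* entry doesn't fit in what's left of the current bucket - start a new one: */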
+ if (unwritten >= sectors) {
+ if (!buckets) {
+ sectors = 0;
+ break;
+ }
- buckets_this_device--;
- sectors_this_device = ca->mi.bucket_size;
+ buckets--;
+ sectors = ca->mi.bucket_size;
}
- sectors_this_device -= unwritten_sectors;
+ sectors -= unwritten;
+ }
- if (sectors_this_device < ca->mi.bucket_size &&
- buckets_this_device) {
- buckets_this_device--;
- sectors_this_device = ca->mi.bucket_size;
- }
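+ /* if a whole bucket is still free, the next entry can fill a full bucket: */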
+ if (sectors < ca->mi.bucket_size && buckets) {
+ buckets--;
+ sectors = ca->mi.bucket_size;
+ }
- if (!sectors_this_device)
+ return (struct journal_space) {
+ .next_entry = sectors,
+ .total = sectors + buckets * ca->mi.bucket_size,
+ };
+}
+
+static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
+ enum journal_space_from from)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned i, pos, nr_devs = 0;
+ struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
+
+ BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ if (!ca->journal.nr)
continue;
- sectors_next_entry = min(sectors_next_entry,
- sectors_this_device);
+ space = journal_dev_space_available(j, ca, from);
+ if (!space.next_entry)
+ continue;
- sectors_total = min(sectors_total,
- buckets_this_device * ca->mi.bucket_size +
- sectors_this_device);
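+ /* insertion sort, keeping dev_space ordered by total space, largest first: */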
+ for (pos = 0; pos < nr_devs; pos++)
+ if (space.total > dev_space[pos].total)
+ break;
- nr_devs++;
+ array_insert_item(dev_space, nr_devs, pos, space);
}
rcu_read_unlock();
if (nr_devs < nr_devs_want)
return (struct journal_space) { 0, 0 };
- return (struct journal_space) {
- .next_entry = sectors_next_entry,
- .remaining = max_t(int, 0, sectors_total - sectors_next_entry),
- };
+ /*
+ * We sorted largest to smallest, and we want the smallest out of the
+ * @nr_devs_want largest devices:
+ */
+ return dev_space[nr_devs_want - 1];
}
void bch2_journal_space_available(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
- struct journal_space discarded, clean_ondisk, clean;
- unsigned overhead, u64s_remaining = 0;
+ unsigned clean, clean_ondisk, total;
+ s64 u64s_remaining = 0;
unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
j->buf[1].buf_size >> 9);
unsigned i, nr_online = 0, nr_devs_want;
rcu_read_lock();
for_each_member_device_rcu(ca, c, i,
- &c->rw_devs[BCH_DATA_JOURNAL]) {
+ &c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;
if (!ja->nr)
j->can_discard = can_discard;
if (nr_online < c->opts.metadata_replicas_required) {
- ret = -EROFS;
- goto out;
- }
-
- if (!fifo_free(&j->pin)) {
- ret = -ENOSPC;
+ ret = JOURNAL_ERR_insufficient_devices;
goto out;
}
nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
- discarded = __journal_space_available(j, nr_devs_want, journal_space_discarded);
- clean_ondisk = __journal_space_available(j, nr_devs_want, journal_space_clean_ondisk);
- clean = __journal_space_available(j, nr_devs_want, journal_space_clean);
-
- if (!discarded.next_entry)
- ret = -ENOSPC;
-
- overhead = DIV_ROUND_UP(clean.remaining, max_entry_size) *
- journal_entry_overhead(j);
- u64s_remaining = clean.remaining << 6;
- u64s_remaining = max_t(int, 0, u64s_remaining - overhead);
+ for (i = 0; i < journal_space_nr; i++)
+ j->space[i] = __journal_space_available(j, nr_devs_want, i);
+
+ clean_ondisk = j->space[journal_space_clean_ondisk].total;
+ clean = j->space[journal_space_clean].total;
+ total = j->space[journal_space_total].total;
+
+ if (!clean_ondisk &&
+ journal_cur_seq(j) == j->seq_ondisk) {
+ struct printbuf buf = PRINTBUF;
+
+ __bch2_journal_debug_to_text(&buf, j);
+ bch_err(c, "journal stuck\n%s", buf.buf);
+ printbuf_exit(&buf);
+
+ bch2_fatal_error(c);
+ ret = JOURNAL_ERR_journal_stuck;
+ } else if (!j->space[journal_space_discarded].next_entry)
+ ret = JOURNAL_ERR_journal_full;
+
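+ /*
+ * Allow noflush journal writes only while clean-on-disk space is
+ * keeping up with clean space:
+ */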
+ if ((j->space[journal_space_clean_ondisk].next_entry <
+ j->space[journal_space_clean_ondisk].total) &&
+ (clean - clean_ondisk <= total / 8) &&
+ (clean_ondisk * 2 > clean))
+ set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+ else
+ clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+
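+ /*
+ * A 512 byte sector holds 64 u64s; reserve an eighth of the journal's
+ * total space before handing out the rest as prereserved u64s:
+ */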
+ u64s_remaining = (u64) clean << 6;
+ u64s_remaining -= (u64) total << 3;
+ u64s_remaining = max(0LL, u64s_remaining);
u64s_remaining /= 4;
+ u64s_remaining = min_t(u64, u64s_remaining, U32_MAX);
out:
- j->cur_entry_sectors = !ret ? discarded.next_entry : 0;
+ j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
j->cur_entry_error = ret;
journal_set_remaining(j, u64s_remaining);
- journal_check_may_get_unreserved(j);
+ journal_set_watermark(j);
if (!ret)
journal_wake(j);
struct journal_device *ja = &ca->journal;
while (should_discard_bucket(j, ja)) {
- if (ca->mi.discard &&
+ if (!c->opts.nochanges &&
+ ca->mi.discard &&
blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca,
while (!fifo_empty(&j->pin) &&
!atomic_read(&fifo_peek_front(&j->pin).count)) {
BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
+ BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
BUG_ON(!fifo_pop(&j->pin, temp));
popped = true;
}
bch2_journal_space_available(j);
}
+void __bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+ if (atomic_dec_and_test(&pin_list->count))
+ bch2_journal_reclaim_fast(j);
+}
+
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
if (!journal_pin_active(pin))
return;
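+ /* notify journal_flush_pins() that the pin it's flushing was dropped: */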
+ if (j->flush_in_progress == pin)
+ j->flush_in_progress_dropped = true;
+
pin_list = journal_seq_pin(j, pin->seq);
pin->seq = 0;
list_del_init(&pin->list);
if (atomic_dec_and_test(&pin_list->count) &&
pin_list == &fifo_peek_front(&j->pin))
bch2_journal_reclaim_fast(j);
- else if (fifo_used(&j->pin) == 1 &&
- atomic_read(&pin_list->count) == 1)
- journal_wake(j);
}
void bch2_journal_pin_drop(struct journal *j,
spin_unlock(&j->lock);
}
-void __bch2_journal_pin_add(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
{
- struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+ struct journal_entry_pin_list *pin_list;
spin_lock(&j->lock);
- __journal_pin_drop(j, pin);
+ if (seq < journal_last_seq(j)) {
+ /*
+ * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
+ * the src pin - with the pin dropped, the entry to pin might no
+ * longer exist, but that means there's no longer anything to
+ * copy and we can bail out here:
+ */
+ spin_unlock(&j->lock);
+ return;
+ }
- BUG_ON(!atomic_read(&pin_list->count));
+ pin_list = journal_seq_pin(j, seq);
+
+ __journal_pin_drop(j, pin);
atomic_inc(&pin_list->count);
pin->seq = seq;
pin->flush = flush_fn;
- list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
-
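+ /* key cache pins get their own list, so reclaim can flush them separately: */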
+ if (flush_fn == bch2_btree_key_cache_journal_flush)
+ list_add(&pin->list, &pin_list->key_cache_list);
+ else if (flush_fn)
+ list_add(&pin->list, &pin_list->list);
+ else
+ list_add(&pin->list, &pin_list->flushed);
spin_unlock(&j->lock);
/*
journal_wake(j);
}
-void bch2_journal_pin_copy(struct journal *j,
- struct journal_entry_pin *dst,
- struct journal_entry_pin *src,
- journal_pin_flush_fn flush_fn)
-{
- if (journal_pin_active(src) &&
- (!journal_pin_active(dst) || src->seq < dst->seq))
- __bch2_journal_pin_add(j, src->seq, dst, flush_fn);
-}
-
/**
* bch2_journal_pin_flush: ensure journal pin callback is no longer running
*/
*/
static struct journal_entry_pin *
-journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
+journal_get_next_pin(struct journal *j,
+ bool get_any,
+ bool get_key_cache,
+ u64 max_seq, u64 *seq)
{
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *ret = NULL;
- spin_lock(&j->lock);
-
- fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
- if (*seq > max_seq ||
- (ret = list_first_entry_or_null(&pin_list->list,
- struct journal_entry_pin, list)))
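+ /*
+ * get_any: return any pin, even past max_seq; get_key_cache: also
+ * return key cache pins past max_seq:
+ */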
+ fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
+ if (*seq > max_seq && !get_any && !get_key_cache)
break;
- if (ret) {
- list_move(&ret->list, &pin_list->flushed);
- BUG_ON(j->flush_in_progress);
- j->flush_in_progress = ret;
- j->last_flushed = jiffies;
- }
+ if (*seq <= max_seq || get_any) {
+ ret = list_first_entry_or_null(&pin_list->list,
+ struct journal_entry_pin, list);
+ if (ret)
+ return ret;
+ }
- spin_unlock(&j->lock);
+ if (*seq <= max_seq || get_any || get_key_cache) {
+ ret = list_first_entry_or_null(&pin_list->key_cache_list,
+ struct journal_entry_pin, list);
+ if (ret)
+ return ret;
+ }
+ }
- return ret;
+ return NULL;
}
-static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
- unsigned min_nr)
+/* returns true if we did work */
+static size_t journal_flush_pins(struct journal *j, u64 seq_to_flush,
+ unsigned min_any,
+ unsigned min_key_cache)
{
struct journal_entry_pin *pin;
+ size_t nr_flushed = 0;
+ journal_pin_flush_fn flush_fn;
u64 seq;
+ int err;
lockdep_assert_held(&j->reclaim_lock);
- while ((pin = journal_get_next_pin(j, min_nr
- ? U64_MAX : seq_to_flush, &seq))) {
- if (min_nr)
- min_nr--;
+ while (1) {
+ cond_resched();
+
+ j->last_flushed = jiffies;
+
+ spin_lock(&j->lock);
+ pin = journal_get_next_pin(j,
+ min_any != 0,
+ min_key_cache != 0,
+ seq_to_flush, &seq);
+ if (pin) {
+ BUG_ON(j->flush_in_progress);
+ j->flush_in_progress = pin;
+ j->flush_in_progress_dropped = false;
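+ /* grab flush_fn under the lock - the pin may be rearmed once we drop it: */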
+ flush_fn = pin->flush;
+ }
+ spin_unlock(&j->lock);
+
+ if (!pin)
+ break;
+
+ if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
+ min_key_cache--;
+
+ if (min_any)
+ min_any--;
- pin->flush(j, pin, seq);
+ err = flush_fn(j, pin, seq);
- BUG_ON(j->flush_in_progress != pin);
+ spin_lock(&j->lock);
+ /* Pin might have been dropped or rearmed: */
+ if (likely(!err && !j->flush_in_progress_dropped))
+ list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
j->flush_in_progress = NULL;
+ j->flush_in_progress_dropped = false;
+ spin_unlock(&j->lock);
+
wake_up(&j->pin_flush_wait);
+
+ if (err)
+ break;
+
+ nr_flushed++;
}
+
+ return nr_flushed;
}
-/**
- * bch2_journal_reclaim - free up journal buckets
- *
- * Background journal reclaim writes out btree nodes. It should be run
- * early enough so that we never completely run out of journal buckets.
- *
- * High watermarks for triggering background reclaim:
- * - FIFO has fewer than 512 entries left
- * - fewer than 25% journal buckets free
- *
- * Background reclaim runs until low watermarks are reached:
- * - FIFO has more than 1024 entries left
- * - more than 50% journal buckets free
- *
- * As long as a reclaim can complete in the time it takes to fill up
- * 512 journal entries or 25% of all journal buckets, then
- * journal_next_bucket() should not stall.
- */
-void bch2_journal_reclaim(struct journal *j)
+static u64 journal_seq_to_flush(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
- unsigned iter, min_nr = 0;
u64 seq_to_flush = 0;
-
- lockdep_assert_held(&j->reclaim_lock);
-
- bch2_journal_do_discards(j);
+ unsigned iter;
spin_lock(&j->lock);
(j->pin.size >> 1));
spin_unlock(&j->lock);
+ return seq_to_flush;
+}
+
+/**
+ * bch2_journal_reclaim - free up journal buckets
+ *
+ * Background journal reclaim writes out btree nodes. It should be run
+ * early enough so that we never completely run out of journal buckets.
+ *
+ * High watermarks for triggering background reclaim:
+ * - FIFO has fewer than 512 entries left
+ * - fewer than 25% journal buckets free
+ *
+ * Background reclaim runs until low watermarks are reached:
+ * - FIFO has more than 1024 entries left
+ * - more than 50% journal buckets free
+ *
+ * As long as a reclaim can complete in the time it takes to fill up
+ * 512 journal entries or 25% of all journal buckets, then
+ * journal_next_bucket() should not stall.
+ */
+static int __bch2_journal_reclaim(struct journal *j, bool direct)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bool kthread = (current->flags & PF_KTHREAD) != 0;
+ u64 seq_to_flush;
+ size_t min_nr, min_key_cache, nr_flushed;
+ unsigned flags;
+ int ret = 0;
+
/*
- * If it's been longer than j->reclaim_delay_ms since we last flushed,
- * make sure to flush at least one journal pin:
+ * We can't invoke memory reclaim while holding the reclaim_lock -
+ * journal reclaim is required to make progress for memory reclaim
+ * (cleaning the caches), so recursing into memory reclaim here could
+ * deadlock - hence the memalloc_noreclaim_save() below:
*/
- if (time_after(jiffies, j->last_flushed +
- msecs_to_jiffies(j->reclaim_delay_ms)))
- min_nr = 1;
+ lockdep_assert_held(&j->reclaim_lock);
+ flags = memalloc_noreclaim_save();
+
+ do {
+ if (kthread && kthread_should_stop())
+ break;
+
+ if (bch2_journal_error(j)) {
+ ret = -EIO;
+ break;
+ }
+
+ bch2_journal_do_discards(j);
- if (j->prereserved.reserved * 2 > j->prereserved.remaining) {
- seq_to_flush = max(seq_to_flush, journal_last_seq(j));
- min_nr = 1;
+ seq_to_flush = journal_seq_to_flush(j);
+ min_nr = 0;
+
+ /*
+ * If it's been longer than c->opts.journal_reclaim_delay since we last
+ * flushed, make sure to flush at least one journal pin:
+ */
+ if (time_after(jiffies, j->last_flushed +
+ msecs_to_jiffies(c->opts.journal_reclaim_delay)))
+ min_nr = 1;
+
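+ /* flush if prereserved journal space is running low: */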
+ if (j->prereserved.reserved * 4 > j->prereserved.remaining)
+ min_nr = 1;
+
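+ /* flush if the journal pin fifo is nearly full: */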
+ if (fifo_free(&j->pin) <= 32)
+ min_nr = 1;
+
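+ /* flush if more than half the btree node cache is dirty: */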
+ if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+ min_nr = 1;
+
+ trace_journal_reclaim_start(c,
+ min_nr,
+ j->prereserved.reserved,
+ j->prereserved.remaining,
+ atomic_read(&c->btree_cache.dirty),
+ c->btree_cache.used,
+ atomic_long_read(&c->btree_key_cache.nr_dirty),
+ atomic_long_read(&c->btree_key_cache.nr_keys));
+
+ min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
+
+ nr_flushed = journal_flush_pins(j, seq_to_flush,
+ min_nr, min_key_cache);
+
+ if (direct)
+ j->nr_direct_reclaim += nr_flushed;
+ else
+ j->nr_background_reclaim += nr_flushed;
+ trace_journal_reclaim_finish(c, nr_flushed);
+
+ if (nr_flushed)
+ wake_up(&j->reclaim_wait);
+ } while ((min_nr || min_key_cache) && nr_flushed && !direct);
+
+ memalloc_noreclaim_restore(flags);
+
+ return ret;
+}
+
+int bch2_journal_reclaim(struct journal *j)
+{
+ return __bch2_journal_reclaim(j, true);
+}
+
+static int bch2_journal_reclaim_thread(void *arg)
+{
+ struct journal *j = arg;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ unsigned long delay, now;
+ bool journal_empty;
+ int ret = 0;
+
+ set_freezable();
+
+ j->last_flushed = jiffies;
+
+ while (!ret && !kthread_should_stop()) {
+ j->reclaim_kicked = false;
+
+ mutex_lock(&j->reclaim_lock);
+ ret = __bch2_journal_reclaim(j, false);
+ mutex_unlock(&j->reclaim_lock);
+
+ now = jiffies;
+ delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
+ j->next_reclaim = j->last_flushed + delay;
+
+ if (!time_in_range(j->next_reclaim, now, now + delay))
+ j->next_reclaim = now + delay;
+
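+ /* sleep until we're kicked, told to stop, or the next reclaim run is due: */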
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
+ if (j->reclaim_kicked)
+ break;
+
+ spin_lock(&j->lock);
+ journal_empty = fifo_empty(&j->pin);
+ spin_unlock(&j->lock);
+
+ if (journal_empty)
+ freezable_schedule();
+ else if (time_after(j->next_reclaim, jiffies))
+ freezable_schedule_timeout(j->next_reclaim - jiffies);
+ else
+ break;
+ }
+ __set_current_state(TASK_RUNNING);
}
- journal_flush_pins(j, seq_to_flush, min_nr);
+ return 0;
+}
+
+void bch2_journal_reclaim_stop(struct journal *j)
+{
+ struct task_struct *p = j->reclaim_thread;
+
+ j->reclaim_thread = NULL;
- if (!bch2_journal_error(j))
- queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work,
- msecs_to_jiffies(j->reclaim_delay_ms));
+ if (p) {
+ kthread_stop(p);
+ put_task_struct(p);
+ }
}
-void bch2_journal_reclaim_work(struct work_struct *work)
+int bch2_journal_reclaim_start(struct journal *j)
{
- struct journal *j = container_of(to_delayed_work(work),
- struct journal, reclaim_work);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct task_struct *p;
- mutex_lock(&j->reclaim_lock);
- bch2_journal_reclaim(j);
- mutex_unlock(&j->reclaim_lock);
+ if (j->reclaim_thread)
+ return 0;
+
+ p = kthread_create(bch2_journal_reclaim_thread, j,
+ "bch-reclaim/%s", c->name);
+ if (IS_ERR(p)) {
+ bch_err(c, "error creating journal reclaim thread: %li", PTR_ERR(p));
+ return PTR_ERR(p);
+ }
+
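+ /* take a ref so kthread_stop() in bch2_journal_reclaim_stop() is safe: */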
+ get_task_struct(p);
+ j->reclaim_thread = p;
+ wake_up_process(p);
+ return 0;
}
-static int journal_flush_done(struct journal *j, u64 seq_to_flush)
+static int journal_flush_done(struct journal *j, u64 seq_to_flush,
+ bool *did_work)
{
int ret;
mutex_lock(&j->reclaim_lock);
- journal_flush_pins(j, seq_to_flush, 0);
+ if (journal_flush_pins(j, seq_to_flush, 0, 0))
+ *did_work = true;
spin_lock(&j->lock);
/*
*/
ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
journal_last_seq(j) > seq_to_flush ||
- (fifo_used(&j->pin) == 1 &&
- atomic_read(&fifo_peek_front(&j->pin).count) == 1);
+ !fifo_used(&j->pin);
spin_unlock(&j->lock);
mutex_unlock(&j->reclaim_lock);
return ret;
}
-void bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
+bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
+ bool did_work = false;
+
if (!test_bit(JOURNAL_STARTED, &j->flags))
- return;
+ return false;
+
- closure_wait_event(&j->async_wait, journal_flush_done(j, seq_to_flush));
+ closure_wait_event(&j->async_wait,
+ journal_flush_done(j, seq_to_flush, &did_work));
+
+ return did_work;
}
int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
return ret;
mutex_lock(&c->replicas_gc_lock);
- bch2_replicas_gc_start(c, 1 << BCH_DATA_JOURNAL);
+ bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
seq = 0;
spin_lock(&j->lock);
- while (!ret && seq < j->pin.back) {
+ while (!ret) {
struct bch_replicas_padded replicas;
seq = max(seq, journal_last_seq(j));
- bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL,
+ if (seq >= j->pin.back)
+ break;
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
journal_seq_pin(j, seq)->devs);
seq++;