#include "bcachefs.h"
#include "btree_key_cache.h"
+#include "btree_update.h"
+#include "buckets.h"
+#include "errcode.h"
+#include "error.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
-#include "super.h"
+#include "sb-members.h"
+#include "trace.h"
#include <linux/kthread.h>
#include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
/* Free space calculations: */
return available;
}
-static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
+static inline void journal_set_watermark(struct journal *j)
{
- union journal_preres_state old, new;
- u64 v = atomic64_read(&j->prereserved.counter);
-
- do {
- old.v = new.v = v;
- new.remaining = u64s_remaining;
- } while ((v = atomic64_cmpxchg(&j->prereserved.counter,
- old.v, new.v)) != old.v);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
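+	/*
+	 * Low on space if no more than 25% of the journal is clean; low on
+	 * pins if the pin fifo is more than 75% full:
+	 */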
+ bool low_on_space = j->space[journal_space_clean].total * 4 <=
+ j->space[journal_space_total].total;
+ bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
+ unsigned watermark = low_on_space || low_on_pin
+ ? BCH_WATERMARK_reclaim
+ : BCH_WATERMARK_stripe;
+
+ if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space],
+ &j->low_on_space_start, low_on_space) ||
+ track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin],
+ &j->low_on_pin_start, low_on_pin))
+ trace_and_count(c, journal_full, c);
+
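+	/*
+	 * After the swap, 'watermark' holds the old value: if the watermark
+	 * just dropped, previously blocked reservations may now succeed, so
+	 * wake waiters:
+	 */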
+ swap(watermark, j->watermark);
+ if (watermark > j->watermark)
+ journal_wake(j);
}
-static struct journal_space {
- unsigned next_entry;
- unsigned remaining;
-} __journal_space_available(struct journal *j, unsigned nr_devs_want,
+static struct journal_space
+journal_dev_space_available(struct journal *j, struct bch_dev *ca,
enum journal_space_from from)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_dev *ca;
- unsigned sectors_next_entry = UINT_MAX;
- unsigned sectors_total = UINT_MAX;
- unsigned i, nr_devs = 0;
- unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
- ? journal_prev_buf(j)->sectors
- : 0;
+ struct journal_device *ja = &ca->journal;
+ unsigned sectors, buckets, unwritten;
+ u64 seq;
- rcu_read_lock();
- for_each_member_device_rcu(ca, c, i,
- &c->rw_devs[BCH_DATA_journal]) {
- struct journal_device *ja = &ca->journal;
- unsigned buckets_this_device, sectors_this_device;
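+	/*
+	 * For the total space calculation every bucket counts, and a new
+	 * entry can always start at the beginning of a bucket:
+	 */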
+ if (from == journal_space_total)
+ return (struct journal_space) {
+ .next_entry = ca->mi.bucket_size,
+ .total = ca->mi.bucket_size * ja->nr,
+ };
- if (!ja->nr)
+ buckets = bch2_journal_dev_buckets_available(j, ja, from);
+ sectors = ja->sectors_free;
+
+ /*
+	 * Note that we don't allocate the space for a journal entry
+ * until we write it out - thus, account for it here:
+ */
+ for (seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j);
+ seq++) {
+ unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
+
+ if (!unwritten)
continue;
- buckets_this_device = bch2_journal_dev_buckets_available(j, ja, from);
- sectors_this_device = ja->sectors_free;
+ /* entry won't fit on this device, skip: */
+ if (unwritten > ca->mi.bucket_size)
+ continue;
- /*
- * We that we don't allocate the space for a journal entry
- * until we write it out - thus, account for it here:
- */
- if (unwritten_sectors >= sectors_this_device) {
- if (!buckets_this_device)
- continue;
+ if (unwritten >= sectors) {
+ if (!buckets) {
+ sectors = 0;
+ break;
+ }
- buckets_this_device--;
- sectors_this_device = ca->mi.bucket_size;
+ buckets--;
+ sectors = ca->mi.bucket_size;
}
- sectors_this_device -= unwritten_sectors;
+ sectors -= unwritten;
+ }
- if (sectors_this_device < ca->mi.bucket_size &&
- buckets_this_device) {
- buckets_this_device--;
- sectors_this_device = ca->mi.bucket_size;
- }
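+	/*
+	 * If what's left of the current bucket is less than a full bucket,
+	 * the next entry can start in a fresh bucket instead:
+	 */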
+ if (sectors < ca->mi.bucket_size && buckets) {
+ buckets--;
+ sectors = ca->mi.bucket_size;
+ }
+
+ return (struct journal_space) {
+ .next_entry = sectors,
+ .total = sectors + buckets * ca->mi.bucket_size,
+ };
+}
- if (!sectors_this_device)
+static struct journal_space __journal_space_available(struct journal *j, unsigned nr_devs_want,
+ enum journal_space_from from)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ unsigned i, pos, nr_devs = 0;
+ struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
+
+ BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
+
+ rcu_read_lock();
+ for_each_member_device_rcu(ca, c, i,
+ &c->rw_devs[BCH_DATA_journal]) {
+ if (!ca->journal.nr)
continue;
- sectors_next_entry = min(sectors_next_entry,
- sectors_this_device);
+ space = journal_dev_space_available(j, ca, from);
+ if (!space.next_entry)
+ continue;
- sectors_total = min(sectors_total,
- buckets_this_device * ca->mi.bucket_size +
- sectors_this_device);
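+		/* insertion sort, keeping dev_space sorted by total, descending: */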
+ for (pos = 0; pos < nr_devs; pos++)
+ if (space.total > dev_space[pos].total)
+ break;
- nr_devs++;
+ array_insert_item(dev_space, nr_devs, pos, space);
}
rcu_read_unlock();
if (nr_devs < nr_devs_want)
return (struct journal_space) { 0, 0 };
- return (struct journal_space) {
- .next_entry = sectors_next_entry,
- .remaining = max_t(int, 0, sectors_total - sectors_next_entry),
- };
+ /*
+ * We sorted largest to smallest, and we want the smallest out of the
+ * @nr_devs_want largest devices:
+ */
+ return dev_space[nr_devs_want - 1];
}
void bch2_journal_space_available(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
- struct journal_space discarded, clean_ondisk, clean;
- unsigned overhead, u64s_remaining = 0;
+ unsigned clean, clean_ondisk, total;
unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
j->buf[1].buf_size >> 9);
unsigned i, nr_online = 0, nr_devs_want;
j->can_discard = can_discard;
if (nr_online < c->opts.metadata_replicas_required) {
- ret = cur_entry_insufficient_devices;
- goto out;
- }
-
- if (!fifo_free(&j->pin)) {
- ret = cur_entry_journal_pin_full;
+ ret = JOURNAL_ERR_insufficient_devices;
goto out;
}
nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
- discarded = __journal_space_available(j, nr_devs_want, journal_space_discarded);
- clean_ondisk = __journal_space_available(j, nr_devs_want, journal_space_clean_ondisk);
- clean = __journal_space_available(j, nr_devs_want, journal_space_clean);
+ for (i = 0; i < journal_space_nr; i++)
+ j->space[i] = __journal_space_available(j, nr_devs_want, i);
+
+ clean_ondisk = j->space[journal_space_clean_ondisk].total;
+ clean = j->space[journal_space_clean].total;
+ total = j->space[journal_space_total].total;
+
+ if (!j->space[journal_space_discarded].next_entry)
+ ret = JOURNAL_ERR_journal_full;
- if (!discarded.next_entry)
- ret = cur_entry_journal_full;
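+	/*
+	 * Noflush journal writes can't be used to advance last_seq on disk,
+	 * so only allow skipping flushes while nearly all of our clean
+	 * journal space is already clean on disk:
+	 */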
+ if ((j->space[journal_space_clean_ondisk].next_entry <
+ j->space[journal_space_clean_ondisk].total) &&
+ (clean - clean_ondisk <= total / 8) &&
+ (clean_ondisk * 2 > clean))
+ set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
+ else
+ clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
- overhead = DIV_ROUND_UP(clean.remaining, max_entry_size) *
- journal_entry_overhead(j);
- u64s_remaining = clean.remaining << 6;
- u64s_remaining = max_t(int, 0, u64s_remaining - overhead);
- u64s_remaining /= 4;
+ journal_set_watermark(j);
out:
- j->cur_entry_sectors = !ret ? discarded.next_entry : 0;
+ j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
j->cur_entry_error = ret;
- journal_set_remaining(j, u64s_remaining);
- journal_check_may_get_unreserved(j);
if (!ret)
journal_wake(j);
struct journal_device *ja = &ca->journal;
while (should_discard_bucket(j, ja)) {
- if (ca->mi.discard &&
- blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
+ if (!c->opts.nochanges &&
+ ca->mi.discard &&
+ bdev_max_discard_sectors(ca->disk_sb.bdev))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca,
ja->buckets[ja->discard_idx]),
- ca->mi.bucket_size, GFP_NOIO, 0);
+ ca->mi.bucket_size, GFP_NOFS);
spin_lock(&j->lock);
ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
* entry, holding it open to ensure it gets replayed during recovery:
*/
-static void bch2_journal_reclaim_fast(struct journal *j)
+void bch2_journal_reclaim_fast(struct journal *j)
{
- struct journal_entry_pin_list temp;
bool popped = false;
lockdep_assert_held(&j->lock);
*/
while (!fifo_empty(&j->pin) &&
!atomic_read(&fifo_peek_front(&j->pin).count)) {
- BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
- BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed));
- BUG_ON(!fifo_pop(&j->pin, temp));
+ j->pin.front++;
popped = true;
}
bch2_journal_space_available(j);
}
-void bch2_journal_pin_put(struct journal *j, u64 seq)
+bool __bch2_journal_pin_put(struct journal *j, u64 seq)
{
struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
- if (atomic_dec_and_test(&pin_list->count)) {
+ return atomic_dec_and_test(&pin_list->count);
+}
+
+void bch2_journal_pin_put(struct journal *j, u64 seq)
+{
+ if (__bch2_journal_pin_put(j, seq)) {
spin_lock(&j->lock);
bch2_journal_reclaim_fast(j);
spin_unlock(&j->lock);
}
}
-static inline void __journal_pin_drop(struct journal *j,
+static inline bool __journal_pin_drop(struct journal *j,
struct journal_entry_pin *pin)
{
struct journal_entry_pin_list *pin_list;
if (!journal_pin_active(pin))
- return;
+ return false;
+
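+	/*
+	 * If this pin is currently being flushed, flag that it was dropped so
+	 * journal_flush_pins() knows not to move it to the flushed list:
+	 */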
+ if (j->flush_in_progress == pin)
+ j->flush_in_progress_dropped = true;
pin_list = journal_seq_pin(j, pin->seq);
pin->seq = 0;
list_del_init(&pin->list);
/*
- * Unpinning a journal entry make make journal_next_bucket() succeed, if
+ * Unpinning a journal entry may make journal_next_bucket() succeed, if
* writing a new last_seq will now make another bucket available:
*/
- if (atomic_dec_and_test(&pin_list->count) &&
- pin_list == &fifo_peek_front(&j->pin))
- bch2_journal_reclaim_fast(j);
- else if (fifo_used(&j->pin) == 1 &&
- atomic_read(&pin_list->count) == 1)
- journal_wake(j);
+ return atomic_dec_and_test(&pin_list->count) &&
+ pin_list == &fifo_peek_front(&j->pin);
}
void bch2_journal_pin_drop(struct journal *j,
struct journal_entry_pin *pin)
{
spin_lock(&j->lock);
- __journal_pin_drop(j, pin);
+ if (__journal_pin_drop(j, pin))
+ bch2_journal_reclaim_fast(j);
spin_unlock(&j->lock);
}
-static void bch2_journal_pin_add_locked(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
{
- struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+ if (fn == bch2_btree_node_flush0 ||
+ fn == bch2_btree_node_flush1)
+ return JOURNAL_PIN_btree;
+ else if (fn == bch2_btree_key_cache_journal_flush)
+ return JOURNAL_PIN_key_cache;
+ else
+ return JOURNAL_PIN_other;
+}
- __journal_pin_drop(j, pin);
+static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn,
+ enum journal_pin_type type)
+{
+ struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
- BUG_ON(!atomic_read(&pin_list->count) && seq == journal_last_seq(j));
+ /*
+	 * flush_fn is how we identify journal pins in debugfs, so it must
+	 * always exist, even if it doesn't do anything:
+ */
+ BUG_ON(!flush_fn);
atomic_inc(&pin_list->count);
pin->seq = seq;
pin->flush = flush_fn;
-
- list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
+ list_add(&pin->list, &pin_list->list[type]);
}
-void __bch2_journal_pin_add(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+void bch2_journal_pin_copy(struct journal *j,
+ struct journal_entry_pin *dst,
+ struct journal_entry_pin *src,
+ journal_pin_flush_fn flush_fn)
{
+ bool reclaim;
+
spin_lock(&j->lock);
- bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
+
+ u64 seq = READ_ONCE(src->seq);
+
+ if (seq < journal_last_seq(j)) {
+ /*
+ * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
+ * the src pin - with the pin dropped, the entry to pin might no
+		 * longer exist, but that means there's no longer anything to
+ * copy and we can bail out here:
+ */
+ spin_unlock(&j->lock);
+ return;
+ }
+
+ reclaim = __journal_pin_drop(j, dst);
+
+ bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));
+
+ if (reclaim)
+ bch2_journal_reclaim_fast(j);
spin_unlock(&j->lock);
/*
journal_wake(j);
}
-void bch2_journal_pin_update(struct journal *j, u64 seq,
- struct journal_entry_pin *pin,
- journal_pin_flush_fn flush_fn)
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+ struct journal_entry_pin *pin,
+ journal_pin_flush_fn flush_fn)
{
- if (journal_pin_active(pin) && pin->seq < seq)
- return;
+ bool reclaim;
spin_lock(&j->lock);
- if (pin->seq != seq) {
- bch2_journal_pin_add_locked(j, seq, pin, flush_fn);
- } else {
- struct journal_entry_pin_list *pin_list =
- journal_seq_pin(j, seq);
+ BUG_ON(seq < journal_last_seq(j));
- /*
- * If the pin is already pinning the right sequence number, it
- * still might've already been flushed:
- */
- list_move(&pin->list, &pin_list->list);
- }
+ reclaim = __journal_pin_drop(j, pin);
+
+ bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));
+ if (reclaim)
+ bch2_journal_reclaim_fast(j);
spin_unlock(&j->lock);
/*
journal_wake(j);
}
-void bch2_journal_pin_copy(struct journal *j,
- struct journal_entry_pin *dst,
- struct journal_entry_pin *src,
- journal_pin_flush_fn flush_fn)
-{
- spin_lock(&j->lock);
-
- if (journal_pin_active(src) &&
- (!journal_pin_active(dst) || src->seq < dst->seq))
- bch2_journal_pin_add_locked(j, src->seq, dst, flush_fn);
-
- spin_unlock(&j->lock);
-}
-
/**
* bch2_journal_pin_flush: ensure journal pin callback is no longer running
+ * @j: journal object
+ * @pin: pin to flush
*/
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
*/
static struct journal_entry_pin *
-journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
+journal_get_next_pin(struct journal *j,
+ u64 seq_to_flush,
+ unsigned allowed_below_seq,
+ unsigned allowed_above_seq,
+ u64 *seq)
{
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *ret = NULL;
+ unsigned i;
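+	/*
+	 * allowed_below_seq and allowed_above_seq are bitmasks of journal pin
+	 * types (1U << JOURNAL_PIN_*) that may be flushed below/above
+	 * seq_to_flush:
+	 */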
- if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags))
- return NULL;
-
- spin_lock(&j->lock);
-
- fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
- if (*seq > max_seq ||
- (ret = list_first_entry_or_null(&pin_list->list,
- struct journal_entry_pin, list)))
+ fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) {
+ if (*seq > seq_to_flush && !allowed_above_seq)
break;
- if (ret) {
- list_move(&ret->list, &pin_list->flushed);
- BUG_ON(j->flush_in_progress);
- j->flush_in_progress = ret;
+ for (i = 0; i < JOURNAL_PIN_NR; i++)
+ if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) ||
+ ((1U << i) & allowed_above_seq)) {
+ ret = list_first_entry_or_null(&pin_list->list[i],
+ struct journal_entry_pin, list);
+ if (ret)
+ return ret;
+ }
}
- spin_unlock(&j->lock);
-
- return ret;
+ return NULL;
}
/* returns true if we did work */
-static u64 journal_flush_pins(struct journal *j, u64 seq_to_flush,
- unsigned min_nr)
+static size_t journal_flush_pins(struct journal *j,
+ u64 seq_to_flush,
+ unsigned allowed_below_seq,
+ unsigned allowed_above_seq,
+ unsigned min_any,
+ unsigned min_key_cache)
{
struct journal_entry_pin *pin;
- u64 seq, ret = 0;
+ size_t nr_flushed = 0;
+ journal_pin_flush_fn flush_fn;
+ u64 seq;
+ int err;
lockdep_assert_held(&j->reclaim_lock);
while (1) {
+ unsigned allowed_above = allowed_above_seq;
+ unsigned allowed_below = allowed_below_seq;
+
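+		/*
+		 * If we're required to flush a minimum number of pins of any
+		 * type (or of key cache pins), widen the masks to allow those
+		 * flushes:
+		 */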
+ if (min_any) {
+ allowed_above |= ~0;
+ allowed_below |= ~0;
+ }
+
+ if (min_key_cache) {
+ allowed_above |= 1U << JOURNAL_PIN_key_cache;
+ allowed_below |= 1U << JOURNAL_PIN_key_cache;
+ }
+
cond_resched();
j->last_flushed = jiffies;
- pin = journal_get_next_pin(j, min_nr
- ? U64_MAX : seq_to_flush, &seq);
+ spin_lock(&j->lock);
+ pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq);
+ if (pin) {
+ BUG_ON(j->flush_in_progress);
+ j->flush_in_progress = pin;
+ j->flush_in_progress_dropped = false;
+ flush_fn = pin->flush;
+ }
+ spin_unlock(&j->lock);
+
if (!pin)
break;
- if (min_nr)
- min_nr--;
+ if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush)
+ min_key_cache--;
+
+ if (min_any)
+ min_any--;
- pin->flush(j, pin, seq);
+ err = flush_fn(j, pin, seq);
- BUG_ON(j->flush_in_progress != pin);
+ spin_lock(&j->lock);
+ /* Pin might have been dropped or rearmed: */
+ if (likely(!err && !j->flush_in_progress_dropped))
+ list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
j->flush_in_progress = NULL;
+ j->flush_in_progress_dropped = false;
+ spin_unlock(&j->lock);
+
wake_up(&j->pin_flush_wait);
- ret++;
+
+ if (err)
+ break;
+
+ nr_flushed++;
}
- return ret;
+ return nr_flushed;
}
static u64 journal_seq_to_flush(struct journal *j)
/* Try to keep the journal at most half full: */
nr_buckets = ja->nr / 2;
- /* And include pre-reservations: */
- nr_buckets += DIV_ROUND_UP(j->prereserved.reserved,
- (ca->mi.bucket_size << 6) -
- journal_entry_overhead(j));
-
nr_buckets = min(nr_buckets, ja->nr);
bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr;
}
/**
- * bch2_journal_reclaim - free up journal buckets
+ * __bch2_journal_reclaim - free up journal buckets
+ * @j: journal object
+ * @direct: direct or background reclaim?
+ * @kicked: requested to run since we last ran?
+ * Returns: 0 on success, or -EIO if the journal has been shut down
*
* Background journal reclaim writes out btree nodes. It should be run
* early enough so that we never completely run out of journal buckets.
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
-static void __bch2_journal_reclaim(struct journal *j, bool direct)
+static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
- bool kthread = (current->flags & PF_KTHREAD) != 0;
- u64 seq_to_flush, nr_flushed = 0;
- size_t min_nr;
+ u64 seq_to_flush;
+ size_t min_nr, min_key_cache, nr_flushed;
unsigned flags;
+ int ret = 0;
/*
* We can't invoke memory reclaim while holding the reclaim_lock -
flags = memalloc_noreclaim_save();
do {
- if (kthread && kthread_should_stop())
+ if (kthread_should_stop())
break;
+ if (bch2_journal_error(j)) {
+ ret = -EIO;
+ break;
+ }
+
bch2_journal_do_discards(j);
seq_to_flush = journal_seq_to_flush(j);
* make sure to flush at least one journal pin:
*/
if (time_after(jiffies, j->last_flushed +
- msecs_to_jiffies(j->reclaim_delay_ms)))
+ msecs_to_jiffies(c->opts.journal_reclaim_delay)))
min_nr = 1;
- if (j->prereserved.reserved * 2 > j->prereserved.remaining)
+ if (j->watermark != BCH_WATERMARK_stripe)
min_nr = 1;
- if (atomic_read(&c->btree_cache.dirty) * 4 >
- c->btree_cache.used * 3)
+ if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
min_nr = 1;
- min_nr = max(min_nr, bch2_nr_btree_keys_need_flush(c));
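+		/* require flushing some dirty key cache keys, capped at 128 per iteration: */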
+ min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
- trace_journal_reclaim_start(c,
- min_nr,
- j->prereserved.reserved,
- j->prereserved.remaining,
+ trace_and_count(c, journal_reclaim_start, c,
+ direct, kicked,
+ min_nr, min_key_cache,
atomic_read(&c->btree_cache.dirty),
c->btree_cache.used,
- c->btree_key_cache.nr_dirty,
- c->btree_key_cache.nr_keys);
+ atomic_long_read(&c->btree_key_cache.nr_dirty),
+ atomic_long_read(&c->btree_key_cache.nr_keys));
- nr_flushed = journal_flush_pins(j, seq_to_flush, min_nr);
+ nr_flushed = journal_flush_pins(j, seq_to_flush,
+ ~0, 0,
+ min_nr, min_key_cache);
if (direct)
j->nr_direct_reclaim += nr_flushed;
else
j->nr_background_reclaim += nr_flushed;
- trace_journal_reclaim_finish(c, nr_flushed);
- } while (min_nr);
+ trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
+
+ if (nr_flushed)
+ wake_up(&j->reclaim_wait);
+ } while ((min_nr || min_key_cache) && nr_flushed && !direct);
memalloc_noreclaim_restore(flags);
+
+ return ret;
}
-void bch2_journal_reclaim(struct journal *j)
+int bch2_journal_reclaim(struct journal *j)
{
- __bch2_journal_reclaim(j, true);
+ return __bch2_journal_reclaim(j, true, true);
}
static int bch2_journal_reclaim_thread(void *arg)
{
struct journal *j = arg;
- unsigned long next;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ unsigned long delay, now;
+ bool journal_empty;
+ int ret = 0;
set_freezable();
- kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags));
+ j->last_flushed = jiffies;
+
+ while (!ret && !kthread_should_stop()) {
+ bool kicked = j->reclaim_kicked;
- while (!kthread_should_stop()) {
j->reclaim_kicked = false;
mutex_lock(&j->reclaim_lock);
- __bch2_journal_reclaim(j, false);
+ ret = __bch2_journal_reclaim(j, false, kicked);
mutex_unlock(&j->reclaim_lock);
- next = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
+ now = jiffies;
+ delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
+ j->next_reclaim = j->last_flushed + delay;
+
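+		/* make sure next_reclaim lands within the next delay period: */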
+ if (!time_in_range(j->next_reclaim, now, now + delay))
+ j->next_reclaim = now + delay;
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
if (kthread_should_stop())
break;
if (j->reclaim_kicked)
break;
- if (time_after_eq(jiffies, next))
- break;
- schedule_timeout(next - jiffies);
- try_to_freeze();
+ spin_lock(&j->lock);
+ journal_empty = fifo_empty(&j->pin);
+ spin_unlock(&j->lock);
+
+ if (journal_empty)
+ schedule();
+ else if (time_after(j->next_reclaim, jiffies))
+ schedule_timeout(j->next_reclaim - jiffies);
+ else
+ break;
}
__set_current_state(TASK_RUNNING);
}
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct task_struct *p;
+ int ret;
if (j->reclaim_thread)
return 0;
p = kthread_create(bch2_journal_reclaim_thread, j,
"bch-reclaim/%s", c->name);
- if (IS_ERR(p))
- return PTR_ERR(p);
+ ret = PTR_ERR_OR_ZERO(p);
+ if (ret) {
+ bch_err_msg(c, ret, "creating journal reclaim thread");
+ return ret;
+ }
get_task_struct(p);
j->reclaim_thread = p;
mutex_lock(&j->reclaim_lock);
- *did_work = journal_flush_pins(j, seq_to_flush, 0) != 0;
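+	/*
+	 * Flush key cache and other pins before btree node pins: flushing the
+	 * key cache dirties btree nodes, whose pins the second pass then
+	 * flushes:
+	 */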
+ if (journal_flush_pins(j, seq_to_flush,
+ (1U << JOURNAL_PIN_key_cache)|
+ (1U << JOURNAL_PIN_other), 0, 0, 0) ||
+ journal_flush_pins(j, seq_to_flush,
+ (1U << JOURNAL_PIN_btree), 0, 0, 0))
+ *did_work = true;
spin_lock(&j->lock);
/*
*/
ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
journal_last_seq(j) > seq_to_flush ||
- (fifo_used(&j->pin) == 1 &&
- atomic_read(&fifo_peek_front(&j->pin).count) == 1);
+ !fifo_used(&j->pin);
spin_unlock(&j->lock);
mutex_unlock(&j->reclaim_lock);
bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
{
+ /* time_stats this */
bool did_work = false;
if (!test_bit(JOURNAL_STARTED, &j->flags))
mutex_lock(&c->replicas_gc_lock);
bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
- seq = 0;
+ /*
+ * Now that we've populated replicas_gc, write to the journal to mark
+ * active journal devices. This handles the case where the journal might
+ * be empty. Otherwise we could clear all journal replicas and
+ * temporarily put the fs into an unrecoverable state. Journal recovery
+ * expects to find devices marked for journal data on unclean mount.
+ */
+ ret = bch2_journal_meta(&c->journal);
+ if (ret)
+ goto err;
+ seq = 0;
spin_lock(&j->lock);
- while (!ret && seq < j->pin.back) {
+ while (!ret) {
struct bch_replicas_padded replicas;
seq = max(seq, journal_last_seq(j));
+ if (seq >= j->pin.back)
+ break;
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
journal_seq_pin(j, seq)->devs);
seq++;
spin_lock(&j->lock);
}
spin_unlock(&j->lock);
-
+err:
ret = bch2_replicas_gc_end(c, ret);
mutex_unlock(&c->replicas_gc_lock);