X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fjournal_reclaim.c;h=ab811c0dad26accfb4924eaef4cccb3ab957087c;hb=e5b2870d05753c1dedd79261ba5e321ce953f5ab;hp=4a5b50ed71b0dd46519915f57706eb2d1d89be59;hpb=a2094890a90a2f865e49f94e8448deca7e5852ef;p=bcachefs-tools-debian

diff --git a/libbcachefs/journal_reclaim.c b/libbcachefs/journal_reclaim.c
index 4a5b50e..ab811c0 100644
--- a/libbcachefs/journal_reclaim.c
+++ b/libbcachefs/journal_reclaim.c
@@ -2,16 +2,20 @@
 
 #include "bcachefs.h"
 #include "btree_key_cache.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
+#include "buckets.h"
+#include "errcode.h"
 #include "error.h"
 #include "journal.h"
 #include "journal_io.h"
 #include "journal_reclaim.h"
 #include "replicas.h"
-#include "super.h"
+#include "sb-members.h"
+#include "trace.h"
 
 #include <linux/kthread.h>
 #include <linux/sched/mm.h>
-#include <trace/events/bcachefs.h>
 
 /* Free space calculations: */
 
@@ -47,29 +51,25 @@ unsigned bch2_journal_dev_buckets_available(struct journal *j,
 	return available;
 }
 
-static void journal_set_remaining(struct journal *j, unsigned u64s_remaining)
+void bch2_journal_set_watermark(struct journal *j)
 {
-	union journal_preres_state old, new;
-	u64 v = atomic64_read(&j->prereserved.counter);
-
-	do {
-		old.v = new.v = v;
-		new.remaining = u64s_remaining;
-	} while ((v = atomic64_cmpxchg(&j->prereserved.counter,
-				       old.v, new.v)) != old.v);
-}
-
-static inline unsigned get_unwritten_sectors(struct journal *j, unsigned *idx)
-{
-	unsigned sectors = 0;
-
-	while (!sectors && *idx != j->reservations.idx) {
-		sectors = j->buf[*idx].sectors;
-
-		*idx = (*idx + 1) & JOURNAL_BUF_MASK;
-	}
-
-	return sectors;
+	struct bch_fs *c = container_of(j, struct bch_fs, journal);
+	bool low_on_space = j->space[journal_space_clean].total * 4 <=
+		j->space[journal_space_total].total;
+	bool low_on_pin = fifo_free(&j->pin) < j->pin.size / 4;
+	bool low_on_wb = bch2_btree_write_buffer_must_wait(c);
+	unsigned watermark = low_on_space || low_on_pin || low_on_wb
+		? BCH_WATERMARK_reclaim
+		: BCH_WATERMARK_stripe;
+
+	if (track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_space], low_on_space) ||
+	    track_event_change(&c->times[BCH_TIME_blocked_journal_low_on_pin], low_on_pin) ||
+	    track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
+		trace_and_count(c, journal_full, c);
+
+	swap(watermark, j->watermark);
+	if (watermark > j->watermark)
+		journal_wake(j);
 }
 
 static struct journal_space
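
The new bch2_journal_set_watermark() above replaces the old prereserved-space
bookkeeping with two simple thresholds: the journal drops to the reclaim
watermark when less than a quarter of its space is clean, or when the pin
FIFO is more than three-quarters full. A minimal standalone sketch of that
logic (invented names, not bcachefs code; the write-buffer check and the
event tracking are omitted):

#include <stdbool.h>

enum watermark { WATERMARK_STRIPE, WATERMARK_RECLAIM };

struct journal_sim {
	unsigned long	clean;		/* clean journal space, in sectors */
	unsigned long	total;		/* total journal space, in sectors */
	unsigned long	pin_free;	/* free slots in the pin FIFO */
	unsigned long	pin_size;	/* capacity of the pin FIFO */
	enum watermark	watermark;
};

/* Returns true if the watermark was relaxed and waiters should be woken: */
static bool set_watermark(struct journal_sim *j)
{
	bool low_on_space = j->clean * 4 <= j->total;
	bool low_on_pin   = j->pin_free < j->pin_size / 4;
	enum watermark w  = low_on_space || low_on_pin
		? WATERMARK_RECLAIM
		: WATERMARK_STRIPE;
	bool relaxed = w < j->watermark;

	j->watermark = w;
	return relaxed;
}

Waking waiters only when the watermark relaxes is the same effect as the
swap()-then-compare pattern in the hunk above.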
@@ -77,7 +77,8 @@ journal_dev_space_available(struct journal *j, struct bch_dev *ca,
 			    enum journal_space_from from)
 {
 	struct journal_device *ja = &ca->journal;
-	unsigned sectors, buckets, unwritten, idx = j->reservations.unwritten_idx;
+	unsigned sectors, buckets, unwritten;
+	u64 seq;
 
 	if (from == journal_space_total)
 		return (struct journal_space) {
@@ -92,7 +93,18 @@ journal_dev_space_available(struct journal *j, struct bch_dev *ca,
 	 * Note that we don't allocate the space for a journal entry
 	 * until we write it out - thus, account for it here:
 	 */
-	while ((unwritten = get_unwritten_sectors(j, &idx))) {
+	for (seq = journal_last_unwritten_seq(j);
+	     seq <= journal_cur_seq(j);
+	     seq++) {
+		unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
+
+		if (!unwritten)
+			continue;
+
+		/* entry won't fit on this device, skip: */
+		if (unwritten > ca->mi.bucket_size)
+			continue;
+
 		if (unwritten >= sectors) {
 			if (!buckets) {
 				sectors = 0;
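
The loop above walks the still-unwritten journal buffers because, as the
comment says, an entry's space is only claimed on disk when it is written
out. A self-contained sketch of that accounting, with invented names and a
small fixed ring standing in for j->buf:

#define NR_BUFS 4U		/* ring of in-flight journal buffers */

struct dev_space_sim {
	unsigned bucket_size;		/* sectors per journal bucket */
	unsigned sectors_free;		/* sectors left in the open bucket */
	unsigned free_buckets;		/* whole buckets still unused */
	unsigned unwritten[NR_BUFS];	/* sectors of each unwritten entry */
};

static unsigned sectors_available(const struct dev_space_sim *s)
{
	unsigned sectors = s->sectors_free;
	unsigned buckets = s->free_buckets;

	for (unsigned i = 0; i < NR_BUFS; i++) {
		unsigned w = s->unwritten[i];

		if (!w || w > s->bucket_size)
			continue;	/* empty slot, or won't fit here */

		if (w >= sectors) {
			/* entry spills into the next free bucket: */
			if (!buckets)
				return 0;
			buckets--;
			sectors = s->bucket_size;
		}
		sectors -= w;
	}
	return sectors;
}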
stuck"); - if (buf) { - __bch2_journal_debug_to_text(&_PBUF(buf, 4096), j); - pr_err("\n%s", buf); - kfree(buf); - } - - bch2_fatal_error(c); - ret = cur_entry_journal_stuck; - } else if (!j->space[journal_space_discarded].next_entry) - ret = cur_entry_journal_full; - else if (!fifo_free(&j->pin)) - ret = cur_entry_journal_pin_full; + if (!j->space[journal_space_discarded].next_entry) + ret = JOURNAL_ERR_journal_full; if ((j->space[journal_space_clean_ondisk].next_entry < j->space[journal_space_clean_ondisk].total) && (clean - clean_ondisk <= total / 8) && - (clean_ondisk * 2 > clean )) + (clean_ondisk * 2 > clean)) set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags); else clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags); - u64s_remaining = (u64) clean << 6; - u64s_remaining -= (u64) total << 3; - u64s_remaining = max(0LL, u64s_remaining); - u64s_remaining /= 2; - u64s_remaining = min_t(u64, u64s_remaining, U32_MAX); + bch2_journal_set_watermark(j); out: j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0; j->cur_entry_error = ret; - journal_set_remaining(j, u64s_remaining); - journal_check_may_get_unreserved(j); if (!ret) journal_wake(j); @@ -271,21 +256,20 @@ static bool should_discard_bucket(struct journal *j, struct journal_device *ja) void bch2_journal_do_discards(struct journal *j) { struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct bch_dev *ca; - unsigned iter; mutex_lock(&j->discard_lock); - for_each_rw_member(ca, c, iter) { + for_each_rw_member(c, ca) { struct journal_device *ja = &ca->journal; while (should_discard_bucket(j, ja)) { - if (ca->mi.discard && - blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev))) + if (!c->opts.nochanges && + ca->mi.discard && + bdev_max_discard_sectors(ca->disk_sb.bdev)) blkdev_issue_discard(ca->disk_sb.bdev, bucket_to_sector(ca, ja->buckets[ja->discard_idx]), - ca->mi.bucket_size, GFP_NOIO, 0); + ca->mi.bucket_size, GFP_NOFS); spin_lock(&j->lock); ja->discard_idx = (ja->discard_idx + 1) % ja->nr; @@ -303,9 +287,8 @@ void bch2_journal_do_discards(struct journal *j) * entry, holding it open to ensure it gets replayed during recovery: */ -static void bch2_journal_reclaim_fast(struct journal *j) +void bch2_journal_reclaim_fast(struct journal *j) { - struct journal_entry_pin_list temp; bool popped = false; lockdep_assert_held(&j->lock); @@ -315,10 +298,9 @@ static void bch2_journal_reclaim_fast(struct journal *j) * all btree nodes got written out */ while (!fifo_empty(&j->pin) && + j->pin.front <= j->seq_ondisk && !atomic_read(&fifo_peek_front(&j->pin).count)) { - BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list)); - BUG_ON(!list_empty(&fifo_peek_front(&j->pin).flushed)); - BUG_ON(!fifo_pop(&j->pin, temp)); + j->pin.front++; popped = true; } @@ -326,65 +308,93 @@ static void bch2_journal_reclaim_fast(struct journal *j) bch2_journal_space_available(j); } -void __bch2_journal_pin_put(struct journal *j, u64 seq) +bool __bch2_journal_pin_put(struct journal *j, u64 seq) { struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq); - if (atomic_dec_and_test(&pin_list->count)) - bch2_journal_reclaim_fast(j); + return atomic_dec_and_test(&pin_list->count); } void bch2_journal_pin_put(struct journal *j, u64 seq) { - struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq); - - if (atomic_dec_and_test(&pin_list->count)) { + if (__bch2_journal_pin_put(j, seq)) { spin_lock(&j->lock); bch2_journal_reclaim_fast(j); spin_unlock(&j->lock); } } -static inline void __journal_pin_drop(struct journal *j, +static inline bool 
@@ -326,65 +308,93 @@ static void bch2_journal_reclaim_fast(struct journal *j)
 		bch2_journal_space_available(j);
 }
 
-void __bch2_journal_pin_put(struct journal *j, u64 seq)
+bool __bch2_journal_pin_put(struct journal *j, u64 seq)
 {
 	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
 
-	if (atomic_dec_and_test(&pin_list->count))
-		bch2_journal_reclaim_fast(j);
+	return atomic_dec_and_test(&pin_list->count);
 }
 
 void bch2_journal_pin_put(struct journal *j, u64 seq)
 {
-	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
-
-	if (atomic_dec_and_test(&pin_list->count)) {
+	if (__bch2_journal_pin_put(j, seq)) {
 		spin_lock(&j->lock);
 		bch2_journal_reclaim_fast(j);
 		spin_unlock(&j->lock);
 	}
 }
 
-static inline void __journal_pin_drop(struct journal *j,
+static inline bool __journal_pin_drop(struct journal *j,
 				      struct journal_entry_pin *pin)
 {
 	struct journal_entry_pin_list *pin_list;
 
 	if (!journal_pin_active(pin))
-		return;
+		return false;
+
+	if (j->flush_in_progress == pin)
+		j->flush_in_progress_dropped = true;
 
 	pin_list = journal_seq_pin(j, pin->seq);
 	pin->seq = 0;
 	list_del_init(&pin->list);
 
 	/*
 	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
 	 * writing a new last_seq will now make another bucket available:
 	 */
-	if (atomic_dec_and_test(&pin_list->count) &&
-	    pin_list == &fifo_peek_front(&j->pin))
-		bch2_journal_reclaim_fast(j);
-	else if (fifo_used(&j->pin) == 1 &&
-		 atomic_read(&pin_list->count) == 1)
-		journal_wake(j);
+	return atomic_dec_and_test(&pin_list->count) &&
+		pin_list == &fifo_peek_front(&j->pin);
 }
 
 void bch2_journal_pin_drop(struct journal *j,
 			   struct journal_entry_pin *pin)
 {
 	spin_lock(&j->lock);
-	__journal_pin_drop(j, pin);
+	if (__journal_pin_drop(j, pin))
+		bch2_journal_reclaim_fast(j);
 	spin_unlock(&j->lock);
 }
 
-void bch2_journal_pin_set(struct journal *j, u64 seq,
+static enum journal_pin_type journal_pin_type(journal_pin_flush_fn fn)
+{
+	if (fn == bch2_btree_node_flush0 ||
+	    fn == bch2_btree_node_flush1)
+		return JOURNAL_PIN_btree;
+	else if (fn == bch2_btree_key_cache_journal_flush)
+		return JOURNAL_PIN_key_cache;
+	else
+		return JOURNAL_PIN_other;
+}
+
+static inline void bch2_journal_pin_set_locked(struct journal *j, u64 seq,
 			  struct journal_entry_pin *pin,
-			  journal_pin_flush_fn flush_fn)
+			  journal_pin_flush_fn flush_fn,
+			  enum journal_pin_type type)
 {
-	struct journal_entry_pin_list *pin_list;
+	struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+	/*
+	 * flush_fn is how we identify journal pins in debugfs, so must always
+	 * exist, even if it doesn't do anything:
+	 */
+	BUG_ON(!flush_fn);
 
+	atomic_inc(&pin_list->count);
+	pin->seq	= seq;
+	pin->flush	= flush_fn;
+	list_add(&pin->list, &pin_list->list[type]);
+}
+
+void bch2_journal_pin_copy(struct journal *j,
+			   struct journal_entry_pin *dst,
+			   struct journal_entry_pin *src,
+			   journal_pin_flush_fn flush_fn)
+{
 	spin_lock(&j->lock);
 
+	u64 seq = READ_ONCE(src->seq);
+
 	if (seq < journal_last_seq(j)) {
 		/*
 		 * bch2_journal_pin_copy() raced with bch2_journal_pin_drop() on
@@ -396,26 +406,50 @@ void bch2_journal_pin_set(struct journal *j, u64 seq,
 		return;
 	}
 
-	pin_list = journal_seq_pin(j, seq);
+	bool reclaim = __journal_pin_drop(j, dst);
 
-	__journal_pin_drop(j, pin);
+	bch2_journal_pin_set_locked(j, seq, dst, flush_fn, journal_pin_type(flush_fn));
 
-	atomic_inc(&pin_list->count);
-	pin->seq	= seq;
-	pin->flush	= flush_fn;
+	if (reclaim)
+		bch2_journal_reclaim_fast(j);
 
-	list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
+	/*
+	 * If the journal is currently full, we might want to call flush_fn
+	 * immediately:
+	 */
+	if (seq == journal_last_seq(j))
+		journal_wake(j);
 
 	spin_unlock(&j->lock);
+}
+
+void bch2_journal_pin_set(struct journal *j, u64 seq,
+			  struct journal_entry_pin *pin,
+			  journal_pin_flush_fn flush_fn)
+{
+	spin_lock(&j->lock);
+
+	BUG_ON(seq < journal_last_seq(j));
+
+	bool reclaim = __journal_pin_drop(j, pin);
+
+	bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn));
 
+	if (reclaim)
+		bch2_journal_reclaim_fast(j);
 	/*
 	 * If the journal is currently full, we might want to call flush_fn
 	 * immediately:
 	 */
-	journal_wake(j);
+	if (seq == journal_last_seq(j))
+		journal_wake(j);
+
+	spin_unlock(&j->lock);
 }
 
 /**
  * bch2_journal_pin_flush: ensure journal pin callback is no longer running
+ * @j: journal object
+ * @pin: pin to flush
  */
 void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
 {
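
The pin machinery above now keeps one list per pin type, binned by flush
callback, so that reclaim can choose what kind of work to do; comparing
function pointers is the actual mechanism journal_pin_type() uses. A compact
sketch of the idea (the callbacks here are stand-ins, not the real bcachefs
functions):

typedef int (*flush_fn_t)(void);

static int flush_btree(void)		{ return 0; }	/* stand-in */
static int flush_key_cache(void)	{ return 0; }	/* stand-in */

enum pin_type { PIN_BTREE, PIN_KEY_CACHE, PIN_OTHER, PIN_NR };

static enum pin_type pin_type(flush_fn_t fn)
{
	if (fn == flush_btree)
		return PIN_BTREE;
	if (fn == flush_key_cache)
		return PIN_KEY_CACHE;
	return PIN_OTHER;
}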
&pin_list->list : &pin_list->flushed); + /* + * If the journal is currently full, we might want to call flush_fn + * immediately: + */ + if (seq == journal_last_seq(j)) + journal_wake(j); spin_unlock(&j->lock); +} + +void bch2_journal_pin_set(struct journal *j, u64 seq, + struct journal_entry_pin *pin, + journal_pin_flush_fn flush_fn) +{ + spin_lock(&j->lock); + + BUG_ON(seq < journal_last_seq(j)); + + bool reclaim = __journal_pin_drop(j, pin); + + bch2_journal_pin_set_locked(j, seq, pin, flush_fn, journal_pin_type(flush_fn)); + if (reclaim) + bch2_journal_reclaim_fast(j); /* * If the journal is currently full, we might want to call flush_fn * immediately: */ - journal_wake(j); + if (seq == journal_last_seq(j)) + journal_wake(j); + + spin_unlock(&j->lock); } /** * bch2_journal_pin_flush: ensure journal pin callback is no longer running + * @j: journal object + * @pin: pin to flush */ void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin) { @@ -434,76 +468,115 @@ void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin) */ static struct journal_entry_pin * -journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq) +journal_get_next_pin(struct journal *j, + u64 seq_to_flush, + unsigned allowed_below_seq, + unsigned allowed_above_seq, + u64 *seq) { struct journal_entry_pin_list *pin_list; struct journal_entry_pin *ret = NULL; + unsigned i; - if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags)) - return NULL; - - spin_lock(&j->lock); - - fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) - if (*seq > max_seq || - (ret = list_first_entry_or_null(&pin_list->list, - struct journal_entry_pin, list))) + fifo_for_each_entry_ptr(pin_list, &j->pin, *seq) { + if (*seq > seq_to_flush && !allowed_above_seq) break; - if (ret) { - list_move(&ret->list, &pin_list->flushed); - BUG_ON(j->flush_in_progress); - j->flush_in_progress = ret; + for (i = 0; i < JOURNAL_PIN_NR; i++) + if ((((1U << i) & allowed_below_seq) && *seq <= seq_to_flush) || + ((1U << i) & allowed_above_seq)) { + ret = list_first_entry_or_null(&pin_list->list[i], + struct journal_entry_pin, list); + if (ret) + return ret; + } } - spin_unlock(&j->lock); - - return ret; + return NULL; } /* returns true if we did work */ -static u64 journal_flush_pins(struct journal *j, u64 seq_to_flush, - unsigned min_nr) +static size_t journal_flush_pins(struct journal *j, + u64 seq_to_flush, + unsigned allowed_below_seq, + unsigned allowed_above_seq, + unsigned min_any, + unsigned min_key_cache) { struct journal_entry_pin *pin; - u64 seq, ret = 0; + size_t nr_flushed = 0; + journal_pin_flush_fn flush_fn; + u64 seq; + int err; lockdep_assert_held(&j->reclaim_lock); while (1) { + unsigned allowed_above = allowed_above_seq; + unsigned allowed_below = allowed_below_seq; + + if (min_any) { + allowed_above |= ~0; + allowed_below |= ~0; + } + + if (min_key_cache) { + allowed_above |= 1U << JOURNAL_PIN_key_cache; + allowed_below |= 1U << JOURNAL_PIN_key_cache; + } + cond_resched(); j->last_flushed = jiffies; - pin = journal_get_next_pin(j, min_nr - ? 
U64_MAX : seq_to_flush, &seq); + spin_lock(&j->lock); + pin = journal_get_next_pin(j, seq_to_flush, allowed_below, allowed_above, &seq); + if (pin) { + BUG_ON(j->flush_in_progress); + j->flush_in_progress = pin; + j->flush_in_progress_dropped = false; + flush_fn = pin->flush; + } + spin_unlock(&j->lock); + if (!pin) break; - if (min_nr) - min_nr--; + if (min_key_cache && pin->flush == bch2_btree_key_cache_journal_flush) + min_key_cache--; - pin->flush(j, pin, seq); + if (min_any) + min_any--; - BUG_ON(j->flush_in_progress != pin); + err = flush_fn(j, pin, seq); + + spin_lock(&j->lock); + /* Pin might have been dropped or rearmed: */ + if (likely(!err && !j->flush_in_progress_dropped)) + list_move(&pin->list, &journal_seq_pin(j, seq)->flushed); j->flush_in_progress = NULL; + j->flush_in_progress_dropped = false; + spin_unlock(&j->lock); + wake_up(&j->pin_flush_wait); - ret++; + + if (err) + break; + + nr_flushed++; } - return ret; + return nr_flushed; } static u64 journal_seq_to_flush(struct journal *j) { struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct bch_dev *ca; u64 seq_to_flush = 0; - unsigned iter; spin_lock(&j->lock); - for_each_rw_member(ca, c, iter) { + for_each_rw_member(c, ca) { struct journal_device *ja = &ca->journal; unsigned nr_buckets, bucket_to_flush; @@ -513,11 +586,6 @@ static u64 journal_seq_to_flush(struct journal *j) /* Try to keep the journal at most half full: */ nr_buckets = ja->nr / 2; - /* And include pre-reservations: */ - nr_buckets += DIV_ROUND_UP(j->prereserved.reserved, - (ca->mi.bucket_size << 6) - - journal_entry_overhead(j)); - nr_buckets = min(nr_buckets, ja->nr); bucket_to_flush = (ja->cur_idx + nr_buckets) % ja->nr; @@ -535,7 +603,11 @@ static u64 journal_seq_to_flush(struct journal *j) } /** - * bch2_journal_reclaim - free up journal buckets + * __bch2_journal_reclaim - free up journal buckets + * @j: journal object + * @direct: direct or background reclaim? + * @kicked: requested to run since we last ran? + * Returns: 0 on success, or -EIO if the journal has been shutdown * * Background journal reclaim writes out btree nodes. It should be run * early enough so that we never completely run out of journal buckets. @@ -552,12 +624,12 @@ static u64 journal_seq_to_flush(struct journal *j) * 512 journal entries or 25% of all journal buckets, then * journal_next_bucket() should not stall. 
@@ -535,7 +603,11 @@ static u64 journal_seq_to_flush(struct journal *j)
 }
 
 /**
- * bch2_journal_reclaim - free up journal buckets
+ * __bch2_journal_reclaim - free up journal buckets
+ * @j: journal object
+ * @direct: direct or background reclaim?
+ * @kicked: requested to run since we last ran?
+ * Returns: 0 on success, or -EIO if the journal has been shutdown
  *
  * Background journal reclaim writes out btree nodes. It should be run
  * early enough so that we never completely run out of journal buckets.
@@ -552,12 +624,12 @@ static u64 journal_seq_to_flush(struct journal *j)
 * 512 journal entries or 25% of all journal buckets, then
 * journal_next_bucket() should not stall.
 */
-static int __bch2_journal_reclaim(struct journal *j, bool direct)
+static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	bool kthread = (current->flags & PF_KTHREAD) != 0;
-	u64 seq_to_flush, nr_flushed = 0;
-	size_t min_nr;
+	u64 seq_to_flush;
+	size_t min_nr, min_key_cache, nr_flushed;
 	unsigned flags;
 	int ret = 0;
 
@@ -589,38 +661,38 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct)
 	 * make sure to flush at least one journal pin:
 	 */
 	if (time_after(jiffies, j->last_flushed +
-		       msecs_to_jiffies(j->reclaim_delay_ms)))
-		min_nr = 1;
-
-	if (j->prereserved.reserved * 2 > j->prereserved.remaining)
+		       msecs_to_jiffies(c->opts.journal_reclaim_delay)))
 		min_nr = 1;
 
-	if (atomic_read(&c->btree_cache.dirty) * 4 >
-	    c->btree_cache.used * 3)
+	if (j->watermark != BCH_WATERMARK_stripe)
 		min_nr = 1;
 
-	if (fifo_free(&j->pin) <= 32)
+	if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
 		min_nr = 1;
 
-	min_nr = max(min_nr, bch2_nr_btree_keys_need_flush(c));
+	min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
 
-	trace_journal_reclaim_start(c,
-				    min_nr,
-				    j->prereserved.reserved,
-				    j->prereserved.remaining,
+	trace_and_count(c, journal_reclaim_start, c,
+			direct, kicked,
+			min_nr, min_key_cache,
 			atomic_read(&c->btree_cache.dirty),
 			c->btree_cache.used,
 			atomic_long_read(&c->btree_key_cache.nr_dirty),
 			atomic_long_read(&c->btree_key_cache.nr_keys));
 
-		nr_flushed = journal_flush_pins(j, seq_to_flush, min_nr);
+		nr_flushed = journal_flush_pins(j, seq_to_flush,
+						~0, 0,
+						min_nr, min_key_cache);
 
 		if (direct)
 			j->nr_direct_reclaim += nr_flushed;
 		else
 			j->nr_background_reclaim += nr_flushed;
 
-		trace_journal_reclaim_finish(c, nr_flushed);
-	} while (min_nr && nr_flushed);
+		trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
+
+		if (nr_flushed)
+			wake_up(&j->reclaim_wait);
+	} while ((min_nr || min_key_cache) && nr_flushed && !direct);
 
 	memalloc_noreclaim_restore(flags);
 
@@ -629,39 +701,54 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct)
 
 int bch2_journal_reclaim(struct journal *j)
 {
-	return __bch2_journal_reclaim(j, true);
+	return __bch2_journal_reclaim(j, true, true);
 }
 
 static int bch2_journal_reclaim_thread(void *arg)
 {
 	struct journal *j = arg;
-	unsigned long next;
+	struct bch_fs *c = container_of(j, struct bch_fs, journal);
+	unsigned long delay, now;
+	bool journal_empty;
 	int ret = 0;
 
 	set_freezable();
 
-	kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags));
+	j->last_flushed = jiffies;
 
 	while (!ret && !kthread_should_stop()) {
+		bool kicked = j->reclaim_kicked;
+
 		j->reclaim_kicked = false;
 
 		mutex_lock(&j->reclaim_lock);
-		ret = __bch2_journal_reclaim(j, false);
+		ret = __bch2_journal_reclaim(j, false, kicked);
 		mutex_unlock(&j->reclaim_lock);
 
-		next = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
+		now = jiffies;
+		delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
+		j->next_reclaim = j->last_flushed + delay;
+
+		if (!time_in_range(j->next_reclaim, now, now + delay))
+			j->next_reclaim = now + delay;
 
 		while (1) {
-			set_current_state(TASK_INTERRUPTIBLE);
+			set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
 			if (kthread_should_stop())
 				break;
 			if (j->reclaim_kicked)
 				break;
-			if (time_after_eq(jiffies, next))
-				break;
 
-			schedule_timeout(next - jiffies);
-			try_to_freeze();
+			spin_lock(&j->lock);
+			journal_empty = fifo_empty(&j->pin);
+			spin_unlock(&j->lock);
+
+			if (journal_empty)
+				schedule();
+			else if (time_after(j->next_reclaim, jiffies))
+				schedule_timeout(j->next_reclaim - jiffies);
+			else
+				break;
 		}
 		__set_current_state(TASK_RUNNING);
 	}
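
The reworked reclaim thread sleeps indefinitely while the pin FIFO is empty,
otherwise until j->next_reclaim, and wakes early when kicked. A userspace
analogue of that wait loop, as a sketch only, with pthreads standing in for
the kernel's scheduling primitives:

#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct reclaim_waiter {
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
	bool		kicked;		/* someone requested a reclaim run */
	bool		journal_empty;	/* nothing pinned, nothing to do */
};

/* Returns once kicked, or, if work is pending, once the deadline passes. */
static void reclaim_wait(struct reclaim_waiter *w,
			 const struct timespec *next_reclaim)
{
	pthread_mutex_lock(&w->lock);
	while (!w->kicked) {
		int err = w->journal_empty
			? pthread_cond_wait(&w->cond, &w->lock)
			: pthread_cond_timedwait(&w->cond, &w->lock, next_reclaim);
		if (err)	/* ETIMEDOUT: deadline reached, run reclaim */
			break;
	}
	w->kicked = false;
	pthread_mutex_unlock(&w->lock);
}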
@@ -685,16 +772,17 @@ int bch2_journal_reclaim_start(struct journal *j)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	struct task_struct *p;
+	int ret;
 
 	if (j->reclaim_thread)
 		return 0;
 
 	p = kthread_create(bch2_journal_reclaim_thread, j,
 			   "bch-reclaim/%s", c->name);
-	if (IS_ERR(p)) {
-		bch_err(c, "error creating journal reclaim thread: %li", PTR_ERR(p));
-		return PTR_ERR(p);
-	}
+	ret = PTR_ERR_OR_ZERO(p);
+	bch_err_msg(c, ret, "creating journal reclaim thread");
+	if (ret)
+		return ret;
 
 	get_task_struct(p);
 	j->reclaim_thread = p;
@@ -713,7 +801,15 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
 
 	mutex_lock(&j->reclaim_lock);
 
-	*did_work = journal_flush_pins(j, seq_to_flush, 0) != 0;
+	if (journal_flush_pins(j, seq_to_flush,
+			       (1U << JOURNAL_PIN_key_cache)|
+			       (1U << JOURNAL_PIN_other), 0, 0, 0) ||
+	    journal_flush_pins(j, seq_to_flush,
+			       (1U << JOURNAL_PIN_btree), 0, 0, 0))
+		*did_work = true;
+
+	if (seq_to_flush > journal_cur_seq(j))
+		bch2_journal_entry_close(j);
 
 	spin_lock(&j->lock);
 	/*
@@ -722,8 +818,7 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
 	 */
 	ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
 		journal_last_seq(j) > seq_to_flush ||
-		(fifo_used(&j->pin) == 1 &&
-		 atomic_read(&fifo_peek_front(&j->pin).count) == 1);
+		!fifo_used(&j->pin);
 
 	spin_unlock(&j->lock);
 	mutex_unlock(&j->reclaim_lock);
@@ -733,6 +828,7 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush,
 
 bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
 {
+	/* time_stats this */
 	bool did_work = false;
 
 	if (!test_bit(JOURNAL_STARTED, &j->flags))
@@ -768,23 +864,37 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
 
 	mutex_lock(&c->replicas_gc_lock);
 	bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
 
-	seq = 0;
+	/*
+	 * Now that we've populated replicas_gc, write to the journal to mark
+	 * active journal devices. This handles the case where the journal might
+	 * be empty. Otherwise we could clear all journal replicas and
+	 * temporarily put the fs into an unrecoverable state. Journal recovery
+	 * expects to find devices marked for journal data on unclean mount.
+	 */
+	ret = bch2_journal_meta(&c->journal);
+	if (ret)
+		goto err;
 
+	seq = 0;
 	spin_lock(&j->lock);
-	while (!ret && seq < j->pin.back) {
+	while (!ret) {
 		struct bch_replicas_padded replicas;
 
 		seq = max(seq, journal_last_seq(j));
+		if (seq >= j->pin.back)
+			break;
+
 		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
 					 journal_seq_pin(j, seq)->devs);
 		seq++;
 
-		spin_unlock(&j->lock);
-		ret = bch2_mark_replicas(c, &replicas.e);
-		spin_lock(&j->lock);
+		if (replicas.e.nr_devs) {
+			spin_unlock(&j->lock);
+			ret = bch2_mark_replicas(c, &replicas.e);
+			spin_lock(&j->lock);
+		}
 	}
 	spin_unlock(&j->lock);
-
+err:
 	ret = bch2_replicas_gc_end(c, ret);
 	mutex_unlock(&c->replicas_gc_lock);
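
journal_flush_done() above flushes in two passes, key-cache and miscellaneous
pins first and btree-node pins second, plausibly because flushing the former
can create new btree-node work that only the second pass releases.
Schematically (flush_pins() here is a hypothetical helper, not a bcachefs
function):

#include <stdbool.h>
#include <stdint.h>

enum flush_pin_type { FLUSH_BTREE, FLUSH_KEY_CACHE, FLUSH_OTHER };

/* hypothetical: flush pins of the given types up to seq, return count */
static unsigned flush_pins(uint64_t seq_to_flush, unsigned type_mask);

static bool flush_all_pins(uint64_t seq_to_flush)
{
	bool did_work = false;

	/* pass 1: key cache + miscellaneous pins */
	did_work |= flush_pins(seq_to_flush,
			       (1U << FLUSH_KEY_CACHE) | (1U << FLUSH_OTHER)) != 0;
	/* pass 2: btree node pins */
	did_work |= flush_pins(seq_to_flush, 1U << FLUSH_BTREE) != 0;

	return did_work;
}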