old.v, new.v)) != old.v);
}
-static inline unsigned get_unwritten_sectors(struct journal *j, unsigned *idx)
-{
- unsigned sectors = 0;
-
- while (!sectors && *idx != j->reservations.idx) {
- sectors = j->buf[*idx].sectors;
-
- *idx = (*idx + 1) & JOURNAL_BUF_MASK;
- }
-
- return sectors;
-}
-
static struct journal_space
journal_dev_space_available(struct journal *j, struct bch_dev *ca,
enum journal_space_from from)
{
struct journal_device *ja = &ca->journal;
- unsigned sectors, buckets, unwritten, idx = j->reservations.unwritten_idx;
+ unsigned sectors, buckets, unwritten;
+ u64 seq;
if (from == journal_space_total)
return (struct journal_space) {
	 * Note that we don't allocate the space for a journal entry
* until we write it out - thus, account for it here:
*/
- while ((unwritten = get_unwritten_sectors(j, &idx))) {
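+	/*
+	 * Walk the as yet unwritten journal entries by sequence number;
+	 * seq & JOURNAL_BUF_MASK gives each entry's slot in j->buf:
+	 */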
+ for (seq = journal_last_unwritten_seq(j);
+ seq <= journal_cur_seq(j);
+ seq++) {
+ unwritten = j->buf[seq & JOURNAL_BUF_MASK].sectors;
+
+ if (!unwritten)
+ continue;
+
/* entry won't fit on this device, skip: */
if (unwritten > ca->mi.bucket_size)
continue;
j->can_discard = can_discard;
if (nr_online < c->opts.metadata_replicas_required) {
- ret = cur_entry_insufficient_devices;
+ ret = JOURNAL_ERR_insufficient_devices;
goto out;
}
total = j->space[journal_space_total].total;
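+	/*
+	 * No clean space anywhere, and every journal write up to the current
+	 * sequence number has already completed - there is nothing left in
+	 * flight to wait on, so the journal is stuck:
+	 */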
if (!clean_ondisk &&
- j->reservations.idx ==
- j->reservations.unwritten_idx) {
- char *buf = kmalloc(4096, GFP_ATOMIC);
-
- bch_err(c, "journal stuck");
- if (buf) {
- __bch2_journal_debug_to_text(&_PBUF(buf, 4096), j);
- pr_err("\n%s", buf);
- kfree(buf);
- }
+ journal_cur_seq(j) == j->seq_ondisk) {
+ struct printbuf buf = PRINTBUF;
+
+ __bch2_journal_debug_to_text(&buf, j);
+ bch_err(c, "journal stuck\n%s", buf.buf);
+ printbuf_exit(&buf);
+ /*
+ * Hack: bch2_fatal_error() calls bch2_journal_halt() which
+ * takes journal lock:
+ */
+ spin_unlock(&j->lock);
bch2_fatal_error(c);
- ret = cur_entry_journal_stuck;
+ spin_lock(&j->lock);
+
+ ret = JOURNAL_ERR_journal_stuck;
} else if (!j->space[journal_space_discarded].next_entry)
- ret = cur_entry_journal_full;
- else if (!fifo_free(&j->pin))
- ret = cur_entry_journal_pin_full;
+ ret = JOURNAL_ERR_journal_full;
if ((j->space[journal_space_clean_ondisk].next_entry <
j->space[journal_space_clean_ondisk].total) &&
j->cur_entry_sectors = !ret ? j->space[journal_space_discarded].next_entry : 0;
j->cur_entry_error = ret;
journal_set_remaining(j, u64s_remaining);
- journal_check_may_get_unreserved(j);
+ journal_set_watermark(j);
if (!ret)
journal_wake(j);
struct journal_device *ja = &ca->journal;
while (should_discard_bucket(j, ja)) {
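+		/*
+		 * nochanges mode means no writes at all may be issued to the
+		 * device, so don't issue discards either:
+		 */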
- if (ca->mi.discard &&
+ if (!c->opts.nochanges &&
+ ca->mi.discard &&
blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca,
if (atomic_dec_and_test(&pin_list->count) &&
pin_list == &fifo_peek_front(&j->pin))
bch2_journal_reclaim_fast(j);
- else if (fifo_used(&j->pin) == 1 &&
- atomic_read(&pin_list->count) == 1)
- journal_wake(j);
}
void bch2_journal_pin_drop(struct journal *j,
u64 seq;
int err;
- if (!test_bit(JOURNAL_RECLAIM_STARTED, &j->flags))
- return 0;
-
lockdep_assert_held(&j->reclaim_lock);
while (1) {
* 512 journal entries or 25% of all journal buckets, then
* journal_next_bucket() should not stall.
*/
-static int __bch2_journal_reclaim(struct journal *j, bool direct)
+static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
bool kthread = (current->flags & PF_KTHREAD) != 0;
* make sure to flush at least one journal pin:
*/
if (time_after(jiffies, j->last_flushed +
- msecs_to_jiffies(j->reclaim_delay_ms)))
+ msecs_to_jiffies(c->opts.journal_reclaim_delay)))
min_nr = 1;
if (j->prereserved.reserved * 4 > j->prereserved.remaining)
if (fifo_free(&j->pin) <= 32)
min_nr = 1;
- trace_journal_reclaim_start(c,
- min_nr,
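+	/* Start flushing if more than half the btree node cache is dirty: */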
+ if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+ min_nr = 1;
+
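+	/* Ask journal_flush_pins() to flush up to 128 btree key cache keys: */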
+ min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
+
+ trace_journal_reclaim_start(c, direct, kicked,
+ min_nr, min_key_cache,
j->prereserved.reserved,
j->prereserved.remaining,
atomic_read(&c->btree_cache.dirty),
atomic_long_read(&c->btree_key_cache.nr_dirty),
atomic_long_read(&c->btree_key_cache.nr_keys));
- min_key_cache = min(bch2_nr_btree_keys_need_flush(c), 128UL);
-
nr_flushed = journal_flush_pins(j, seq_to_flush,
min_nr, min_key_cache);
if (nr_flushed)
wake_up(&j->reclaim_wait);
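+
+		/*
+		 * Continue looping only while pins are actually getting
+		 * flushed - bail out if we stop making progress:
+		 */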
- } while ((min_nr || min_key_cache) && !direct);
+ } while ((min_nr || min_key_cache) && nr_flushed && !direct);
memalloc_noreclaim_restore(flags);
int bch2_journal_reclaim(struct journal *j)
{
- return __bch2_journal_reclaim(j, true);
+ return __bch2_journal_reclaim(j, true, true);
}
static int bch2_journal_reclaim_thread(void *arg)
{
struct journal *j = arg;
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned long delay, now;
+ bool journal_empty;
int ret = 0;
set_freezable();
- kthread_wait_freezable(test_bit(JOURNAL_RECLAIM_STARTED, &j->flags));
-
j->last_flushed = jiffies;
while (!ret && !kthread_should_stop()) {
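+		/*
+		 * Note whether this iteration was explicitly kicked before
+		 * clearing the flag:
+		 */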
+ bool kicked = j->reclaim_kicked;
+
j->reclaim_kicked = false;
mutex_lock(&j->reclaim_lock);
- ret = __bch2_journal_reclaim(j, false);
+ ret = __bch2_journal_reclaim(j, false, kicked);
mutex_unlock(&j->reclaim_lock);
now = jiffies;
- delay = msecs_to_jiffies(j->reclaim_delay_ms);
+ delay = msecs_to_jiffies(c->opts.journal_reclaim_delay);
j->next_reclaim = j->last_flushed + delay;
if (!time_in_range(j->next_reclaim, now, now + delay))
break;
if (j->reclaim_kicked)
break;
- if (time_after_eq(jiffies, j->next_reclaim))
- break;
- freezable_schedule_timeout(j->next_reclaim - jiffies);
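+		/*
+		 * An empty pin fifo means there is nothing to reclaim; sleep
+		 * until we're kicked rather than waking up on a timer:
+		 */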
+ spin_lock(&j->lock);
+ journal_empty = fifo_empty(&j->pin);
+ spin_unlock(&j->lock);
+
+ if (journal_empty)
+ freezable_schedule();
+ else if (time_after(j->next_reclaim, jiffies))
+ freezable_schedule_timeout(j->next_reclaim - jiffies);
+ else
+ break;
}
__set_current_state(TASK_RUNNING);
}
mutex_lock(&j->reclaim_lock);
- *did_work = journal_flush_pins(j, seq_to_flush, 0, 0) != 0;
+ if (journal_flush_pins(j, seq_to_flush, 0, 0))
+ *did_work = true;
spin_lock(&j->lock);
	/*
	 * If journal replay hasn't completed, the unreplayed journal entries
	 * hold refs on their corresponding sequence numbers
	 */
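+	/*
+	 * Also done once everything up to seq_to_flush has been flushed, or
+	 * the pin fifo has been emptied entirely:
+	 */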
ret = !test_bit(JOURNAL_REPLAY_DONE, &j->flags) ||
journal_last_seq(j) > seq_to_flush ||
- (fifo_used(&j->pin) == 1 &&
- atomic_read(&fifo_peek_front(&j->pin).count) == 1);
+ !fifo_used(&j->pin);
spin_unlock(&j->lock);
mutex_unlock(&j->reclaim_lock);
seq = 0;
spin_lock(&j->lock);
- while (!ret && seq < j->pin.back) {
+ while (!ret) {
struct bch_replicas_padded replicas;
seq = max(seq, journal_last_seq(j));
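+		/*
+		 * seq may equal j->pin.back here if the fifo is empty - check
+		 * the bound after clamping so we never look up a pin list past
+		 * the end:
+		 */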
+ if (seq >= j->pin.back)
+ break;
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
journal_seq_pin(j, seq)->devs);
seq++;