+// SPDX-License-Identifier: GPL-2.0
/*
* bcachefs journalling code, for btree insertions
*
#include <trace/events/bcachefs.h>
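+
+/*
+ * Oldest sequence number whose journal entry hasn't been written out yet:
+ * either the current entry, or the previous one if that buffer is still in
+ * flight:
+ */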
+static u64 last_unwritten_seq(struct journal *j)
+{
+ union journal_res_state s = READ_ONCE(j->reservations);
+
+ lockdep_assert_held(&j->lock);
+
+ return journal_cur_seq(j) - s.prev_buf_unwritten;
+}
+
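+/* Returns true if the entry for @seq hasn't been written out yet: */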
+static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
+{
+ return seq >= last_unwritten_seq(j);
+}
+
static bool __journal_entry_is_open(union journal_res_state state)
{
	return state.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL;
}

static bool journal_entry_is_open(struct journal *j)
{
	return __journal_entry_is_open(j->reservations);
}
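+
+/*
+ * Returns the journal buffer for @seq if that entry is still unwritten (and
+ * thus still in memory), NULL otherwise:
+ */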
+static inline struct journal_buf *
+journal_seq_to_buf(struct journal *j, u64 seq)
+{
+ struct journal_buf *buf = NULL;
+
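+	/* seq should be for a journal entry that has been opened: */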
+ EBUG_ON(seq > journal_cur_seq(j));
+ EBUG_ON(seq == journal_cur_seq(j) &&
+ j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);
+
+ if (journal_seq_unwritten(j, seq)) {
+ buf = j->buf + (seq & 1);
+ EBUG_ON(le64_to_cpu(buf->data->seq) != seq);
+ }
+ return buf;
+}
+
static void journal_pin_new_entry(struct journal *j, int count)
{
struct journal_entry_pin_list *p;
static void bch2_journal_buf_init(struct journal *j)
{
struct journal_buf *buf = journal_cur_buf(j);
+ bkey_extent_init(&buf->key);
+
memset(buf->has_inode, 0, sizeof(buf->has_inode));
memset(buf->data, 0, sizeof(*buf->data));
buf->data->u64s = 0;
}
-static inline bool journal_entry_empty(struct jset *j)
-{
- struct jset_entry *i;
-
- if (j->seq != j->last_seq)
- return false;
-
- vstruct_for_each(j, i)
- if (i->type || i->u64s)
- return false;
- return true;
-}
-
void bch2_journal_halt(struct journal *j)
{
union journal_res_state old, new;
} while ((v = atomic64_cmpxchg(&j->reservations.counter,
old.v, new.v)) != old.v);
+ j->err_seq = journal_cur_seq(j);
journal_wake(j);
closure_wake_up(&journal_cur_buf(j)->wait);
- closure_wake_up(&journal_prev_buf(j)->wait);
}
/* journal entry close/open: */
void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
{
- struct journal_buf *w = journal_prev_buf(j);
-
- atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);
-
if (!need_write_just_set &&
test_bit(JOURNAL_NEED_WRITE, &j->flags))
bch2_time_stats_update(j->delay_time,
BUG_ON(sectors > buf->sectors);
buf->sectors = sectors;
- bkey_extent_init(&buf->key);
-
/*
* We have to set last_seq here, _before_ opening a new journal entry:
*
* Hence, we want to update/set last_seq on the current journal entry right
* before we open a new one:
*/
- bch2_journal_reclaim_fast(j);
buf->data->last_seq = cpu_to_le64(journal_last_seq(j));
- if (journal_entry_empty(buf->data))
- clear_bit(JOURNAL_NOT_EMPTY, &j->flags);
- else
- set_bit(JOURNAL_NOT_EMPTY, &j->flags);
-
journal_pin_new_entry(j, 1);
bch2_journal_buf_init(j);
cancel_delayed_work(&j->write_work);
- /* ugh - might be called from __journal_res_get() under wait_event() */
- __set_current_state(TASK_RUNNING);
+ bch2_journal_space_available(j);
+
bch2_journal_buf_put(j, old.idx, set_need_write);
return true;
}
static int journal_entry_open(struct journal *j)
{
struct journal_buf *buf = journal_cur_buf(j);
union journal_res_state old, new;
- int u64s, ret;
+ int u64s;
u64 v;
lockdep_assert_held(&j->lock);
if (j->blocked)
return -EAGAIN;
- if (!fifo_free(&j->pin))
- return -ENOSPC;
+ if (j->cur_entry_error)
+ return j->cur_entry_error;
- ret = bch2_journal_space_available(j);
- if (ret)
- return ret;
+ BUG_ON(!j->cur_entry_sectors);
buf->u64s_reserved = j->entry_u64s_reserved;
buf->disk_sectors = j->cur_entry_sectors;
/* Handle any already added entries */
new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
+
+ EBUG_ON(journal_state_count(new, new.idx));
journal_state_inc(&new);
} while ((v = atomic64_cmpxchg(&j->reservations.counter,
old.v, new.v)) != old.v);
return seq;
}
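+
+/*
+ * Mark the journal entry for @seq as containing updates for @inode, by
+ * setting the inode's hash bit in that buffer's has_inode filter:
+ */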
+void bch2_journal_set_has_inum(struct journal *j, u64 inode, u64 seq)
+{
+ size_t h = hash_64(inode, ilog2(sizeof(j->buf[0].has_inode) * 8));
+ struct journal_buf *buf;
+
+ spin_lock(&j->lock);
+
+ if ((buf = journal_seq_to_buf(j, seq)))
+ set_bit(h, buf->has_inode);
+
+ spin_unlock(&j->lock);
+}
+
static int __journal_res_get(struct journal *j, struct journal_res *res,
unsigned flags)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_buf *buf;
+ bool can_discard;
int ret;
retry:
if (journal_res_get_fast(j, res, flags))
return 0;
}
+ if (!(flags & JOURNAL_RES_GET_RESERVED) &&
+ !test_bit(JOURNAL_MAY_GET_UNRESERVED, &j->flags)) {
+ /*
+ * Don't want to close current journal entry, just need to
+ * invoke reclaim:
+ */
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
/*
* If we couldn't get a reservation because the current buf filled up,
* and we had room for a bigger entry on disk, signal that we want to
} else {
ret = journal_entry_open(j);
}
-
+unlock:
if ((ret == -EAGAIN || ret == -ENOSPC) &&
!j->res_get_blocked_start)
j->res_get_blocked_start = local_clock() ?: 1;
+ can_discard = j->can_discard;
spin_unlock(&j->lock);
if (!ret)
goto retry;
+
if (ret == -ENOSPC) {
+ if (WARN_ONCE(!can_discard && (flags & JOURNAL_RES_GET_RESERVED),
+ "JOURNAL_RES_GET_RESERVED set but journal full")) {
+ char *buf;
+
+ buf = kmalloc(4096, GFP_NOFS);
+ if (buf) {
+ bch2_journal_debug_to_text(&PBUF(buf), j);
+ pr_err("\n%s", buf);
+ kfree(buf);
+ }
+ }
+
/*
* Journal is full - can't rely on reclaim from work item due to
* freezing:
*/
trace_journal_full(c);
- bch2_journal_reclaim_work(&j->reclaim_work.work);
+
+ if (!(flags & JOURNAL_RES_GET_NONBLOCK)) {
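+		/*
+		 * Try to free up journal buckets by discarding ones the
+		 * journal is done with:
+		 */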
+ if (can_discard) {
+ bch2_journal_do_discards(j);
+ goto retry;
+ }
+
+ if (mutex_trylock(&j->reclaim_lock)) {
+ bch2_journal_reclaim(j);
+ mutex_unlock(&j->reclaim_lock);
+ }
+ }
+
ret = -EAGAIN;
}
int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
				  unsigned flags)
{
int ret;
- wait_event(j->wait,
+ closure_wait_event(&j->async_wait,
(ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
(flags & JOURNAL_RES_GET_NONBLOCK));
return ret;
}
+/* journal_preres: */
+
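+/*
+ * Try to take the prereservation; on failure, kick off journal reclaim to
+ * free up space:
+ */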
+static bool journal_preres_available(struct journal *j,
+ struct journal_preres *res,
+ unsigned new_u64s,
+ unsigned flags)
+{
+ bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags);
+
+ if (!ret)
+ bch2_journal_reclaim_work(&j->reclaim_work.work);
+
+ return ret;
+}
+
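+/* Slowpath: block until the prereservation can be taken, or the journal errors: */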
+int __bch2_journal_preres_get(struct journal *j,
+ struct journal_preres *res,
+ unsigned new_u64s,
+ unsigned flags)
+{
+ int ret;
+
+ closure_wait_event(&j->preres_wait,
+ (ret = bch2_journal_error(j)) ||
+ journal_preres_available(j, res, new_u64s, flags));
+ return ret;
+}
+
/* journal_entry_res: */
void bch2_journal_entry_res_resize(struct journal *j,
if (d <= 0)
goto out;
- j->cur_entry_u64s -= d;
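+	/* clamp at zero so the resize can't push cur_entry_u64s negative: */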
+ j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
smp_mb();
state = READ_ONCE(j->reservations);
/* journal flushing: */
-u64 bch2_journal_last_unwritten_seq(struct journal *j)
-{
- u64 seq;
-
- spin_lock(&j->lock);
- seq = journal_cur_seq(j);
- if (j->reservations.prev_buf_unwritten)
- seq--;
- spin_unlock(&j->lock);
-
- return seq;
-}
-
-/**
- * bch2_journal_open_seq_async - try to open a new journal entry if @seq isn't
- * open yet, or wait if we cannot
- *
- * used by the btree interior update machinery, when it needs to write a new
- * btree root - every journal entry contains the roots of all the btrees, so it
- * doesn't need to bother with getting a journal reservation
- */
-int bch2_journal_open_seq_async(struct journal *j, u64 seq, struct closure *cl)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- int ret;
-
- spin_lock(&j->lock);
-
- /*
- * Can't try to open more than one sequence number ahead:
- */
- BUG_ON(journal_cur_seq(j) < seq && !journal_entry_is_open(j));
-
- if (journal_cur_seq(j) > seq ||
- journal_entry_is_open(j)) {
- spin_unlock(&j->lock);
- return 0;
- }
-
- if (journal_cur_seq(j) < seq &&
- !__journal_entry_close(j)) {
- /* haven't finished writing out the previous one: */
- trace_journal_entry_full(c);
- ret = -EAGAIN;
- } else {
- BUG_ON(journal_cur_seq(j) != seq);
-
- ret = journal_entry_open(j);
- }
-
- if ((ret == -EAGAIN || ret == -ENOSPC) &&
- !j->res_get_blocked_start)
- j->res_get_blocked_start = local_clock() ?: 1;
-
- if (ret == -EAGAIN || ret == -ENOSPC)
- closure_wait(&j->async_wait, cl);
-
- spin_unlock(&j->lock);
-
- if (ret == -ENOSPC) {
- trace_journal_full(c);
- bch2_journal_reclaim_work(&j->reclaim_work.work);
- ret = -EAGAIN;
- }
-
- return ret;
-}
-
-static int journal_seq_error(struct journal *j, u64 seq)
-{
- union journal_res_state state = READ_ONCE(j->reservations);
-
- if (seq == journal_cur_seq(j))
- return bch2_journal_error(j);
-
- if (seq + 1 == journal_cur_seq(j) &&
- !state.prev_buf_unwritten &&
- seq > j->seq_ondisk)
- return -EIO;
-
- return 0;
-}
-
-static inline struct journal_buf *
-journal_seq_to_buf(struct journal *j, u64 seq)
-{
- /* seq should be for a journal entry that has been opened: */
- BUG_ON(seq > journal_cur_seq(j));
- BUG_ON(seq == journal_cur_seq(j) &&
- j->reservations.cur_entry_offset == JOURNAL_ENTRY_CLOSED_VAL);
-
- if (seq == journal_cur_seq(j))
- return journal_cur_buf(j);
- if (seq + 1 == journal_cur_seq(j) &&
- j->reservations.prev_buf_unwritten)
- return journal_prev_buf(j);
- return NULL;
-}
-
-/**
- * bch2_journal_wait_on_seq - wait for a journal entry to be written
- *
- * does _not_ cause @seq to be written immediately - if there is no other
- * activity to cause the relevant journal entry to be filled up or flushed it
- * can wait for an arbitrary amount of time (up to @j->write_delay_ms, which is
- * configurable).
- */
-void bch2_journal_wait_on_seq(struct journal *j, u64 seq,
- struct closure *parent)
-{
- struct journal_buf *buf;
-
- spin_lock(&j->lock);
-
- if ((buf = journal_seq_to_buf(j, seq))) {
- if (!closure_wait(&buf->wait, parent))
- BUG();
-
- if (seq == journal_cur_seq(j)) {
- smp_mb();
- if (bch2_journal_error(j))
- closure_wake_up(&buf->wait);
- }
- }
-
- spin_unlock(&j->lock);
-}
-
/**
* bch2_journal_flush_seq_async - wait for a journal entry to be written
*
-* like bch2_journal_wait_on_seq, except that it triggers a write immediately if
-* necessary
+* Triggers a write of @seq immediately if necessary. Returns 1 if @seq is
+* already on disk, -EIO if it was lost to a journal error, 0 otherwise.
*/
-void bch2_journal_flush_seq_async(struct journal *j, u64 seq,
+int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
struct closure *parent)
{
struct journal_buf *buf;
+ int ret = 0;
spin_lock(&j->lock);
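+	/* journal hit an error before this entry was written: */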
+ if (seq <= j->err_seq) {
+ ret = -EIO;
+ goto out;
+ }
+
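+	/* already written and on disk - nothing to flush: */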
+ if (seq <= j->seq_ondisk) {
+ ret = 1;
+ goto out;
+ }
	if (parent &&
	    (buf = journal_seq_to_buf(j, seq)))
		if (!closure_wait(&buf->wait, parent))
			BUG();

	if (seq == journal_cur_seq(j))
		__journal_entry_close(j);
+out:
spin_unlock(&j->lock);
-}
-
-static int journal_seq_flushed(struct journal *j, u64 seq)
-{
- int ret;
-
- spin_lock(&j->lock);
- ret = seq <= j->seq_ondisk ? 1 : journal_seq_error(j, seq);
-
- if (seq == journal_cur_seq(j))
- __journal_entry_close(j);
- spin_unlock(&j->lock);
-
return ret;
}
u64 start_time = local_clock();
int ret, ret2;
- ret = wait_event_killable(j->wait, (ret2 = journal_seq_flushed(j, seq)));
+ ret = wait_event_killable(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
bch2_time_stats_update(j->flush_seq_time, start_time);
return ret ?: ret2 < 0 ? ret2 : 0;
}
-/**
- * bch2_journal_meta_async - force a journal entry to be written
- */
-void bch2_journal_meta_async(struct journal *j, struct closure *parent)
-{
- struct journal_res res;
-
- memset(&res, 0, sizeof(res));
-
- bch2_journal_res_get(j, &res, jset_u64s(0), 0);
- bch2_journal_res_put(j, &res);
-
- bch2_journal_flush_seq_async(j, res.seq, parent);
-}
-
int bch2_journal_meta(struct journal *j)
{
struct journal_res res;
goto err;
journal_buckets = bch2_sb_resize_journal(&ca->disk_sb,
- nr + sizeof(*journal_buckets) / sizeof(u64));
+ nr + sizeof(*journal_buckets) / sizeof(u64));
if (!journal_buckets)
goto err;
while (ja->nr < nr) {
struct open_bucket *ob = NULL;
+ unsigned pos;
long bucket;
if (new_fs) {
}
if (c) {
- percpu_down_read_preempt_disable(&c->mark_lock);
+ percpu_down_read(&c->mark_lock);
spin_lock(&c->journal.lock);
- } else {
- preempt_disable();
}
- __array_insert_item(ja->buckets, ja->nr, ja->last_idx);
- __array_insert_item(ja->bucket_seq, ja->nr, ja->last_idx);
- __array_insert_item(journal_buckets->buckets, ja->nr, ja->last_idx);
-
- ja->buckets[ja->last_idx] = bucket;
- ja->bucket_seq[ja->last_idx] = 0;
- journal_buckets->buckets[ja->last_idx] = cpu_to_le64(bucket);
+ /*
+ * XXX
+ * For resize at runtime, we should be writing the new
+ * superblock before inserting into the journal array
+ */
- if (ja->last_idx < ja->nr) {
- if (ja->cur_idx >= ja->last_idx)
- ja->cur_idx++;
- ja->last_idx++;
- }
+ pos = ja->nr ? (ja->cur_idx + 1) % ja->nr : 0;
+ __array_insert_item(ja->buckets, ja->nr, pos);
+ __array_insert_item(ja->bucket_seq, ja->nr, pos);
+ __array_insert_item(journal_buckets->buckets, ja->nr, pos);
ja->nr++;
- bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_JOURNAL,
+ ja->buckets[pos] = bucket;
+ ja->bucket_seq[pos] = 0;
+ journal_buckets->buckets[pos] = cpu_to_le64(bucket);
+
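+		/*
+		 * Inserting at @pos shifts every ring index at or after it up
+		 * by one, so bump them to keep pointing at the same buckets:
+		 */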
+ if (pos <= ja->discard_idx)
+ ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
+ if (pos <= ja->dirty_idx_ondisk)
+ ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
+ if (pos <= ja->dirty_idx)
+ ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
+ if (pos <= ja->cur_idx)
+ ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
+
+ bch2_mark_metadata_bucket(c, ca, bucket, BCH_DATA_journal,
ca->mi.bucket_size,
gc_phase(GC_PHASE_SB),
0);
if (c) {
spin_unlock(&c->journal.lock);
- percpu_up_read_preempt_enable(&c->mark_lock);
- } else {
- preempt_enable();
+ percpu_up_read(&c->mark_lock);
}
if (!new_fs)
ret = 0;
err:
+ bch2_sb_resize_journal(&ca->disk_sb,
+ ja->nr + sizeof(*journal_buckets) / sizeof(u64));
kfree(new_bucket_seq);
kfree(new_buckets);
w = j->buf + !state.idx;
ret = state.prev_buf_unwritten &&
- bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), dev_idx);
+ bch2_bkey_has_device(bkey_i_to_s_c(&w->key), dev_idx);
spin_unlock(&j->lock);
return ret;
void bch2_fs_journal_stop(struct journal *j)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bch2_journal_flush_all_pins(j);
wait_event(j->wait, journal_entry_close(j));
- /* do we need to write another journal entry? */
- if (test_bit(JOURNAL_NOT_EMPTY, &j->flags) ||
- c->btree_roots_dirty)
- bch2_journal_meta(j);
+ /*
+ * Always write a new journal entry, to make sure the clock hands are up
+ * to date (and match the superblock)
+ */
+ bch2_journal_meta(j);
journal_quiesce(j);
BUG_ON(!bch2_journal_error(j) &&
- test_bit(JOURNAL_NOT_EMPTY, &j->flags));
+ (journal_entry_is_open(j) ||
+ j->last_empty_seq + 1 != journal_cur_seq(j)));
cancel_delayed_work_sync(&j->write_work);
cancel_delayed_work_sync(&j->reclaim_work);
}
-void bch2_fs_journal_start(struct journal *j)
+int bch2_fs_journal_start(struct journal *j, u64 cur_seq,
+ struct list_head *journal_entries)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_seq_blacklist *bl;
- u64 blacklist = 0;
+ struct journal_entry_pin_list *p;
+ struct journal_replay *i;
+ u64 last_seq = cur_seq, nr, seq;
- list_for_each_entry(bl, &j->seq_blacklist, list)
- blacklist = max(blacklist, bl->end);
+ if (!list_empty(journal_entries))
+ last_seq = le64_to_cpu(list_last_entry(journal_entries,
+ struct journal_replay, list)->j.last_seq);
+
+ nr = cur_seq - last_seq;
+
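+	/*
+	 * Extend the pin fifo if needed: it must have room for every entry we
+	 * have to replay, plus the one we're about to open:
+	 */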
+ if (nr + 1 > j->pin.size) {
+ free_fifo(&j->pin);
+ init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
+ if (!j->pin.data) {
+ bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
+ return -ENOMEM;
+ }
+ }
+
+ j->replay_journal_seq = last_seq;
+ j->replay_journal_seq_end = cur_seq;
+ j->last_seq_ondisk = last_seq;
+ j->pin.front = last_seq;
+ j->pin.back = cur_seq;
+ atomic64_set(&j->seq, cur_seq - 1);
+
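+	/* each entry being replayed starts out with one reference held: */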
+ fifo_for_each_entry_ptr(p, &j->pin, seq) {
+ INIT_LIST_HEAD(&p->list);
+ INIT_LIST_HEAD(&p->flushed);
+ atomic_set(&p->count, 1);
+ p->devs.nr = 0;
+ }
+
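+	/* note which devices each entry found during recovery lives on: */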
+ list_for_each_entry(i, journal_entries, list) {
+ seq = le64_to_cpu(i->j.seq);
+ BUG_ON(seq >= cur_seq);
+
+ if (seq < last_seq)
+ continue;
+
+ journal_seq_pin(j, seq)->devs = i->devs;
+ }
spin_lock(&j->lock);
set_bit(JOURNAL_STARTED, &j->flags);
- while (journal_cur_seq(j) < blacklist)
- journal_pin_new_entry(j, 0);
-
- /*
- * __journal_entry_close() only inits the next journal entry when it
- * closes an open journal entry - the very first journal entry gets
- * initialized here:
- */
journal_pin_new_entry(j, 1);
+
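+	/*
+	 * journal_seq_to_buf() picks buffers by seq & 1 - keep the buffer
+	 * index in step with the sequence number:
+	 */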
+ j->reservations.idx = journal_cur_seq(j);
+
bch2_journal_buf_init(j);
c->last_bucket_seq_cleanup = journal_cur_seq(j);
+ bch2_journal_space_available(j);
spin_unlock(&j->lock);
- /*
- * Adding entries to the next journal entry before allocating space on
- * disk for the next journal entry - this is ok, because these entries
- * only have to go down with the next journal entry we write:
- */
- bch2_journal_seq_blacklist_write(j);
-
- queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
+ return 0;
}
/* init/exit: */
INIT_DELAYED_WORK(&j->write_work, journal_write_work);
INIT_DELAYED_WORK(&j->reclaim_work, bch2_journal_reclaim_work);
init_waitqueue_head(&j->pin_flush_wait);
- mutex_init(&j->blacklist_lock);
- INIT_LIST_HEAD(&j->seq_blacklist);
mutex_init(&j->reclaim_lock);
+ mutex_init(&j->discard_lock);
lockdep_init_map(&j->res_map, "journal res", &res_key, 0);
/* debug: */
-ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
+void bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
- union journal_res_state *s = &j->reservations;
+ union journal_res_state s;
struct bch_dev *ca;
unsigned iter;
rcu_read_lock();
spin_lock(&j->lock);
+ s = READ_ONCE(j->reservations);
- pr_buf(&out,
+ pr_buf(out,
"active journal entries:\t%llu\n"
"seq:\t\t\t%llu\n"
"last_seq:\t\t%llu\n"
"last_seq_ondisk:\t%llu\n"
- "reservation count:\t%u\n"
- "reservation offset:\t%u\n"
- "current entry u64s:\t%u\n"
- "io in flight:\t\t%i\n"
- "need write:\t\t%i\n"
- "dirty:\t\t\t%i\n"
- "replay done:\t\t%i\n",
+ "prereserved:\t\t%u/%u\n"
+ "current entry sectors:\t%u\n"
+ "current entry:\t\t",
fifo_used(&j->pin),
journal_cur_seq(j),
journal_last_seq(j),
j->last_seq_ondisk,
- journal_state_count(*s, s->idx),
- s->cur_entry_offset,
- j->cur_entry_u64s,
- s->prev_buf_unwritten,
+ j->prereserved.reserved,
+ j->prereserved.remaining,
+ j->cur_entry_sectors);
+
+ switch (s.cur_entry_offset) {
+ case JOURNAL_ENTRY_ERROR_VAL:
+ pr_buf(out, "error\n");
+ break;
+ case JOURNAL_ENTRY_CLOSED_VAL:
+ pr_buf(out, "closed\n");
+ break;
+ default:
+ pr_buf(out, "%u/%u\n",
+ s.cur_entry_offset,
+ j->cur_entry_u64s);
+ break;
+ }
+
+ pr_buf(out,
+ "current entry refs:\t%u\n"
+ "prev entry unwritten:\t",
+ journal_state_count(s, s.idx));
+
+ if (s.prev_buf_unwritten)
+ pr_buf(out, "yes, ref %u sectors %u\n",
+ journal_state_count(s, !s.idx),
+ journal_prev_buf(j)->sectors);
+ else
+ pr_buf(out, "no\n");
+
+ pr_buf(out,
+ "need write:\t\t%i\n"
+ "replay done:\t\t%i\n",
test_bit(JOURNAL_NEED_WRITE, &j->flags),
- journal_entry_is_open(j),
test_bit(JOURNAL_REPLAY_DONE, &j->flags));
for_each_member_device_rcu(ca, c, iter,
- &c->rw_devs[BCH_DATA_JOURNAL]) {
+ &c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;
if (!ja->nr)
continue;
- pr_buf(&out,
+ pr_buf(out,
"dev %u:\n"
"\tnr\t\t%u\n"
- "\tcur_idx\t\t%u (seq %llu)\n"
- "\tlast_idx\t%u (seq %llu)\n",
+ "\tavailable\t%u:%u\n"
+ "\tdiscard_idx\t\t%u\n"
+ "\tdirty_idx_ondisk\t%u (seq %llu)\n"
+ "\tdirty_idx\t\t%u (seq %llu)\n"
+ "\tcur_idx\t\t%u (seq %llu)\n",
iter, ja->nr,
- ja->cur_idx, ja->bucket_seq[ja->cur_idx],
- ja->last_idx, ja->bucket_seq[ja->last_idx]);
+ bch2_journal_dev_buckets_available(j, ja, journal_space_discarded),
+ ja->sectors_free,
+ ja->discard_idx,
+ ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk],
+ ja->dirty_idx, ja->bucket_seq[ja->dirty_idx],
+ ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
}
spin_unlock(&j->lock);
rcu_read_unlock();
-
- return out.pos - buf;
}
-ssize_t bch2_journal_print_pins(struct journal *j, char *buf)
+void bch2_journal_pins_to_text(struct printbuf *out, struct journal *j)
{
- struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *pin;
u64 i;
spin_lock(&j->lock);
fifo_for_each_entry_ptr(pin_list, &j->pin, i) {
- pr_buf(&out, "%llu: count %u\n",
+ pr_buf(out, "%llu: count %u\n",
i, atomic_read(&pin_list->count));
list_for_each_entry(pin, &pin_list->list, list)
- pr_buf(&out, "\t%p %pf\n",
+ pr_buf(out, "\t%px %ps\n",
pin, pin->flush);
if (!list_empty(&pin_list->flushed))
- pr_buf(&out, "flushed:\n");
+ pr_buf(out, "flushed:\n");
list_for_each_entry(pin, &pin_list->flushed, list)
- pr_buf(&out, "\t%p %pf\n",
+ pr_buf(out, "\t%px %ps\n",
pin, pin->flush);
}
spin_unlock(&j->lock);
-
- return out.pos - buf;
}