diff --git a/libbcachefs/journal.c b/libbcachefs/journal.c
index d77092aa069ee66826c30da8c1d5316cac857b06..214c8030048292430b07721bd04bac8ea3c44f50 100644
--- a/libbcachefs/journal.c
+++ b/libbcachefs/journal.c
@@ -10,6 +10,7 @@
 #include "bkey_methods.h"
 #include "btree_gc.h"
 #include "btree_update.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
 #include "error.h"
 #include "journal.h"
 #include "journal_reclaim.h"
 #include "journal_sb.h"
 #include "journal_seq_blacklist.h"
-
-#include <trace/events/bcachefs.h>
-
-#define x(n)   #n,
-static const char * const bch2_journal_watermarks[] = {
-       JOURNAL_WATERMARKS()
-       NULL
-};
+#include "trace.h"
 
 static const char * const bch2_journal_errors[] = {
+#define x(n)   #n,
        JOURNAL_ERRORS()
+#undef x
        NULL
 };
-#undef x
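
The bch2_journal_errors table above is built with the X-macro idiom: JOURNAL_ERRORS() expands x() once per error name, and the local #define x(n) #n, stringifies each one; this hunk tightens the macro's scope by moving the #define/#undef pair inside the initializer. A minimal sketch of the idiom with a hypothetical list:

/* Hypothetical X-macro list, mirroring how JOURNAL_ERRORS()/x() interact: */
#define EXAMPLE_ERRORS()	\
	x(ok)			\
	x(full)			\
	x(blocked)

enum example_err {
#define x(n)	EXAMPLE_ERR_##n,
	EXAMPLE_ERRORS()
#undef x
};

static const char * const example_err_strs[] = {
#define x(n)	#n,
	EXAMPLE_ERRORS()
#undef x
	NULL
};

One list definition drives both the enum and the string table, so the two can never drift out of sync.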
 
 static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
 {
@@ -52,6 +47,55 @@ static bool journal_entry_is_open(struct journal *j)
        return __journal_entry_is_open(j->reservations);
 }
 
+static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u64 seq)
+{
+       union journal_res_state s = READ_ONCE(j->reservations);
+       unsigned i = seq & JOURNAL_BUF_MASK;
+       struct journal_buf *buf = j->buf + i;
+
+       prt_printf(out, "seq:");
+       prt_tab(out);
+       prt_printf(out, "%llu", seq);
+       prt_newline(out);
+       printbuf_indent_add(out, 2);
+
+       prt_printf(out, "refcount:");
+       prt_tab(out);
+       prt_printf(out, "%u", journal_state_count(s, i));
+       prt_newline(out);
+
+       prt_printf(out, "size:");
+       prt_tab(out);
+       prt_human_readable_u64(out, vstruct_bytes(buf->data));
+       prt_newline(out);
+
+       prt_printf(out, "expires:");
+       prt_tab(out);
+       prt_printf(out, "%li jiffies", buf->expires - jiffies);
+       prt_newline(out);
+
+       if (buf->write_done)
+               prt_printf(out, "write done\n");
+       else if (buf->write_allocated)
+               prt_printf(out, "write allocated\n");
+       else if (buf->write_started)
+               prt_printf(out, "write started\n");
+
+       printbuf_indent_sub(out, 2);
+}
+
+static void bch2_journal_bufs_to_text(struct printbuf *out, struct journal *j)
+{
+       if (!out->nr_tabstops)
+               printbuf_tabstop_push(out, 24);
+
+       for (u64 seq = journal_last_unwritten_seq(j);
+            seq <= journal_cur_seq(j);
+            seq++)
+               bch2_journal_buf_to_text(out, j, seq);
+       prt_printf(out, "last buf %s\n", journal_entry_is_open(j) ? "open" : "closed");
+}
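
Both helpers above rely on journal buffers living in a small power-of-two ring: seq & JOURNAL_BUF_MASK maps a monotonically increasing sequence number to its slot. A hedged sketch of the mapping, assuming a ring of 4 buffers (the actual count comes from JOURNAL_BUF_NR in journal_types.h):

/* Sketch, assuming 4 buffers so JOURNAL_BUF_MASK == 3: seq 5 -> slot 1,
 * seq 6 -> slot 2, and so on. Slots never alias as long as the window of
 * unwritten entries [last_unwritten_seq, cur_seq] is at most 4 wide,
 * which is what journal_seq_to_buf() below gets to assume. */
static inline struct journal_buf *example_seq_to_slot(struct journal *j, u64 seq)
{
	return j->buf + (seq & JOURNAL_BUF_MASK);
}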
+
 static inline struct journal_buf *
 journal_seq_to_buf(struct journal *j, u64 seq)
 {
@@ -68,20 +112,110 @@ journal_seq_to_buf(struct journal *j, u64 seq)
 
 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
 {
-       INIT_LIST_HEAD(&p->list);
-       INIT_LIST_HEAD(&p->key_cache_list);
+       unsigned i;
+
+       for (i = 0; i < ARRAY_SIZE(p->list); i++)
+               INIT_LIST_HEAD(&p->list[i]);
        INIT_LIST_HEAD(&p->flushed);
        atomic_set(&p->count, count);
        p->devs.nr = 0;
 }
 
-/* journal entry close/open: */
-
-void __bch2_journal_buf_put(struct journal *j)
+/*
+ * Detect stuck journal conditions and trigger shutdown. Technically the journal
+ * can end up stuck for a variety of reasons, such as a blocked I/O, journal
+ * reservation lockup, etc. Since this is a fatal error with potentially
+ * unpredictable characteristics, we want to be fairly conservative before we
+ * decide to shut things down.
+ *
+ * Consider the journal stuck when it appears full with no ability to commit
+ * btree transactions, to discard journal buckets, nor acquire priority
+ * (reserved watermark) reservation.
+ */
+static inline bool
+journal_error_check_stuck(struct journal *j, int error, unsigned flags)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
+       bool stuck = false;
+       struct printbuf buf = PRINTBUF;
+
+       if (!(error == JOURNAL_ERR_journal_full ||
+             error == JOURNAL_ERR_journal_pin_full) ||
+           nr_unwritten_journal_entries(j) ||
+           (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
+               return stuck;
 
-       closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL);
+       spin_lock(&j->lock);
+
+       if (j->can_discard) {
+               spin_unlock(&j->lock);
+               return stuck;
+       }
+
+       stuck = true;
+
+       /*
+        * The journal shutdown path will set ->err_seq, but do it here first to
+        * serialize against concurrent failures and avoid duplicate error
+        * reports.
+        */
+       if (j->err_seq) {
+               spin_unlock(&j->lock);
+               return stuck;
+       }
+       j->err_seq = journal_cur_seq(j);
+       spin_unlock(&j->lock);
+
+       bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
+               bch2_journal_errors[error]);
+       bch2_journal_debug_to_text(&buf, j);
+       bch_err(c, "%s", buf.buf);
+
+       printbuf_reset(&buf);
+       bch2_journal_pins_to_text(&buf, j);
+       bch_err(c, "Journal pins:\n%s", buf.buf);
+       printbuf_exit(&buf);
+
+       bch2_fatal_error(c);
+       dump_stack();
+
+       return stuck;
+}
+
+void bch2_journal_do_writes(struct journal *j)
+{
+       for (u64 seq = journal_last_unwritten_seq(j);
+            seq <= journal_cur_seq(j);
+            seq++) {
+               unsigned idx = seq & JOURNAL_BUF_MASK;
+               struct journal_buf *w = j->buf + idx;
+
+               if (w->write_started && !w->write_allocated)
+                       break;
+               if (w->write_started)
+                       continue;
+
+               if (!journal_state_count(j->reservations, idx)) {
+                       w->write_started = true;
+                       closure_call(&w->io, bch2_journal_write, j->wq, NULL);
+               }
+
+               break;
+       }
+}
+
+/*
+ * Final processing when the last reference of a journal buffer has been
+ * dropped. Drop the pin list reference acquired at journal entry open and write
+ * the buffer, if requested.
+ */
+void bch2_journal_buf_put_final(struct journal *j, u64 seq)
+{
+       lockdep_assert_held(&j->lock);
+
+       if (__bch2_journal_pin_put(j, seq))
+               bch2_journal_reclaim_fast(j);
+       bch2_journal_do_writes(j);
 }
 
 /*
@@ -90,7 +224,7 @@ void __bch2_journal_buf_put(struct journal *j)
  * We don't close a journal_buf until the next journal_buf is finished writing,
  * and can be opened again - this also initializes the next journal_buf:
  */
-static void __journal_entry_close(struct journal *j, unsigned closed_val)
+static void __journal_entry_close(struct journal *j, unsigned closed_val, bool trace)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct journal_buf *buf = journal_cur_buf(j);
@@ -119,6 +253,18 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
        /* Close out old buffer: */
        buf->data->u64s         = cpu_to_le32(old.cur_entry_offset);
 
+       if (trace_journal_entry_close_enabled() && trace) {
+               struct printbuf pbuf = PRINTBUF;
+               pbuf.atomic++;
+
+               prt_str(&pbuf, "entry size: ");
+               prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
+               prt_newline(&pbuf);
+               bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
+               trace_journal_entry_close(c, pbuf.buf);
+               printbuf_exit(&pbuf);
+       }
+
        sectors = vstruct_blocks_plus(buf->data, c->block_bits,
                                      buf->u64s_reserved) << c->block_bits;
        BUG_ON(sectors > buf->sectors);
@@ -147,21 +293,20 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val)
        buf->data->last_seq     = cpu_to_le64(buf->last_seq);
        BUG_ON(buf->last_seq > le64_to_cpu(buf->data->seq));
 
-       __bch2_journal_pin_put(j, le64_to_cpu(buf->data->seq));
-
        cancel_delayed_work(&j->write_work);
 
        bch2_journal_space_available(j);
 
-       bch2_journal_buf_put(j, old.idx);
+       __bch2_journal_buf_put(j, old.idx, le64_to_cpu(buf->data->seq));
 }
 
 void bch2_journal_halt(struct journal *j)
 {
        spin_lock(&j->lock);
-       __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
+       __journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL, true);
        if (!j->err_seq)
                j->err_seq = journal_cur_seq(j);
+       journal_wake(j);
        spin_unlock(&j->lock);
 }
 
@@ -172,7 +317,7 @@ static bool journal_entry_want_write(struct journal *j)
 
        /* Don't close it yet if we already have a write in flight: */
        if (ret)
-               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        else if (nr_unwritten_journal_entries(j)) {
                struct journal_buf *buf = journal_cur_buf(j);
 
@@ -185,7 +330,7 @@ static bool journal_entry_want_write(struct journal *j)
        return ret;
 }
 
-static bool journal_entry_close(struct journal *j)
+bool bch2_journal_entry_close(struct journal *j)
 {
        bool ret;
 
@@ -199,12 +344,6 @@ static bool journal_entry_close(struct journal *j)
 /*
  * should _only_ be called from journal_res_get() - when we actually want a
  * journal reservation - journal entry is open means journal is dirty:
- *
- * returns:
- * 0:          success
- * -ENOSPC:    journal currently full, must invoke reclaim
- * -EAGAIN:    journal blocked, must wait
- * -EROFS:     insufficient rw devices or journal error
  */
 static int journal_entry_open(struct journal *j)
 {
@@ -231,7 +370,7 @@ static int journal_entry_open(struct journal *j)
        if (!fifo_free(&j->pin))
                return JOURNAL_ERR_journal_pin_full;
 
-       if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) - 1)
+       if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
                return JOURNAL_ERR_max_in_flight;
 
        BUG_ON(!j->cur_entry_sectors);
@@ -250,7 +389,7 @@ static int journal_entry_open(struct journal *j)
                journal_entry_overhead(j);
        u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
 
-       if (u64s <= 0)
+       if (u64s <= (ssize_t) j->early_journal_entries.nr)
                return JOURNAL_ERR_journal_full;
 
        if (fifo_empty(&j->pin) && j->reclaim_thread)
@@ -263,18 +402,30 @@ static int journal_entry_open(struct journal *j)
        atomic64_inc(&j->seq);
        journal_pin_list_init(fifo_push_ref(&j->pin), 1);
 
+       BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
+
        BUG_ON(j->buf + (journal_cur_seq(j) & JOURNAL_BUF_MASK) != buf);
 
        bkey_extent_init(&buf->key);
-       buf->noflush    = false;
-       buf->must_flush = false;
-       buf->separate_flush = false;
-       buf->flush_time = 0;
+       buf->noflush            = false;
+       buf->must_flush         = false;
+       buf->separate_flush     = false;
+       buf->flush_time         = 0;
+       buf->need_flush_to_write_buffer = true;
+       buf->write_started      = false;
+       buf->write_allocated    = false;
+       buf->write_done         = false;
 
        memset(buf->data, 0, sizeof(*buf->data));
        buf->data->seq  = cpu_to_le64(journal_cur_seq(j));
        buf->data->u64s = 0;
 
+       if (j->early_journal_entries.nr) {
+               memcpy(buf->data->_data, j->early_journal_entries.data,
+                      j->early_journal_entries.nr * sizeof(u64));
+               le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
+       }
+
        /*
         * Must be set before marking the journal entry as open:
         */
@@ -291,19 +442,20 @@ static int journal_entry_open(struct journal *j)
                BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
 
                journal_state_inc(&new);
-               new.cur_entry_offset = 0;
+
+               /* Handle any already added entries */
+               new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
        } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                       old.v, new.v)) != old.v);
 
-       if (j->res_get_blocked_start)
-               bch2_time_stats_update(j->blocked_time,
-                                      j->res_get_blocked_start);
-       j->res_get_blocked_start = 0;
-
-       mod_delayed_work(c->io_complete_wq,
-                        &j->write_work,
-                        msecs_to_jiffies(c->opts.journal_flush_delay));
+       if (nr_unwritten_journal_entries(j) == 1)
+               mod_delayed_work(j->wq,
+                                &j->write_work,
+                                msecs_to_jiffies(c->opts.journal_flush_delay));
        journal_wake(j);
+
+       if (j->early_journal_entries.nr)
+               darray_exit(&j->early_journal_entries);
        return 0;
 }
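
journal_entry_open() packs the open/closed/error state of the current entry into cur_entry_offset itself: ordinary offsets mean open, and two sentinel values mark closed and error, which is why u64s is clamped to JOURNAL_ENTRY_CLOSED_VAL - 1 above. A sketch of the scheme with made-up sentinel values (the real definitions live with union journal_res_state in journal_types.h):

/* Hypothetical sentinel layout illustrating the encoding: */
#define EX_ENTRY_OFFSET_MAX	((1U << 20) - 1)
#define EX_ENTRY_CLOSED_VAL	(EX_ENTRY_OFFSET_MAX - 1)
#define EX_ENTRY_ERROR_VAL	(EX_ENTRY_OFFSET_MAX)

static inline bool ex_entry_is_open(unsigned cur_entry_offset)
{
	/* anything below the closed sentinel is a real offset: */
	return cur_entry_offset < EX_ENTRY_CLOSED_VAL;
}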
 
@@ -312,7 +464,7 @@ static bool journal_quiesced(struct journal *j)
        bool ret = atomic64_read(&j->seq) == j->seq_ondisk;
 
        if (!ret)
-               journal_entry_close(j);
+               bch2_journal_entry_close(j);
        return ret;
 }
 
@@ -324,20 +476,16 @@ static void journal_quiesce(struct journal *j)
 static void journal_write_work(struct work_struct *work)
 {
        struct journal *j = container_of(work, struct journal, write_work.work);
-       struct bch_fs *c = container_of(j, struct bch_fs, journal);
-       long delta;
 
        spin_lock(&j->lock);
-       if (!__journal_entry_is_open(j->reservations))
-               goto unlock;
+       if (__journal_entry_is_open(j->reservations)) {
+               long delta = journal_cur_buf(j)->expires - jiffies;
 
-       delta = journal_cur_buf(j)->expires - jiffies;
-
-       if (delta > 0)
-               mod_delayed_work(c->io_complete_wq, &j->write_work, delta);
-       else
-               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
-unlock:
+               if (delta > 0)
+                       mod_delayed_work(j->wq, &j->write_work, delta);
+               else
+                       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
+       }
        spin_unlock(&j->lock);
 }
 
@@ -352,27 +500,32 @@ retry:
        if (journal_res_get_fast(j, res, flags))
                return 0;
 
+       if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
+               ret = JOURNAL_ERR_journal_full;
+               can_discard = j->can_discard;
+               goto out;
+       }
+
+       if (j->blocked)
+               return -BCH_ERR_journal_res_get_blocked;
+
        if (bch2_journal_error(j))
-               return -EROFS;
+               return -BCH_ERR_erofs_journal_err;
+
+       if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
+               ret = JOURNAL_ERR_max_in_flight;
+               goto out;
+       }
 
        spin_lock(&j->lock);
 
        /*
         * Recheck after taking the lock, so we don't race with another thread
-        * that just did journal_entry_open() and call journal_entry_close()
+        * that just did journal_entry_open() and call bch2_journal_entry_close()
         * unnecessarily
         */
        if (journal_res_get_fast(j, res, flags)) {
-               spin_unlock(&j->lock);
-               return 0;
-       }
-
-       if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark) {
-               /*
-                * Don't want to close current journal entry, just need to
-                * invoke reclaim:
-                */
-               ret = JOURNAL_ERR_journal_full;
+               ret = 0;
                goto unlock;
        }
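
The recheck of journal_res_get_fast() under j->lock above is the standard optimistic-fastpath idiom: the lockless attempt can race with another thread opening a new entry, so it has to be repeated under the lock before committing to the slow path. A generic sketch of the shape (try_fast() and slow_path() are hypothetical):

/* Generic sketch of the lockless-fastpath-then-recheck pattern. */
static int example_resource_get(struct example_resource *r)
{
	if (try_fast(r))		/* optimistic, no lock taken */
		return 0;

	spin_lock(&r->lock);
	if (try_fast(r)) {		/* recheck: another thread may have
					 * refilled while we took the lock */
		spin_unlock(&r->lock);
		return 0;
	}
	slow_path(r);			/* e.g. close + reopen the entry */
	spin_unlock(&r->lock);
	return -EAGAIN;
}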
 
@@ -387,44 +540,29 @@ retry:
            buf->buf_size < JOURNAL_ENTRY_SIZE_MAX)
                j->buf_size_want = max(j->buf_size_want, buf->buf_size << 1);
 
-       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
-       ret = journal_entry_open(j);
-
-       if (ret == JOURNAL_ERR_max_in_flight)
-               trace_journal_entry_full(c);
+       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, false);
+       ret = journal_entry_open(j) ?: JOURNAL_ERR_retry;
 unlock:
-       if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
-           !j->res_get_blocked_start) {
-               j->res_get_blocked_start = local_clock() ?: 1;
-               trace_journal_full(c);
-       }
-
        can_discard = j->can_discard;
        spin_unlock(&j->lock);
-
-       if (!ret)
+out:
+       if (ret == JOURNAL_ERR_retry)
                goto retry;
+       if (!ret)
+               return 0;
 
-       if ((ret == JOURNAL_ERR_journal_full ||
-            ret == JOURNAL_ERR_journal_pin_full) &&
-           !can_discard &&
-           !nr_unwritten_journal_entries(j) &&
-           (flags & JOURNAL_WATERMARK_MASK) == JOURNAL_WATERMARK_reserved) {
-               struct printbuf buf = PRINTBUF;
-
-               bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (ret %s)",
-                       bch2_journal_errors[ret]);
-
-               bch2_journal_debug_to_text(&buf, j);
-               bch_err(c, "%s", buf.buf);
+       if (journal_error_check_stuck(j, ret, flags))
+               ret = -BCH_ERR_journal_res_get_blocked;
 
-               printbuf_reset(&buf);
-               bch2_journal_pins_to_text(&buf, j);
-               bch_err(c, "Journal pins:\n%s", buf.buf);
+       if (ret == JOURNAL_ERR_max_in_flight &&
+           track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true)) {
 
+               struct printbuf buf = PRINTBUF;
+               prt_printf(&buf, "seq %llu\n", journal_cur_seq(j));
+               bch2_journal_bufs_to_text(&buf, j);
+               trace_journal_entry_full(c, buf.buf);
                printbuf_exit(&buf);
-               bch2_fatal_error(c);
-               dump_stack();
+               count_event(c, journal_entry_full);
        }
 
        /*
@@ -445,7 +583,9 @@ unlock:
                }
        }
 
-       return ret == JOURNAL_ERR_insufficient_devices ? -EROFS : -EAGAIN;
+       return ret == JOURNAL_ERR_insufficient_devices
+               ? -BCH_ERR_erofs_journal_err
+               : -BCH_ERR_journal_res_get_blocked;
 }
 
 /*
@@ -464,41 +604,11 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
        int ret;
 
        closure_wait_event(&j->async_wait,
-                  (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
+                  (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
                   (flags & JOURNAL_RES_GET_NONBLOCK));
        return ret;
 }
 
-/* journal_preres: */
-
-static bool journal_preres_available(struct journal *j,
-                                    struct journal_preres *res,
-                                    unsigned new_u64s,
-                                    unsigned flags)
-{
-       bool ret = bch2_journal_preres_get_fast(j, res, new_u64s, flags, true);
-
-       if (!ret && mutex_trylock(&j->reclaim_lock)) {
-               bch2_journal_reclaim(j);
-               mutex_unlock(&j->reclaim_lock);
-       }
-
-       return ret;
-}
-
-int __bch2_journal_preres_get(struct journal *j,
-                             struct journal_preres *res,
-                             unsigned new_u64s,
-                             unsigned flags)
-{
-       int ret;
-
-       closure_wait_event(&j->preres_wait,
-                  (ret = bch2_journal_error(j)) ||
-                  journal_preres_available(j, res, new_u64s, flags));
-       return ret;
-}
-
 /* journal_entry_res: */
 
 void bch2_journal_entry_res_resize(struct journal *j,
@@ -524,7 +634,7 @@ void bch2_journal_entry_res_resize(struct journal *j,
                /*
                 * Not enough room in current journal entry, have to flush it:
                 */
-               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
        } else {
                journal_cur_buf(j)->u64s_reserved += d;
        }
@@ -537,8 +647,13 @@ out:
 
 /**
  * bch2_journal_flush_seq_async - wait for a journal entry to be written
+ * @j:         journal object
+ * @seq:       seq to flush
+ * @parent:    closure object to wait with
+ * Returns:    1 if @seq has already been flushed, 0 if @seq is being flushed,
+ *             -EIO if @seq will never be flushed
  *
- * like bch2_journal_wait_on_seq, except that it triggers a write immediately if
+ * Like bch2_journal_wait_on_seq, except that it triggers a write immediately if
  * necessary
  */
 int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
@@ -576,7 +691,7 @@ recheck_need_open:
                struct journal_res res = { 0 };
 
                if (journal_entry_is_open(j))
-                       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL);
+                       __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
 
                spin_unlock(&j->lock);
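
Given the return convention documented above (1 if @seq already flushed, 0 if a write is now in flight, negative on error), a synchronous caller can pair the async flush with a stack closure. A hedged usage sketch, error handling simplified:

/* Sketch: synchronously flush @seq via a stack closure. */
static int example_flush_seq_sync(struct journal *j, u64 seq)
{
	struct closure cl;
	int ret;

	closure_init_stack(&cl);
	ret = bch2_journal_flush_seq_async(j, seq, &cl);
	closure_sync(&cl);	/* returns immediately if nothing was queued */

	return ret < 0 ? ret : 0;
}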
 
@@ -638,7 +753,7 @@ int bch2_journal_flush_seq(struct journal *j, u64 seq)
        ret = wait_event_interruptible(j->wait, (ret2 = bch2_journal_flush_seq_async(j, seq, NULL)));
 
        if (!ret)
-               bch2_time_stats_update(j->flush_seq_time, start_time);
+               time_stats_update(j->flush_seq_time, start_time);
 
        return ret ?: ret2 < 0 ? ret2 : 0;
 }
@@ -720,39 +835,6 @@ int bch2_journal_meta(struct journal *j)
        return bch2_journal_flush_seq(j, res.seq);
 }
 
-int bch2_journal_log_msg(struct journal *j, const char *fmt, ...)
-{
-       struct jset_entry_log *entry;
-       struct journal_res res = { 0 };
-       unsigned msglen, u64s;
-       va_list args;
-       int ret;
-
-       va_start(args, fmt);
-       msglen = vsnprintf(NULL, 0, fmt, args) + 1;
-       va_end(args);
-
-       u64s = jset_u64s(DIV_ROUND_UP(msglen, sizeof(u64)));
-
-       ret = bch2_journal_res_get(j, &res, u64s, 0);
-       if (ret)
-               return ret;
-
-       entry = container_of(journal_res_entry(j, &res),
-                            struct jset_entry_log, entry);;
-       memset(entry, 0, u64s * sizeof(u64));
-       entry->entry.type = BCH_JSET_ENTRY_log;
-       entry->entry.u64s = u64s - 1;
-
-       va_start(args, fmt);
-       vsnprintf(entry->d, INT_MAX, fmt, args);
-       va_end(args);
-
-       bch2_journal_res_put(j, &res);
-
-       return bch2_journal_flush_seq(j, res.seq);
-}
-
 /* block/unlock the journal: */
 
 void bch2_journal_unblock(struct journal *j)
@@ -773,6 +855,48 @@ void bch2_journal_block(struct journal *j)
        journal_quiesce(j);
 }
 
+static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
+{
+       struct journal_buf *ret = NULL;
+
+       mutex_lock(&j->buf_lock);
+       spin_lock(&j->lock);
+       max_seq = min(max_seq, journal_cur_seq(j));
+
+       for (u64 seq = journal_last_unwritten_seq(j);
+            seq <= max_seq;
+            seq++) {
+               unsigned idx = seq & JOURNAL_BUF_MASK;
+               struct journal_buf *buf = j->buf + idx;
+
+               if (buf->need_flush_to_write_buffer) {
+                       if (seq == journal_cur_seq(j))
+                               __journal_entry_close(j, JOURNAL_ENTRY_CLOSED_VAL, true);
+
+                       union journal_res_state s;
+                       s.v = atomic64_read_acquire(&j->reservations.counter);
+
+                       ret = journal_state_count(s, idx)
+                               ? ERR_PTR(-EAGAIN)
+                               : buf;
+                       break;
+               }
+       }
+
+       spin_unlock(&j->lock);
+       if (IS_ERR_OR_NULL(ret))
+               mutex_unlock(&j->buf_lock);
+       return ret;
+}
+
+struct journal_buf *bch2_next_write_buffer_flush_journal_buf(struct journal *j, u64 max_seq)
+{
+       struct journal_buf *ret;
+
+       wait_event(j->wait, (ret = __bch2_next_write_buffer_flush_journal_buf(j, max_seq)) != ERR_PTR(-EAGAIN));
+       return ret;
+}
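
The blocking wrapper above is the usual tristate-plus-wait_event() pattern: the inner helper answers NULL (nothing needs flushing), a buffer, or ERR_PTR(-EAGAIN) (the buffer is still referenced), and the wrapper sleeps until the answer is no longer "retry". A generic sketch with hypothetical helpers:

/* Generic sketch of the ERR_PTR(-EAGAIN) + wait_event() idiom. */
static struct example_item *__example_try_get(struct example_pool *p)
{
	if (!example_work_pending(p))		/* hypothetical predicate */
		return NULL;
	if (example_still_referenced(p))	/* hypothetical predicate */
		return ERR_PTR(-EAGAIN);	/* caller must wait */
	return example_current_item(p);
}

static struct example_item *example_get_blocking(struct example_pool *p)
{
	struct example_item *ret;

	wait_event(p->wait, (ret = __example_try_get(p)) != ERR_PTR(-EAGAIN));
	return ret;				/* NULL or a usable item */
}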
+
 /* allocate journal on a device: */
 
 static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
@@ -783,40 +907,40 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
        u64 *new_bucket_seq = NULL, *new_buckets = NULL;
        struct open_bucket **ob = NULL;
        long *bu = NULL;
-       unsigned i, nr_got = 0, nr_want = nr - ja->nr;
-       unsigned old_nr                 = ja->nr;
-       unsigned old_discard_idx        = ja->discard_idx;
-       unsigned old_dirty_idx_ondisk   = ja->dirty_idx_ondisk;
-       unsigned old_dirty_idx          = ja->dirty_idx;
-       unsigned old_cur_idx            = ja->cur_idx;
+       unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
        int ret = 0;
 
-       if (c) {
-               bch2_journal_flush_all_pins(&c->journal);
-               bch2_journal_block(&c->journal);
-       }
+       BUG_ON(nr <= ja->nr);
 
-       bu              = kzalloc(nr_want * sizeof(*bu), GFP_KERNEL);
-       ob              = kzalloc(nr_want * sizeof(*ob), GFP_KERNEL);
-       new_buckets     = kzalloc(nr * sizeof(u64), GFP_KERNEL);
-       new_bucket_seq  = kzalloc(nr * sizeof(u64), GFP_KERNEL);
+       bu              = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
+       ob              = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
+       new_buckets     = kcalloc(nr, sizeof(u64), GFP_KERNEL);
+       new_bucket_seq  = kcalloc(nr, sizeof(u64), GFP_KERNEL);
        if (!bu || !ob || !new_buckets || !new_bucket_seq) {
-               ret = -ENOMEM;
-               goto err_unblock;
+               ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+               goto err_free;
        }
 
        for (nr_got = 0; nr_got < nr_want; nr_got++) {
                if (new_fs) {
                        bu[nr_got] = bch2_bucket_alloc_new_fs(ca);
                        if (bu[nr_got] < 0) {
-                               ret = -ENOSPC;
+                               ret = -BCH_ERR_ENOSPC_bucket_alloc;
                                break;
                        }
                } else {
-                       ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none,
-                                              false, cl);
-                       if (IS_ERR(ob[nr_got])) {
-                               ret = cl ? -EAGAIN : -ENOSPC;
+                       ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
+                       ret = PTR_ERR_OR_ZERO(ob[nr_got]);
+                       if (ret)
+                               break;
+
+                       ret = bch2_trans_run(c,
+                               bch2_trans_mark_metadata_bucket(trans, ca,
+                                               ob[nr_got]->bucket, BCH_DATA_journal,
+                                               ca->mi.bucket_size));
+                       if (ret) {
+                               bch2_open_bucket_put(c, ob[nr_got]);
+                               bch_err_msg(c, ret, "marking new journal buckets");
                                break;
                        }
 
@@ -825,76 +949,77 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
        }
 
        if (!nr_got)
-               goto err_unblock;
+               goto err_free;
 
-       /*
-        * We may be called from the device add path, before the new device has
-        * actually been added to the running filesystem:
-        */
-       if (!new_fs)
-               spin_lock(&c->journal.lock);
+       /* Don't return an error if we successfully allocated some buckets: */
+       ret = 0;
+
+       if (c) {
+               bch2_journal_flush_all_pins(&c->journal);
+               bch2_journal_block(&c->journal);
+               mutex_lock(&c->sb_lock);
+       }
 
        memcpy(new_buckets,     ja->buckets,    ja->nr * sizeof(u64));
        memcpy(new_bucket_seq,  ja->bucket_seq, ja->nr * sizeof(u64));
-       swap(new_buckets,       ja->buckets);
-       swap(new_bucket_seq,    ja->bucket_seq);
+
+       BUG_ON(ja->discard_idx > ja->nr);
+
+       pos = ja->discard_idx ?: ja->nr;
+
+       memmove(new_buckets + pos + nr_got,
+               new_buckets + pos,
+               sizeof(new_buckets[0]) * (ja->nr - pos));
+       memmove(new_bucket_seq + pos + nr_got,
+               new_bucket_seq + pos,
+               sizeof(new_bucket_seq[0]) * (ja->nr - pos));
 
        for (i = 0; i < nr_got; i++) {
-               unsigned pos = ja->discard_idx ?: ja->nr;
-               long b = bu[i];
-
-               __array_insert_item(ja->buckets,                ja->nr, pos);
-               __array_insert_item(ja->bucket_seq,             ja->nr, pos);
-               ja->nr++;
-
-               ja->buckets[pos] = b;
-               ja->bucket_seq[pos] = 0;
-
-               if (pos <= ja->discard_idx)
-                       ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
-               if (pos <= ja->dirty_idx_ondisk)
-                       ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
-               if (pos <= ja->dirty_idx)
-                       ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
-               if (pos <= ja->cur_idx)
-                       ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
+               new_buckets[pos + i] = bu[i];
+               new_bucket_seq[pos + i] = 0;
        }
 
-       ret = bch2_journal_buckets_to_sb(c, ca);
-       if (ret) {
-               /* Revert: */
-               swap(new_buckets,       ja->buckets);
-               swap(new_bucket_seq,    ja->bucket_seq);
-               ja->nr                  = old_nr;
-               ja->discard_idx         = old_discard_idx;
-               ja->dirty_idx_ondisk    = old_dirty_idx_ondisk;
-               ja->dirty_idx           = old_dirty_idx;
-               ja->cur_idx             = old_cur_idx;
-       }
+       nr = ja->nr + nr_got;
+
+       ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
+       if (ret)
+               goto err_unblock;
 
        if (!new_fs)
-               spin_unlock(&c->journal.lock);
+               bch2_write_super(c);
 
+       /* Commit: */
        if (c)
-               bch2_journal_unblock(&c->journal);
+               spin_lock(&c->journal.lock);
 
-       if (ret)
-               goto err;
+       swap(new_buckets,       ja->buckets);
+       swap(new_bucket_seq,    ja->bucket_seq);
+       ja->nr = nr;
 
-       if (!new_fs) {
-               for (i = 0; i < nr_got; i++) {
-                       ret = bch2_trans_run(c,
-                               bch2_trans_mark_metadata_bucket(&trans, ca,
-                                               bu[i], BCH_DATA_journal,
-                                               ca->mi.bucket_size));
-                       if (ret) {
-                               bch2_fs_inconsistent(c, "error marking new journal buckets: %i", ret);
-                               goto err;
-                       }
-               }
+       if (pos <= ja->discard_idx)
+               ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
+       if (pos <= ja->dirty_idx_ondisk)
+               ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
+       if (pos <= ja->dirty_idx)
+               ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
+       if (pos <= ja->cur_idx)
+               ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
+
+       if (c)
+               spin_unlock(&c->journal.lock);
+err_unblock:
+       if (c) {
+               bch2_journal_unblock(&c->journal);
+               mutex_unlock(&c->sb_lock);
        }
-err:
-       if (ob && !new_fs)
+
+       if (ret && !new_fs)
+               for (i = 0; i < nr_got; i++)
+                       bch2_trans_run(c,
+                               bch2_trans_mark_metadata_bucket(trans, ca,
+                                               bu[i], BCH_DATA_free, 0));
+err_free:
+       if (!new_fs)
                for (i = 0; i < nr_got; i++)
                        bch2_open_bucket_put(c, ob[i]);
 
@@ -902,12 +1027,7 @@ err:
        kfree(new_buckets);
        kfree(ob);
        kfree(bu);
-
        return ret;
-err_unblock:
-       if (c)
-               bch2_journal_unblock(&c->journal);
-       goto err;
 }
 
 /*
@@ -919,45 +1039,48 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
 {
        struct journal_device *ja = &ca->journal;
        struct closure cl;
-       unsigned current_nr;
        int ret = 0;
 
-       /* don't handle reducing nr of buckets yet: */
-       if (nr < ja->nr)
-               return 0;
-
        closure_init_stack(&cl);
 
-       while (ja->nr != nr && (ret == 0 || ret == -EAGAIN)) {
-               struct disk_reservation disk_res = { 0, 0 };
+       down_write(&c->state_lock);
 
-               closure_sync(&cl);
+       /* don't handle reducing nr of buckets yet: */
+       if (nr < ja->nr)
+               goto unlock;
 
-               mutex_lock(&c->sb_lock);
-               current_nr = ja->nr;
+       while (ja->nr < nr) {
+               struct disk_reservation disk_res = { 0, 0, 0 };
 
                /*
                 * note: journal buckets aren't really counted as _sectors_ used yet, so
                 * we don't need the disk reservation to avoid the BUG_ON() in buckets.c
                 * when space used goes up without a reservation - but we do need the
                 * reservation to ensure we'll actually be able to allocate:
+                *
+                * XXX: that's not right, disk reservations only ensure a
+                * filesystem-wide allocation will succeed, this is a device
+                * specific allocation - we can hang here:
                 */
 
-               if (bch2_disk_reservation_get(c, &disk_res,
-                                             bucket_to_sector(ca, nr - ja->nr), 1, 0)) {
-                       mutex_unlock(&c->sb_lock);
-                       return -ENOSPC;
-               }
+               ret = bch2_disk_reservation_get(c, &disk_res,
+                                               bucket_to_sector(ca, nr - ja->nr), 1, 0);
+               if (ret)
+                       break;
 
                ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
 
                bch2_disk_reservation_put(c, &disk_res);
 
-               if (ja->nr != current_nr)
-                       bch2_write_super(c);
-               mutex_unlock(&c->sb_lock);
+               closure_sync(&cl);
+
+               if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
+                       break;
        }
 
+       bch_err_fn(c, ret);
+unlock:
+       up_write(&c->state_lock);
        return ret;
 }
 
@@ -966,8 +1089,10 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
        unsigned nr;
        int ret;
 
-       if (dynamic_fault("bcachefs:add:journal_alloc"))
-               return -ENOMEM;
+       if (dynamic_fault("bcachefs:add:journal_alloc")) {
+               ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+               goto err;
+       }
 
        /* 1/128th of the device by default: */
        nr = ca->mi.nbuckets >> 7;
@@ -981,15 +1106,26 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
                     min(1 << 13,
                         (1 << 24) / ca->mi.bucket_size));
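
Concretely, the default is nbuckets/128, clamped from below by the minimum journal size and from above by min(8192 buckets, 2^24 sectors' worth). A worked example with hypothetical device numbers (BCH_JOURNAL_BUCKETS_MIN is assumed to be the lower bound in the elided clamp_t() line):

/* Worked example: a hypothetical device with 2^20 buckets of
 * 1024 sectors (512 KiB) each, i.e. 512 GiB total. */
static unsigned example_journal_nr(void)
{
	unsigned nbuckets	= 1 << 20;
	unsigned bucket_size	= 1024;		/* in 512-byte sectors */

	unsigned nr  = nbuckets >> 7;		/* 8192: 1/128th of the device */
	unsigned cap = min(1U << 13, (1U << 24) / bucket_size);	/* min(8192, 16384) */

	/* result: 8192 buckets = 4 GiB of journal */
	return clamp_t(unsigned, nr, BCH_JOURNAL_BUCKETS_MIN, cap);
}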
 
-       if (ca->fs)
-               mutex_lock(&ca->fs->sb_lock);
-
        ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+err:
+       bch_err_fn(ca, ret);
+       return ret;
+}
 
-       if (ca->fs)
-               mutex_unlock(&ca->fs->sb_lock);
+int bch2_fs_journal_alloc(struct bch_fs *c)
+{
+       for_each_online_member(c, ca) {
+               if (ca->journal.nr)
+                       continue;
 
-       return ret;
+               int ret = bch2_dev_journal_alloc(ca);
+               if (ret) {
+                       percpu_ref_put(&ca->io_ref);
+                       return ret;
+               }
+       }
+
+       return 0;
 }
 
 /* startup/shutdown: */
@@ -1005,7 +1141,7 @@ static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
             seq++) {
                struct journal_buf *buf = journal_seq_to_buf(j, seq);
 
-               if (bch2_bkey_has_device(bkey_i_to_s_c(&buf->key), dev_idx))
+               if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
                        ret = true;
        }
        spin_unlock(&j->lock);
@@ -1023,7 +1159,7 @@ void bch2_fs_journal_stop(struct journal *j)
        bch2_journal_reclaim_stop(j);
        bch2_journal_flush_all_pins(j);
 
-       wait_event(j->wait, journal_entry_close(j));
+       wait_event(j->wait, bch2_journal_entry_close(j));
 
        /*
         * Always write a new journal entry, to make sure the clock hands are up
@@ -1047,7 +1183,6 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
        struct journal_replay *i, **_i;
        struct genradix_iter iter;
        bool had_entries = false;
-       unsigned ptr;
        u64 last_seq = cur_seq, nr, seq;
 
        genradix_for_each_reverse(&c->journal_entries, iter, _i) {
@@ -1067,7 +1202,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
                init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
                if (!j->pin.data) {
                        bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
-                       return -ENOMEM;
+                       return -BCH_ERR_ENOMEM_journal_pin_fifo;
                }
        }
 
@@ -1101,8 +1236,8 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
                p = journal_seq_pin(j, seq);
 
                p->devs.nr = 0;
-               for (ptr = 0; ptr < i->nr_ptrs; ptr++)
-                       bch2_dev_list_add_dev(&p->devs, i->ptrs[ptr].dev);
+               darray_for_each(i->ptrs, ptr)
+                       bch2_dev_list_add_dev(&p->devs, ptr->dev);
 
                had_entries = true;
        }
@@ -1130,30 +1265,33 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
 
 void bch2_dev_journal_exit(struct bch_dev *ca)
 {
-       kfree(ca->journal.bio);
-       kfree(ca->journal.buckets);
-       kfree(ca->journal.bucket_seq);
+       struct journal_device *ja = &ca->journal;
 
-       ca->journal.bio         = NULL;
-       ca->journal.buckets     = NULL;
-       ca->journal.bucket_seq  = NULL;
+       for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
+               kfree(ja->bio[i]);
+               ja->bio[i] = NULL;
+       }
+
+       kfree(ja->buckets);
+       kfree(ja->bucket_seq);
+       ja->buckets     = NULL;
+       ja->bucket_seq  = NULL;
 }
 
 int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 {
        struct journal_device *ja = &ca->journal;
        struct bch_sb_field_journal *journal_buckets =
-               bch2_sb_get_journal(sb);
+               bch2_sb_field_get(sb, journal);
        struct bch_sb_field_journal_v2 *journal_buckets_v2 =
-               bch2_sb_get_journal_v2(sb);
-       unsigned i, nr_bvecs;
+               bch2_sb_field_get(sb, journal_v2);
 
        ja->nr = 0;
 
        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
 
-               for (i = 0; i < nr; i++)
+               for (unsigned i = 0; i < nr; i++)
                        ja->nr += le64_to_cpu(journal_buckets_v2->d[i].nr);
        } else if (journal_buckets) {
                ja->nr = bch2_nr_journal_buckets(journal_buckets);
@@ -1161,30 +1299,35 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 
        ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->bucket_seq)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_dev_journal_init;
 
-       nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
+       unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
 
-       ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
-       if (!ca->journal.bio)
-               return -ENOMEM;
+       for (unsigned i = 0; i < ARRAY_SIZE(ja->bio); i++) {
+               ja->bio[i] = kmalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
+                                    nr_bvecs), GFP_KERNEL);
+               if (!ja->bio[i])
+                       return -BCH_ERR_ENOMEM_dev_journal_init;
 
-       bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
+               ja->bio[i]->ca = ca;
+               ja->bio[i]->buf_idx = i;
+               bio_init(&ja->bio[i]->bio, NULL, ja->bio[i]->bio.bi_inline_vecs, nr_bvecs, 0);
+       }
 
        ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
        if (!ja->buckets)
-               return -ENOMEM;
+               return -BCH_ERR_ENOMEM_dev_journal_init;
 
        if (journal_buckets_v2) {
                unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
-               unsigned j, dst = 0;
+               unsigned dst = 0;
 
-               for (i = 0; i < nr; i++)
-                       for (j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
+               for (unsigned i = 0; i < nr; i++)
+                       for (unsigned j = 0; j < le64_to_cpu(journal_buckets_v2->d[i].nr); j++)
                                ja->buckets[dst++] =
                                        le64_to_cpu(journal_buckets_v2->d[i].start) + j;
        } else if (journal_buckets) {
-               for (i = 0; i < ja->nr; i++)
+               for (unsigned i = 0; i < ja->nr; i++)
                        ja->buckets[i] = le64_to_cpu(journal_buckets->buckets[i]);
        }
 
@@ -1193,22 +1336,21 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 
 void bch2_fs_journal_exit(struct journal *j)
 {
-       unsigned i;
+       if (j->wq)
+               destroy_workqueue(j->wq);
 
-       for (i = 0; i < ARRAY_SIZE(j->buf); i++)
-               kvpfree(j->buf[i].data, j->buf[i].buf_size);
+       darray_exit(&j->early_journal_entries);
+
+       for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
+               kvfree(j->buf[i].data);
        free_fifo(&j->pin);
 }
 
 int bch2_fs_journal_init(struct journal *j)
 {
-       struct bch_fs *c = container_of(j, struct bch_fs, journal);
        static struct lock_class_key res_key;
-       unsigned i;
-       int ret = 0;
-
-       pr_verbose_init(c->opts, "");
 
+       mutex_init(&j->buf_lock);
        spin_lock_init(&j->lock);
        spin_lock_init(&j->err_lock);
        init_waitqueue_head(&j->wait);
@@ -1224,24 +1366,24 @@ int bch2_fs_journal_init(struct journal *j)
                ((union journal_res_state)
                 { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
 
-       if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
-               ret = -ENOMEM;
-               goto out;
-       }
+       if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
+               return -BCH_ERR_ENOMEM_journal_pin_fifo;
 
-       for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
+       for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
                j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
-               j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
-               if (!j->buf[i].data) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
+               j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
+               if (!j->buf[i].data)
+                       return -BCH_ERR_ENOMEM_journal_buf;
+               j->buf[i].idx = i;
        }
 
        j->pin.front = j->pin.back = 1;
-out:
-       pr_verbose_init(c->opts, "ret %i", ret);
-       return ret;
+
+       j->wq = alloc_workqueue("bcachefs_journal",
+                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
+       if (!j->wq)
+               return -BCH_ERR_ENOMEM_fs_other_alloc;
+       return 0;
 }
 
 /* debug: */
@@ -1250,35 +1392,36 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        union journal_res_state s;
-       struct bch_dev *ca;
        unsigned long now = jiffies;
-       u64 seq;
-       unsigned i;
+       u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
 
+       if (!out->nr_tabstops)
+               printbuf_tabstop_push(out, 24);
        out->atomic++;
-       out->tabstops[0] = 24;
 
        rcu_read_lock();
        s = READ_ONCE(j->reservations);
 
-       prt_printf(out, "dirty journal entries:\t%llu/%llu\n",fifo_used(&j->pin), j->pin.size);
+       prt_printf(out, "dirty journal entries:\t%llu/%llu\n",  fifo_used(&j->pin), j->pin.size);
        prt_printf(out, "seq:\t\t\t%llu\n",                     journal_cur_seq(j));
        prt_printf(out, "seq_ondisk:\t\t%llu\n",                j->seq_ondisk);
-       prt_printf(out, "last_seq:\t\t%llu\n",          journal_last_seq(j));
+       prt_printf(out, "last_seq:\t\t%llu\n",                  journal_last_seq(j));
        prt_printf(out, "last_seq_ondisk:\t%llu\n",             j->last_seq_ondisk);
-       prt_printf(out, "flushed_seq_ondisk:\t%llu\n",  j->flushed_seq_ondisk);
-       prt_printf(out, "prereserved:\t\t%u/%u\n",              j->prereserved.reserved, j->prereserved.remaining);
-       prt_printf(out, "watermark:\t\t%s\n",           bch2_journal_watermarks[j->watermark]);
-       prt_printf(out, "each entry reserved:\t%u\n",   j->entry_u64s_reserved);
+       prt_printf(out, "flushed_seq_ondisk:\t%llu\n",          j->flushed_seq_ondisk);
+       prt_printf(out, "watermark:\t\t%s\n",                   bch2_watermarks[j->watermark]);
+       prt_printf(out, "each entry reserved:\t%u\n",           j->entry_u64s_reserved);
        prt_printf(out, "nr flush writes:\t%llu\n",             j->nr_flush_writes);
-       prt_printf(out, "nr noflush writes:\t%llu\n",   j->nr_noflush_writes);
-       prt_printf(out, "nr direct reclaim:\t%llu\n",   j->nr_direct_reclaim);
+       prt_printf(out, "nr noflush writes:\t%llu\n",           j->nr_noflush_writes);
+       prt_printf(out, "average write size:\t");
+       prt_human_readable_u64(out, nr_writes ? div64_u64(j->entry_bytes_written, nr_writes) : 0);
+       prt_newline(out);
+       prt_printf(out, "nr direct reclaim:\t%llu\n",           j->nr_direct_reclaim);
        prt_printf(out, "nr background reclaim:\t%llu\n",       j->nr_background_reclaim);
        prt_printf(out, "reclaim kicked:\t\t%u\n",              j->reclaim_kicked);
-       prt_printf(out, "reclaim runs in:\t%u ms\n",    time_after(j->next_reclaim, now)
+       prt_printf(out, "reclaim runs in:\t%u ms\n",            time_after(j->next_reclaim, now)
               ? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
-       prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
-       prt_printf(out, "current entry error:\t%s\n",   bch2_journal_errors[j->cur_entry_error]);
+       prt_printf(out, "current entry sectors:\t%u\n",         j->cur_entry_sectors);
+       prt_printf(out, "current entry error:\t%s\n",           bch2_journal_errors[j->cur_entry_error]);
        prt_printf(out, "current entry:\t\t");
 
        switch (s.cur_entry_offset) {
@@ -1294,35 +1437,9 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
        }
 
        prt_newline(out);
-
-       for (seq = journal_cur_seq(j);
-            seq >= journal_last_unwritten_seq(j);
-            --seq) {
-               i = seq & JOURNAL_BUF_MASK;
-
-               prt_printf(out, "unwritten entry:");
-               prt_tab(out);
-               prt_printf(out, "%llu", seq);
-               prt_newline(out);
-               printbuf_indent_add(out, 2);
-
-               prt_printf(out, "refcount:");
-               prt_tab(out);
-               prt_printf(out, "%u", journal_state_count(s, i));
-               prt_newline(out);
-
-               prt_printf(out, "sectors:");
-               prt_tab(out);
-               prt_printf(out, "%u", j->buf[i].sectors);
-               prt_newline(out);
-
-               prt_printf(out, "expires");
-               prt_tab(out);
-               prt_printf(out, "%li jiffies", j->buf[i].expires - jiffies);
-               prt_newline(out);
-
-               printbuf_indent_sub(out, 2);
-       }
+       prt_printf(out, "unwritten entries:");
+       prt_newline(out);
+       bch2_journal_bufs_to_text(out, j);
 
        prt_printf(out,
               "replay done:\t\t%i\n",
@@ -1342,8 +1459,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
               j->space[journal_space_total].next_entry,
               j->space[journal_space_total].total);
 
-       for_each_member_device_rcu(ca, c, i,
-                                  &c->rw_devs[BCH_DATA_journal]) {
+       for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
                struct journal_device *ja = &ca->journal;
 
                if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
@@ -1352,7 +1468,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
                if (!ja->nr)
                        continue;
 
-               prt_printf(out, "dev %u:\n",            i);
+               prt_printf(out, "dev %u:\n",            ca->dev_idx);
                prt_printf(out, "\tnr\t\t%u\n",         ja->nr);
                prt_printf(out, "\tbucket size\t%u\n",  ca->mi.bucket_size);
                prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
@@ -1395,15 +1511,11 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64
        prt_newline(out);
        printbuf_indent_add(out, 2);
 
-       list_for_each_entry(pin, &pin_list->list, list) {
-               prt_printf(out, "\t%px %ps", pin, pin->flush);
-               prt_newline(out);
-       }
-
-       list_for_each_entry(pin, &pin_list->key_cache_list, list) {
-               prt_printf(out, "\t%px %ps", pin, pin->flush);
-               prt_newline(out);
-       }
+       for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
+               list_for_each_entry(pin, &pin_list->list[i], list) {
+                       prt_printf(out, "\t%px %ps", pin, pin->flush);
+                       prt_newline(out);
+               }
 
        if (!list_empty(&pin_list->flushed)) {
                prt_printf(out, "flushed:");