#include "journal_reclaim.h"
#include "journal_sb.h"
#include "journal_seq_blacklist.h"
-
-#include <trace/events/bcachefs.h>
-
-#define x(n) #n,
-static const char * const bch2_journal_watermarks[] = {
- JOURNAL_WATERMARKS()
- NULL
-};
+#include "trace.h"
static const char * const bch2_journal_errors[] = {
+#define x(n) #n,
JOURNAL_ERRORS()
+#undef x
NULL
};
-#undef x
static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
{
- INIT_LIST_HEAD(&p->list);
- INIT_LIST_HEAD(&p->key_cache_list);
+ unsigned i;
+ for (i = 0; i < ARRAY_SIZE(p->list); i++)
+ INIT_LIST_HEAD(&p->list[i]);
INIT_LIST_HEAD(&p->flushed);
atomic_set(&p->count, count);
p->devs.nr = 0;
}
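
The pin-list hunk above folds the separate list/key_cache_list heads into one array indexed by pin type, which the debug code at the end of this patch walks by index. A standalone sketch of the same shape, with hypothetical names (the real bcachefs types use struct list_head and an enum sized to the array):

#include <stdio.h>

/* Sketch: pins grouped into per-type lists, as in list[] above. */
enum pin_type { PIN_BTREE, PIN_KEY_CACHE, PIN_OTHER, PIN_NR };

struct pin {
	enum pin_type type;
	struct pin *next;
};

struct pin_list {
	struct pin *heads[PIN_NR];	/* one singly linked list per pin type */
};

static void pin_add(struct pin_list *l, struct pin *p)
{
	p->next = l->heads[p->type];
	l->heads[p->type] = p;
}

int main(void)
{
	struct pin_list l = {0};
	struct pin a = { .type = PIN_KEY_CACHE }, b = { .type = PIN_OTHER };

	pin_add(&l, &a);
	pin_add(&l, &b);

	for (int t = 0; t < PIN_NR; t++)	/* walk every type, like the debug code */
		for (struct pin *p = l.heads[t]; p; p = p->next)
			printf("type %d pin %p\n", t, (void *) p);
	return 0;
}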
+/*
+ * Detect stuck journal conditions and trigger shutdown. Technically the journal
+ * can end up stuck for a variety of reasons, such as a blocked I/O, journal
+ * reservation lockup, etc. Since this is a fatal error with potentially
+ * unpredictable characteristics, we want to be fairly conservative before we
+ * decide to shut things down.
+ *
+ * Consider the journal stuck when it appears full with no ability to commit
+ * btree transactions, discard journal buckets, or acquire a priority
+ * (reserved watermark) reservation.
+ */
+static inline bool
+journal_error_check_stuck(struct journal *j, int error, unsigned flags)
+{
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ bool stuck = false;
+ struct printbuf buf = PRINTBUF;
+
+ if (!(error == JOURNAL_ERR_journal_full ||
+ error == JOURNAL_ERR_journal_pin_full) ||
+ nr_unwritten_journal_entries(j) ||
+ (flags & BCH_WATERMARK_MASK) != BCH_WATERMARK_reclaim)
+ return stuck;
+
+ spin_lock(&j->lock);
+
+ if (j->can_discard) {
+ spin_unlock(&j->lock);
+ return stuck;
+ }
+
+ stuck = true;
+
+ /*
+ * The journal shutdown path will set ->err_seq, but do it here first to
+ * serialize against concurrent failures and avoid duplicate error
+ * reports.
+ */
+ if (j->err_seq) {
+ spin_unlock(&j->lock);
+ return stuck;
+ }
+ j->err_seq = journal_cur_seq(j);
+ spin_unlock(&j->lock);
+
+	bch_err(c, "Journal stuck! Have a pre-reservation but journal full (error %s)",
+ bch2_journal_errors[error]);
+ bch2_journal_debug_to_text(&buf, j);
+ bch_err(c, "%s", buf.buf);
+
+ printbuf_reset(&buf);
+ bch2_journal_pins_to_text(&buf, j);
+ bch_err(c, "Journal pins:\n%s", buf.buf);
+ printbuf_exit(&buf);
+
+ bch2_fatal_error(c);
+ dump_stack();
+
+ return stuck;
+}
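
journal_error_check_stuck() claims ->err_seq under the lock so that exactly one thread emits the debug dump; later observers of the same stuck condition return early. A standalone sketch of this report-once pattern, using pthreads purely for illustration (bcachefs uses its own spinlock here):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long err_seq;	/* nonzero once the error has been claimed */

/* Returns true for exactly one caller; that caller emits the report. */
static bool claim_error(unsigned long cur_seq)
{
	pthread_mutex_lock(&lock);
	if (err_seq) {			/* somebody already reported it */
		pthread_mutex_unlock(&lock);
		return false;
	}
	err_seq = cur_seq;		/* claim before dropping the lock */
	pthread_mutex_unlock(&lock);
	return true;
}

int main(void)
{
	if (claim_error(42))
		fprintf(stderr, "journal stuck at seq %lu\n", err_seq);
	if (!claim_error(43))
		fprintf(stderr, "duplicate report suppressed\n");
	return 0;
}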
+
/* journal entry close/open: */
void __bch2_journal_buf_put(struct journal *j)
__journal_entry_close(j, JOURNAL_ENTRY_ERROR_VAL);
if (!j->err_seq)
j->err_seq = journal_cur_seq(j);
+ journal_wake(j);
spin_unlock(&j->lock);
}
/*
 * should _only_ be called from journal_res_get() - when we actually want a
* journal reservation - journal entry is open means journal is dirty:
- *
- * returns:
- * 0: success
- * -ENOSPC: journal currently full, must invoke reclaim
- * -EAGAIN: journal blocked, must wait
- * -EROFS: insufficient rw devices or journal error
*/
static int journal_entry_open(struct journal *j)
{
if (!fifo_free(&j->pin))
return JOURNAL_ERR_journal_pin_full;
- if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) - 1)
+ if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
return JOURNAL_ERR_max_in_flight;
BUG_ON(!j->cur_entry_sectors);
journal_entry_overhead(j);
u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
- if (u64s <= 0)
+ if (u64s <= (ssize_t) j->early_journal_entries.nr)
return JOURNAL_ERR_journal_full;
if (fifo_empty(&j->pin) && j->reclaim_thread)
buf->data->seq = cpu_to_le64(journal_cur_seq(j));
buf->data->u64s = 0;
+ if (j->early_journal_entries.nr) {
+ memcpy(buf->data->_data, j->early_journal_entries.data,
+ j->early_journal_entries.nr * sizeof(u64));
+ le32_add_cpu(&buf->data->u64s, j->early_journal_entries.nr);
+ }
+
/*
* Must be set before marking the journal entry as open:
*/
BUG_ON(new.idx != (journal_cur_seq(j) & JOURNAL_BUF_MASK));
journal_state_inc(&new);
- new.cur_entry_offset = 0;
+
+ /* Handle any already added entries */
+ new.cur_entry_offset = le32_to_cpu(buf->data->u64s);
} while ((v = atomic64_cmpxchg(&j->reservations.counter,
old.v, new.v)) != old.v);
&j->write_work,
msecs_to_jiffies(c->opts.journal_flush_delay));
journal_wake(j);
+
+ if (j->early_journal_entries.nr)
+ darray_exit(&j->early_journal_entries);
return 0;
}
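
journal_entry_open() now seeds the first post-recovery entry from early_journal_entries and starts cur_entry_offset past the prepended data, so reservations land after it; the darray is freed once consumed. A standalone sketch of the prepend-and-advance step, with simplified stand-in types (the real buffer uses a little-endian u64s count):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the journal buffer and staged early entries. */
struct buf {
	uint64_t data[64];
	uint32_t u64s;		/* entries currently in the buffer */
};

struct early {
	const uint64_t *data;
	size_t nr;
};

/* Copy staged entries to the front and start reservations after them. */
static uint32_t open_entry(struct buf *b, const struct early *e)
{
	b->u64s = 0;
	if (e->nr) {
		memcpy(b->data, e->data, e->nr * sizeof(uint64_t));
		b->u64s += e->nr;
	}
	return b->u64s;		/* becomes the new cur_entry_offset */
}

int main(void)
{
	const uint64_t staged[] = { 1, 2, 3 };
	struct early e = { staged, 3 };
	struct buf b;

	printf("cur_entry_offset = %u\n", open_entry(&b, &e));
	return 0;
}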
return 0;
if (bch2_journal_error(j))
- return -EROFS;
+ return -BCH_ERR_erofs_journal_err;
spin_lock(&j->lock);
+ /* check once more in case somebody else shut things down... */
+ if (bch2_journal_error(j)) {
+ spin_unlock(&j->lock);
+ return -BCH_ERR_erofs_journal_err;
+ }
+
/*
* Recheck after taking the lock, so we don't race with another thread
* that just did journal_entry_open() and call journal_entry_close()
return 0;
}
- if ((flags & JOURNAL_WATERMARK_MASK) < j->watermark) {
+ if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
/*
* Don't want to close current journal entry, just need to
* invoke reclaim:
if (!ret)
goto retry;
-
- if ((ret == JOURNAL_ERR_journal_full ||
- ret == JOURNAL_ERR_journal_pin_full) &&
- !can_discard &&
- !nr_unwritten_journal_entries(j) &&
- (flags & JOURNAL_WATERMARK_MASK) == JOURNAL_WATERMARK_reserved) {
- struct printbuf buf = PRINTBUF;
-
- bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (ret %s)",
- bch2_journal_errors[ret]);
-
- bch2_journal_debug_to_text(&buf, j);
- bch_err(c, "%s", buf.buf);
-
- printbuf_reset(&buf);
- bch2_journal_pins_to_text(&buf, j);
- bch_err(c, "Journal pins:\n%s", buf.buf);
-
- printbuf_exit(&buf);
- bch2_fatal_error(c);
- dump_stack();
- }
+ if (journal_error_check_stuck(j, ret, flags))
+ ret = -BCH_ERR_journal_res_get_blocked;
/*
* Journal is full - can't rely on reclaim from work item due to
}
}
- return ret == JOURNAL_ERR_insufficient_devices ? -EROFS : -EAGAIN;
+ return ret == JOURNAL_ERR_insufficient_devices
+ ? -BCH_ERR_erofs_journal_err
+ : -BCH_ERR_journal_res_get_blocked;
}
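
The errno returns are replaced by private BCH_ERR_* codes: one distinct value per failure site, each still resolvable to a standard errno class so callers and userspace see familiar errors. A standalone sketch of that scheme, with hypothetical codes and helper (the real implementation lives in errcode.h):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Private codes live above the errno range; each remembers its class. */
enum {
	ERR_erofs_journal_err = 4096,	/* class: EROFS */
	ERR_journal_res_get_blocked,	/* class: EAGAIN */
};

static int err_class(int err)
{
	switch (err) {
	case ERR_erofs_journal_err:		return EROFS;
	case ERR_journal_res_get_blocked:	return EAGAIN;
	default:				return err;
	}
}

/* Matches both the private code and plain errno values: */
static bool err_matches(int err, int class)
{
	return -err == class || err_class(-err) == class;
}

int main(void)
{
	int ret = -ERR_journal_res_get_blocked;

	if (err_matches(ret, EAGAIN))
		printf("blocked: would wait and retry\n");
	return 0;
}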
/*
int ret;
closure_wait_event(&j->async_wait,
- (ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
+ (ret = __journal_res_get(j, res, flags)) !=
+		   -BCH_ERR_journal_res_get_blocked ||
(flags & JOURNAL_RES_GET_NONBLOCK));
return ret;
}
return bch2_journal_flush_seq(j, res.seq);
}
-int bch2_journal_log_msg(struct journal *j, const char *fmt, ...)
-{
- struct jset_entry_log *entry;
- struct journal_res res = { 0 };
- unsigned msglen, u64s;
- va_list args;
- int ret;
-
- va_start(args, fmt);
- msglen = vsnprintf(NULL, 0, fmt, args) + 1;
- va_end(args);
-
- u64s = jset_u64s(DIV_ROUND_UP(msglen, sizeof(u64)));
-
- ret = bch2_journal_res_get(j, &res, u64s, 0);
- if (ret)
- return ret;
-
- entry = container_of(journal_res_entry(j, &res),
- struct jset_entry_log, entry);
- memset(entry, 0, u64s * sizeof(u64));
- entry->entry.type = BCH_JSET_ENTRY_log;
- entry->entry.u64s = u64s - 1;
-
- va_start(args, fmt);
- vsnprintf(entry->d, INT_MAX, fmt, args);
- va_end(args);
-
- bch2_journal_res_put(j, &res);
-
- return bch2_journal_flush_seq(j, res.seq);
-}
-
/* block/unlock the journal: */
void bch2_journal_unblock(struct journal *j)
u64 *new_bucket_seq = NULL, *new_buckets = NULL;
struct open_bucket **ob = NULL;
long *bu = NULL;
- unsigned i, nr_got = 0, nr_want = nr - ja->nr;
- unsigned old_nr = ja->nr;
- unsigned old_discard_idx = ja->discard_idx;
- unsigned old_dirty_idx_ondisk = ja->dirty_idx_ondisk;
- unsigned old_dirty_idx = ja->dirty_idx;
- unsigned old_cur_idx = ja->cur_idx;
+ unsigned i, pos, nr_got = 0, nr_want = nr - ja->nr;
int ret = 0;
- if (c) {
- bch2_journal_flush_all_pins(&c->journal);
- bch2_journal_block(&c->journal);
- }
+ BUG_ON(nr <= ja->nr);
bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
if (!bu || !ob || !new_buckets || !new_bucket_seq) {
- ret = -ENOMEM;
- goto err_unblock;
+ ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+ goto err_free;
}
for (nr_got = 0; nr_got < nr_want; nr_got++) {
break;
}
} else {
- ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none,
- false, cl);
- if (IS_ERR(ob[nr_got])) {
- ret = cl
- ? -EAGAIN
- : -BCH_ERR_ENOSPC_bucket_alloc;
+ ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
+ ret = PTR_ERR_OR_ZERO(ob[nr_got]);
+ if (ret)
+ break;
+
+ ret = bch2_trans_run(c,
+ bch2_trans_mark_metadata_bucket(&trans, ca,
+ ob[nr_got]->bucket, BCH_DATA_journal,
+ ca->mi.bucket_size));
+ if (ret) {
+ bch2_open_bucket_put(c, ob[nr_got]);
+ bch_err(c, "error marking new journal buckets: %s", bch2_err_str(ret));
break;
}
}
if (!nr_got)
- goto err_unblock;
+ goto err_free;
- /*
- * We may be called from the device add path, before the new device has
- * actually been added to the running filesystem:
- */
- if (!new_fs)
- spin_lock(&c->journal.lock);
+ /* Don't return an error if we successfully allocated some buckets: */
+ ret = 0;
+
+ if (c) {
+ bch2_journal_flush_all_pins(&c->journal);
+ bch2_journal_block(&c->journal);
+ mutex_lock(&c->sb_lock);
+ }
memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
memcpy(new_bucket_seq, ja->bucket_seq, ja->nr * sizeof(u64));
- swap(new_buckets, ja->buckets);
- swap(new_bucket_seq, ja->bucket_seq);
+
+ BUG_ON(ja->discard_idx > ja->nr);
+
+ pos = ja->discard_idx ?: ja->nr;
+
+ memmove(new_buckets + pos + nr_got,
+ new_buckets + pos,
+ sizeof(new_buckets[0]) * (ja->nr - pos));
+ memmove(new_bucket_seq + pos + nr_got,
+ new_bucket_seq + pos,
+ sizeof(new_bucket_seq[0]) * (ja->nr - pos));
for (i = 0; i < nr_got; i++) {
- unsigned pos = ja->discard_idx ?: ja->nr;
- long b = bu[i];
-
- __array_insert_item(ja->buckets, ja->nr, pos);
- __array_insert_item(ja->bucket_seq, ja->nr, pos);
- ja->nr++;
-
- ja->buckets[pos] = b;
- ja->bucket_seq[pos] = 0;
-
- if (pos <= ja->discard_idx)
- ja->discard_idx = (ja->discard_idx + 1) % ja->nr;
- if (pos <= ja->dirty_idx_ondisk)
- ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + 1) % ja->nr;
- if (pos <= ja->dirty_idx)
- ja->dirty_idx = (ja->dirty_idx + 1) % ja->nr;
- if (pos <= ja->cur_idx)
- ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
+ new_buckets[pos + i] = bu[i];
+ new_bucket_seq[pos + i] = 0;
}
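
The memmove pair opens a gap of nr_got slots at pos in the staging copies and the loop fills it; the live arrays are only swapped in after the superblock write succeeds, which is what makes the later error paths safe. The insertion itself is a standard pattern, shown standalone with illustrative names:

#include <stdio.h>
#include <string.h>

/* Insert 'n' values at 'pos' in an array holding 'nr' used elements.
 * The array must have capacity for nr + n. */
static void array_insert(long *a, size_t nr, size_t pos,
			 const long *vals, size_t n)
{
	memmove(a + pos + n, a + pos, sizeof(a[0]) * (nr - pos));
	memcpy(a + pos, vals, sizeof(a[0]) * n);
}

int main(void)
{
	long buckets[8] = { 10, 11, 12, 13 };
	long new[] = { 90, 91 };

	array_insert(buckets, 4, 2, new, 2);	/* -> 10 11 90 91 12 13 */

	for (int i = 0; i < 6; i++)
		printf("%ld ", buckets[i]);
	printf("\n");
	return 0;
}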
- ret = bch2_journal_buckets_to_sb(c, ca);
- if (ret) {
- /* Revert: */
- swap(new_buckets, ja->buckets);
- swap(new_bucket_seq, ja->bucket_seq);
- ja->nr = old_nr;
- ja->discard_idx = old_discard_idx;
- ja->dirty_idx_ondisk = old_dirty_idx_ondisk;
- ja->dirty_idx = old_dirty_idx;
- ja->cur_idx = old_cur_idx;
- }
+ nr = ja->nr + nr_got;
+
+ ret = bch2_journal_buckets_to_sb(c, ca, new_buckets, nr);
+ if (ret)
+ goto err_unblock;
if (!new_fs)
- spin_unlock(&c->journal.lock);
+ bch2_write_super(c);
+ /* Commit: */
if (c)
- bch2_journal_unblock(&c->journal);
+ spin_lock(&c->journal.lock);
- if (ret)
- goto err;
+ swap(new_buckets, ja->buckets);
+ swap(new_bucket_seq, ja->bucket_seq);
+ ja->nr = nr;
- if (!new_fs) {
- for (i = 0; i < nr_got; i++) {
- ret = bch2_trans_run(c,
- bch2_trans_mark_metadata_bucket(&trans, ca,
- bu[i], BCH_DATA_journal,
- ca->mi.bucket_size));
- if (ret) {
- bch2_fs_inconsistent(c, "error marking new journal buckets: %i", ret);
- goto err;
- }
- }
+ if (pos <= ja->discard_idx)
+ ja->discard_idx = (ja->discard_idx + nr_got) % ja->nr;
+ if (pos <= ja->dirty_idx_ondisk)
+ ja->dirty_idx_ondisk = (ja->dirty_idx_ondisk + nr_got) % ja->nr;
+ if (pos <= ja->dirty_idx)
+ ja->dirty_idx = (ja->dirty_idx + nr_got) % ja->nr;
+ if (pos <= ja->cur_idx)
+ ja->cur_idx = (ja->cur_idx + nr_got) % ja->nr;
+
+ if (c)
+ spin_unlock(&c->journal.lock);
+err_unblock:
+ if (c) {
+ bch2_journal_unblock(&c->journal);
+ mutex_unlock(&c->sb_lock);
}
-err:
- if (ob && !new_fs)
+
+ if (ret && !new_fs)
+ for (i = 0; i < nr_got; i++)
+ bch2_trans_run(c,
+ bch2_trans_mark_metadata_bucket(&trans, ca,
+ bu[i], BCH_DATA_free, 0));
+err_free:
+ if (!new_fs)
for (i = 0; i < nr_got; i++)
bch2_open_bucket_put(c, ob[i]);
kfree(new_buckets);
kfree(ob);
kfree(bu);
-
return ret;
-err_unblock:
- if (c)
- bch2_journal_unblock(&c->journal);
- goto err;
}
/*
{
struct journal_device *ja = &ca->journal;
struct closure cl;
- unsigned current_nr;
int ret = 0;
- /* don't handle reducing nr of buckets yet: */
- if (nr < ja->nr)
- return 0;
-
closure_init_stack(&cl);
- while (ja->nr != nr && (ret == 0 || ret == -EAGAIN)) {
- struct disk_reservation disk_res = { 0, 0 };
+ down_write(&c->state_lock);
- closure_sync(&cl);
+ /* don't handle reducing nr of buckets yet: */
+ if (nr < ja->nr)
+ goto unlock;
- mutex_lock(&c->sb_lock);
- current_nr = ja->nr;
+ while (ja->nr < nr) {
+ struct disk_reservation disk_res = { 0, 0 };
/*
* note: journal buckets aren't really counted as _sectors_ used yet, so
* we don't need the disk reservation to avoid the BUG_ON() in buckets.c
* when space used goes up without a reservation - but we do need the
* reservation to ensure we'll actually be able to allocate:
+ *
+	 * XXX: that's not right; disk reservations only ensure a
+	 * filesystem-wide allocation will succeed, but this is a
+	 * device-specific allocation - we can hang here:
*/
ret = bch2_disk_reservation_get(c, &disk_res,
bucket_to_sector(ca, nr - ja->nr), 1, 0);
- if (ret) {
- mutex_unlock(&c->sb_lock);
- return ret;
- }
+ if (ret)
+ break;
ret = __bch2_set_nr_journal_buckets(ca, nr, false, &cl);
bch2_disk_reservation_put(c, &disk_res);
- if (ja->nr != current_nr)
- bch2_write_super(c);
- mutex_unlock(&c->sb_lock);
+ closure_sync(&cl);
+
+ if (ret && ret != -BCH_ERR_bucket_alloc_blocked)
+ break;
}
+ if (ret)
+ bch_err_fn(c, ret);
+unlock:
+ up_write(&c->state_lock);
return ret;
}
unsigned nr;
int ret;
- if (dynamic_fault("bcachefs:add:journal_alloc"))
- return -ENOMEM;
+ if (dynamic_fault("bcachefs:add:journal_alloc")) {
+ ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
+ goto err;
+ }
/* 1/128th of the device by default: */
nr = ca->mi.nbuckets >> 7;
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
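
With bucket sizes counted in 512-byte sectors, the cap is the smaller of 2^13 buckets and 2^24 sectors (8 GiB) of journal. A worked example of the sizing, as a standalone sketch (the lower clamp bound is outside this hunk and omitted here too):

#include <stdio.h>

/* Default journal sizing: 1/128th of the device, capped at the smaller of
 * 2^13 buckets and 2^24 sectors' worth of buckets (8 GiB at 512B sectors). */
static unsigned journal_buckets(unsigned long long nbuckets, unsigned bucket_size)
{
	unsigned nr = nbuckets >> 7;
	unsigned cap = (1 << 24) / bucket_size;

	if (cap > (1 << 13))
		cap = 1 << 13;
	return nr < cap ? nr : cap;
}

int main(void)
{
	/* 1M buckets of 1024 sectors (512 KiB) each, i.e. a 512 GiB device: */
	printf("%u\n", journal_buckets(1 << 20, 1024));	/* 8192 -> 4 GiB journal */
	return 0;
}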
- if (ca->fs)
- mutex_lock(&ca->fs->sb_lock);
-
ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
-
- if (ca->fs)
- mutex_unlock(&ca->fs->sb_lock);
-
+err:
+ if (ret)
+ bch_err_fn(ca, ret);
return ret;
}
seq++) {
struct journal_buf *buf = journal_seq_to_buf(j, seq);
- if (bch2_bkey_has_device(bkey_i_to_s_c(&buf->key), dev_idx))
+ if (bch2_bkey_has_device_c(bkey_i_to_s_c(&buf->key), dev_idx))
ret = true;
}
spin_unlock(&j->lock);
init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
if (!j->pin.data) {
bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_journal_pin_fifo;
}
}
ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!ja->bucket_seq)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_dev_journal_init;
nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
if (!ca->journal.bio)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_dev_journal_init;
bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!ja->buckets)
- return -ENOMEM;
+ return -BCH_ERR_ENOMEM_dev_journal_init;
if (journal_buckets_v2) {
unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
{
unsigned i;
+ darray_exit(&j->early_journal_entries);
+
for (i = 0; i < ARRAY_SIZE(j->buf); i++)
kvpfree(j->buf[i].data, j->buf[i].buf_size);
free_fifo(&j->pin);
int bch2_fs_journal_init(struct journal *j)
{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
static struct lock_class_key res_key;
unsigned i;
- int ret = 0;
-
- pr_verbose_init(c->opts, "");
spin_lock_init(&j->lock);
spin_lock_init(&j->err_lock);
((union journal_res_state)
{ .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
- if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)))
+ return -BCH_ERR_ENOMEM_journal_pin_fifo;
for (i = 0; i < ARRAY_SIZE(j->buf); i++) {
j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
- if (!j->buf[i].data) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!j->buf[i].data)
+ return -BCH_ERR_ENOMEM_journal_buf;
}
j->pin.front = j->pin.back = 1;
-out:
- pr_verbose_init(c->opts, "ret %i", ret);
- return ret;
+ return 0;
}
/* debug: */
prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
prt_printf(out, "prereserved:\t\t%u/%u\n", j->prereserved.reserved, j->prereserved.remaining);
- prt_printf(out, "watermark:\t\t%s\n", bch2_journal_watermarks[j->watermark]);
+ prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
{
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *pin;
+ unsigned i;
spin_lock(&j->lock);
*seq = max(*seq, j->pin.front);
prt_newline(out);
printbuf_indent_add(out, 2);
- list_for_each_entry(pin, &pin_list->list, list) {
- prt_printf(out, "\t%px %ps", pin, pin->flush);
- prt_newline(out);
- }
-
- list_for_each_entry(pin, &pin_list->key_cache_list, list) {
- prt_printf(out, "\t%px %ps", pin, pin->flush);
- prt_newline(out);
- }
+ for (i = 0; i < ARRAY_SIZE(pin_list->list); i++)
+ list_for_each_entry(pin, &pin_list->list[i], list) {
+ prt_printf(out, "\t%px %ps", pin, pin->flush);
+ prt_newline(out);
+ }
if (!list_empty(&pin_list->flushed)) {
prt_printf(out, "flushed:");