+// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
-#include "alloc_background.h"
#include "alloc_foreground.h"
-#include "btree_gc.h"
-#include "btree_update.h"
+#include "btree_io.h"
+#include "btree_update_interior.h"
#include "buckets.h"
#include "checksum.h"
+#include "disk_groups.h"
#include "error.h"
+#include "io.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include <trace/events/bcachefs.h>
+static void __journal_replay_free(struct journal_replay *i)
+{
+ list_del(&i->list);
+ kvpfree(i, offsetof(struct journal_replay, j) +
+ vstruct_bytes(&i->j));
+}
+
+static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
+{
+ i->ignore = true;
+
+ if (!c->opts.read_entire_journal)
+ __journal_replay_free(i);
+}
+
struct journal_list {
struct closure cl;
struct mutex lock;
/*
* Given a journal entry we read, add it to the list of journal entries to
* be replayed:
*/
static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
- struct journal_list *jlist, struct jset *j)
+ struct bch_extent_ptr entry_ptr,
+ struct journal_list *jlist, struct jset *j,
+ bool bad)
{
- struct journal_replay *i, *pos;
+ struct journal_replay *i, *pos, *dup = NULL;
+ struct bch_extent_ptr *ptr;
struct list_head *where;
size_t bytes = vstruct_bytes(j);
- __le64 last_seq;
- int ret;
+ u64 last_seq = 0;
+ int ret = JOURNAL_ENTRY_ADD_OK;
- last_seq = !list_empty(jlist->head)
- ? list_last_entry(jlist->head, struct journal_replay,
- list)->j.last_seq
- : 0;
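+ /*
+ * Only flush entries carry a meaningful last_seq; scan backwards for
+ * the most recent one:
+ */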
+ list_for_each_entry_reverse(i, jlist->head, list) {
+ if (!JSET_NO_FLUSH(&i->j)) {
+ last_seq = le64_to_cpu(i->j.last_seq);
+ break;
+ }
+ }
/* Is this entry older than the range we need? */
- if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
+ if (!c->opts.read_entire_journal &&
+ le64_to_cpu(j->seq) < last_seq) {
ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
goto out;
}
/* Drop entries we don't need anymore */
- list_for_each_entry_safe(i, pos, jlist->head, list) {
- if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
- break;
- list_del(&i->list);
- kvpfree(i, offsetof(struct journal_replay, j) +
- vstruct_bytes(&i->j));
+ if (!JSET_NO_FLUSH(j)) {
+ list_for_each_entry_safe(i, pos, jlist->head, list) {
+ if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq))
+ break;
+ journal_replay_free(c, i);
+ }
}
list_for_each_entry_reverse(i, jlist->head, list) {
- /* Duplicate? */
- if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) {
- fsck_err_on(bytes != vstruct_bytes(&i->j) ||
- memcmp(j, &i->j, bytes), c,
- "found duplicate but non identical journal entries (seq %llu)",
- le64_to_cpu(j->seq));
- goto found;
- }
-
if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) {
where = &i->list;
goto add;
where = jlist->head;
add:
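+ /* the entry at the insert position is a duplicate if the seqs match: */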
+ dup = where->next != jlist->head
+ ? container_of(where->next, struct journal_replay, list)
+ : NULL;
+
+ if (dup && le64_to_cpu(j->seq) != le64_to_cpu(dup->j.seq))
+ dup = NULL;
+
+ /*
+ * Duplicate journal entries? If so we want the one that didn't have a
+ * checksum error:
+ */
+ if (dup) {
+ if (dup->bad) {
+ /* we'll replace @dup: */
+ } else if (bad) {
+ i = dup;
+ goto found;
+ } else {
+ fsck_err_on(bytes != vstruct_bytes(&dup->j) ||
+ memcmp(j, &dup->j, bytes), c,
+ "found duplicate but non identical journal entries (seq %llu)",
+ le64_to_cpu(j->seq));
+ i = dup;
+ goto found;
+ }
+ }
+
i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
if (!i) {
ret = -ENOMEM;
goto out;
}
- list_add(&i->list, where);
- i->devs.nr = 0;
+ i->nr_ptrs = 0;
+ i->bad = bad;
+ i->ignore = false;
memcpy(&i->j, j, bytes);
+
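+ /* replacing @dup: take over its list of pointers: */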
+ if (dup) {
+ i->nr_ptrs = dup->nr_ptrs;
+ memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs));
+ __journal_replay_free(dup);
+ }
+
+ list_add(&i->list, where);
found:
- if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx))
- bch2_dev_list_add_dev(&i->devs, ca->dev_idx);
- else
- fsck_err_on(1, c, "duplicate journal entries on same device");
- ret = JOURNAL_ENTRY_ADD_OK;
+ for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) {
+ if (ptr->dev == ca->dev_idx) {
+ bch_err(c, "duplicate journal entry %llu on same device",
+ le64_to_cpu(i->j.seq));
+ goto out;
+ }
+ }
+
+ if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) {
+ bch_err(c, "found too many copies of journal entry %llu",
+ le64_to_cpu(i->j.seq));
+ goto out;
+ }
+
+ i->ptrs[i->nr_ptrs++] = entry_ptr;
out:
fsck_err:
return ret;
#define journal_entry_err_on(cond, c, msg, ...) \
((cond) ? journal_entry_err(c, msg, ##__VA_ARGS__) : false)
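+/*
+ * Returned by journal_validate_key() when it dropped the key in place;
+ * the caller must not advance to bkey_next():
+ */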
+#define FSCK_DELETED_KEY 5
+
static int journal_validate_key(struct bch_fs *c, struct jset *jset,
struct jset_entry *entry,
- struct bkey_i *k, enum btree_node_type key_type,
+ unsigned level, enum btree_id btree_id,
+ struct bkey_i *k,
const char *type, int write)
{
void *next = vstruct_next(entry);
int ret = 0;
if (journal_entry_err_on(!k->k.u64s, c,
- "invalid %s in journal: k->u64s 0", type)) {
+ "invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: k->u64s 0",
+ type, le64_to_cpu(jset->seq),
+ (u64 *) entry - jset->_data,
+ le32_to_cpu(jset->u64s),
+ (u64 *) k - entry->_data,
+ le16_to_cpu(entry->u64s))) {
entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
journal_entry_null_range(vstruct_next(entry), next);
- return 0;
+ return FSCK_DELETED_KEY;
}
if (journal_entry_err_on((void *) bkey_next(k) >
(void *) vstruct_next(entry), c,
- "invalid %s in journal: extends past end of journal entry",
- type)) {
+ "invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: extends past end of journal entry",
+ type, le64_to_cpu(jset->seq),
+ (u64 *) entry - jset->_data,
+ le32_to_cpu(jset->u64s),
+ (u64 *) k - entry->_data,
+ le16_to_cpu(entry->u64s))) {
entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
journal_entry_null_range(vstruct_next(entry), next);
- return 0;
+ return FSCK_DELETED_KEY;
}
if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c,
- "invalid %s in journal: bad format %u",
- type, k->k.format)) {
- le16_add_cpu(&entry->u64s, -k->k.u64s);
+ "invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: bad format %u",
+ type, le64_to_cpu(jset->seq),
+ (u64 *) entry - jset->_data,
+ le32_to_cpu(jset->u64s),
+ (u64 *) k - entry->_data,
+ le16_to_cpu(entry->u64s),
+ k->k.format)) {
+ le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
journal_entry_null_range(vstruct_next(entry), next);
- return 0;
+ return FSCK_DELETED_KEY;
}
- if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN)
- bch2_bkey_swab(NULL, bkey_to_packed(k));
+ if (!write)
+ bch2_bkey_compat(level, btree_id, version,
+ JSET_BIG_ENDIAN(jset), write,
+ NULL, bkey_to_packed(k));
- if (!write &&
- version < bcachefs_metadata_version_bkey_renumber)
- bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
-
- invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k), key_type);
+ invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(k),
+ __btree_node_type(level, btree_id));
if (invalid) {
char buf[160];
bch2_bkey_val_to_text(&PBUF(buf), c, bkey_i_to_s_c(k));
- mustfix_fsck_err(c, "invalid %s in journal: %s\n%s",
- type, invalid, buf);
-
- le16_add_cpu(&entry->u64s, -k->k.u64s);
+ mustfix_fsck_err(c, "invalid %s in jset %llu offset %zi/%u entry offset %zi/%u: %s\n%s",
+ type, le64_to_cpu(jset->seq),
+ (u64 *) entry - jset->_data,
+ le32_to_cpu(jset->u64s),
+ (u64 *) k - entry->_data,
+ le16_to_cpu(entry->u64s),
+ invalid, buf);
+
+ le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
journal_entry_null_range(vstruct_next(entry), next);
- return 0;
+ return FSCK_DELETED_KEY;
}
- if (write &&
- version < bcachefs_metadata_version_bkey_renumber)
- bch2_bkey_renumber(key_type, bkey_to_packed(k), write);
+ if (write)
+ bch2_bkey_compat(level, btree_id, version,
+ JSET_BIG_ENDIAN(jset), write,
+ NULL, bkey_to_packed(k));
fsck_err:
return ret;
}
struct jset_entry *entry,
int write)
{
- struct bkey_i *k;
+ struct bkey_i *k = entry->start;
- vstruct_for_each(entry, k) {
- int ret = journal_validate_key(c, jset, entry, k,
- __btree_node_type(entry->level,
- entry->btree_id),
- "key", write);
- if (ret)
- return ret;
+ while (k != vstruct_last(entry)) {
+ int ret = journal_validate_key(c, jset, entry,
+ entry->level,
+ entry->btree_id,
+ k, "key", write);
+ if (ret == FSCK_DELETED_KEY)
+ continue;
+
+ k = bkey_next(k);
}
return 0;
return 0;
}
- return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE,
+ return journal_validate_key(c, jset, entry, 1, entry->btree_id, k,
"btree root", write);
fsck_err:
return ret;
if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c,
"invalid journal seq blacklist entry: bad size")) {
journal_entry_null_range(entry, vstruct_next(entry));
+ goto out;
}
bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry);
"invalid journal seq blacklist entry: start > end")) {
journal_entry_null_range(entry, vstruct_next(entry));
}
+out:
+fsck_err:
+ return ret;
+}
+
+static int journal_entry_validate_usage(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ int write)
+{
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes < sizeof(*u),
+ c,
+ "invalid journal entry usage: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static int journal_entry_validate_data_usage(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ int write)
+{
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes < sizeof(*u) ||
+ bytes < sizeof(*u) + u->r.nr_devs,
+ c,
+ "invalid journal entry usage: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static int journal_entry_validate_clock(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ int write)
+{
+ struct jset_entry_clock *clock =
+ container_of(entry, struct jset_entry_clock, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes != sizeof(*clock),
+ c, "invalid journal entry clock: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+ if (journal_entry_err_on(clock->rw > 1,
+ c, "invalid journal entry clock: bad rw")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static int journal_entry_validate_dev_usage(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ int write)
+{
+ struct jset_entry_dev_usage *u =
+ container_of(entry, struct jset_entry_dev_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ unsigned expected = sizeof(*u) + sizeof(u->d[0]) * 7; /* Current value of BCH_DATA_NR */
+ unsigned dev;
+ int ret = 0;
+
+ if (journal_entry_err_on(bytes < expected,
+ c, "invalid journal entry dev usage: bad size (%u < %u)",
+ bytes, expected)) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+ dev = le32_to_cpu(u->dev);
+
+ if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
+ c, "invalid journal entry dev usage: bad dev")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+ if (journal_entry_err_on(u->pad,
+ c, "invalid journal entry dev usage: bad pad")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
fsck_err:
return ret;
static int journal_entry_validate(struct bch_fs *c, struct jset *jset,
struct jset_entry *entry, int write)
{
- int ret = 0;
-
- if (entry->type >= BCH_JSET_ENTRY_NR) {
- journal_entry_err(c, "invalid journal entry type %u",
- entry->type);
- journal_entry_null_range(entry, vstruct_next(entry));
- return 0;
- }
-
- ret = bch2_jset_entry_ops[entry->type].validate(c, jset, entry, write);
-fsck_err:
- return ret;
+ return entry->type < BCH_JSET_ENTRY_NR
+ ? bch2_jset_entry_ops[entry->type].validate(c, jset,
+ entry, write)
+ : 0;
}
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
}
static int jset_validate(struct bch_fs *c,
+ struct bch_dev *ca,
struct jset *jset, u64 sector,
unsigned bucket_sectors_left,
unsigned sectors_read,
return JOURNAL_ENTRY_NONE;
version = le32_to_cpu(jset->version);
- if ((version != BCH_JSET_VERSION_OLD &&
- version < bcachefs_metadata_version_min) ||
- version >= bcachefs_metadata_version_max) {
- bch_err(c, "unknown journal entry version %u", jset->version);
- return BCH_FSCK_UNKNOWN_VERSION;
+ if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD &&
+ version < bcachefs_metadata_version_min) ||
+ version >= bcachefs_metadata_version_max, c,
+ "%s sector %llu seq %llu: unknown journal entry version %u",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq),
+ version)) {
+ /* don't try to continue: */
+ return EINVAL;
}
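+ /*
+ * The entry extends past what we read; if the rest still fits in
+ * this bucket, tell the caller to reread with a bigger buffer:
+ */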
+ if (bytes > (sectors_read << 9) &&
+ sectors_read < bucket_sectors_left)
+ return JOURNAL_ENTRY_REREAD;
+
if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c,
- "journal entry too big (%zu bytes), sector %lluu",
- bytes, sector)) {
- /* XXX: note we might have missing journal entries */
- return JOURNAL_ENTRY_BAD;
+ "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq), bytes)) {
+ ret = JOURNAL_ENTRY_BAD;
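+ /* truncate the entry to the u64s that fit within the bucket: */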
+ le32_add_cpu(&jset->u64s,
+ -((bytes - (bucket_sectors_left << 9)) / 8));
}
- if (bytes > sectors_read << 9)
- return JOURNAL_ENTRY_REREAD;
+ if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
+ "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq),
+ JSET_CSUM_TYPE(jset))) {
+ ret = JOURNAL_ENTRY_BAD;
+ goto csum_done;
+ }
- if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c,
- "journal entry with unknown csum type %llu sector %lluu",
- JSET_CSUM_TYPE(jset), sector))
- return JOURNAL_ENTRY_BAD;
+ if (write)
+ goto csum_done;
csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset);
if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c,
- "journal checksum bad, sector %llu", sector)) {
- /* XXX: retry IO, when we start retrying checksum errors */
- /* XXX: note we might have missing journal entries */
- return JOURNAL_ENTRY_BAD;
- }
+ "%s sector %llu seq %llu: journal checksum bad",
+ ca ? ca->name : c->name,
+ sector, le64_to_cpu(jset->seq)))
+ ret = JOURNAL_ENTRY_BAD;
bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
jset->encrypted_start,
vstruct_end(jset) - (void *) jset->encrypted_start);
-
- if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
- "invalid journal entry: last_seq > seq"))
+csum_done:
+ /* last_seq is ignored when JSET_NO_FLUSH is true */
+ if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
+ le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c,
+ "invalid journal entry: last_seq > seq (%llu > %llu)",
+ le64_to_cpu(jset->last_seq),
+ le64_to_cpu(jset->seq))) {
jset->last_seq = jset->seq;
-
- return 0;
+ return JOURNAL_ENTRY_BAD;
+ }
fsck_err:
return ret;
}
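+/*
+ * Validate a jset we're about to write: the whole entry is in memory,
+ * so the size and reread checks cannot fail:
+ */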
+static int jset_validate_for_write(struct bch_fs *c, struct jset *jset)
+{
+ unsigned sectors = vstruct_sectors(jset, c->block_bits);
+
+ return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?:
+ jset_validate_entries(c, jset, WRITE);
+}
+
struct journal_read_buf {
void *data;
size_t size;
sectors_read << 9));
bio_set_dev(bio, ca->disk_sb.bdev);
bio->bi_iter.bi_sector = offset;
- bio->bi_iter.bi_size = sectors_read << 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0);
- bch2_bio_map(bio, buf->data);
+ bch2_bio_map(bio, buf->data, sectors_read << 9);
ret = submit_bio_wait(bio);
bio_put(bio);
if (bch2_dev_io_err_on(ret, ca,
- "journal read from sector %llu",
+ "journal read error: sector %llu",
offset) ||
- bch2_meta_read_fault("journal"))
- return -EIO;
+ bch2_meta_read_fault("journal")) {
+ /*
+ * We don't error out of the recovery process
+ * here, since the relevant journal entry may be
+ * found on a different device, and missing or
+ * unreadable journal entries are handled later
+ * in recovery:
+ */
+ return 0;
+ }
j = buf->data;
}
- ret = jset_validate(c, j, offset,
+ ret = jset_validate(c, ca, j, offset,
end - offset, sectors_read,
READ);
switch (ret) {
case BCH_FSCK_OK:
+ sectors = vstruct_sectors(j, c->block_bits);
break;
case JOURNAL_ENTRY_REREAD:
if (vstruct_bytes(j) > buf->size) {
goto next_block;
case JOURNAL_ENTRY_BAD:
saw_bad = true;
+ /*
+ * On checksum error we don't really trust the size
+ * field of the journal entry we read, so try reading
+ * again at next block boundary:
+ */
sectors = c->opts.block_size;
- goto next_block;
+ break;
default:
return ret;
}
ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
mutex_lock(&jlist->lock);
- ret = journal_entry_add(c, ca, jlist, j);
+ ret = journal_entry_add(c, ca, (struct bch_extent_ptr) {
+ .dev = ca->dev_idx,
+ .offset = offset,
+ }, jlist, j, ret != 0);
mutex_unlock(&jlist->lock);
switch (ret) {
default:
return ret;
}
-
- sectors = vstruct_sectors(j, c->block_bits);
next_block:
pr_debug("next");
offset += sectors;
ja->sectors_free = 0;
/*
- * Set last_idx to indicate the entire journal is full and needs to be
+ * Set dirty_idx to indicate the entire journal is full and needs to be
* reclaimed - journal reclaim will immediately reclaim whatever isn't
* pinned when it first runs:
*/
- ja->last_idx = (ja->cur_idx + 1) % ja->nr;
+ ja->discard_idx = ja->dirty_idx_ondisk =
+ ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
kvpfree(buf.data, buf.size);
percpu_ref_put(&ca->io_ref);
goto out;
}
-void bch2_journal_entries_free(struct list_head *list)
+static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct journal_replay *j)
{
+ unsigned i;
- while (!list_empty(list)) {
- struct journal_replay *i =
- list_first_entry(list, struct journal_replay, list);
- list_del(&i->list);
- kvpfree(i, offsetof(struct journal_replay, j) +
- vstruct_bytes(&i->j));
- }
-}
-
-int bch2_journal_set_seq(struct bch_fs *c, u64 last_seq, u64 end_seq)
-{
- struct journal *j = &c->journal;
- struct journal_entry_pin_list *p;
- u64 seq, nr = end_seq - last_seq + 1;
-
- if (nr > j->pin.size) {
- free_fifo(&j->pin);
- init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
- if (!j->pin.data) {
- bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
- return -ENOMEM;
- }
- }
-
- atomic64_set(&j->seq, end_seq);
- j->last_seq_ondisk = last_seq;
-
- j->pin.front = last_seq;
- j->pin.back = end_seq + 1;
+ for (i = 0; i < j->nr_ptrs; i++) {
+ struct bch_dev *ca = c->devs[j->ptrs[i].dev];
- fifo_for_each_entry_ptr(p, &j->pin, seq) {
- INIT_LIST_HEAD(&p->list);
- INIT_LIST_HEAD(&p->flushed);
- atomic_set(&p->count, 0);
- p->devs.nr = 0;
+ if (i)
+ pr_buf(out, " ");
+ pr_buf(out, "%u:%llu (offset %llu)",
+ j->ptrs[i].dev,
+ (u64) j->ptrs[i].offset,
+ (u64) j->ptrs[i].offset % ca->mi.bucket_size);
}
-
- return 0;
}
-int bch2_journal_read(struct bch_fs *c, struct list_head *list)
+int bch2_journal_read(struct bch_fs *c, struct list_head *list,
+ u64 *blacklist_seq, u64 *start_seq)
{
- struct journal *j = &c->journal;
struct journal_list jlist;
- struct journal_replay *i;
- struct journal_entry_pin_list *p;
+ struct journal_replay *i, *t;
struct bch_dev *ca;
- u64 cur_seq, end_seq;
unsigned iter;
size_t keys = 0, entries = 0;
bool degraded = false;
+ u64 seq, last_seq = 0;
int ret = 0;
closure_init_stack(&jlist.cl);
for_each_member_device(ca, c, iter) {
if (!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
- !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL)))
+ !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
continue;
if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
if (jlist.ret)
return jlist.ret;
- if (list_empty(list)){
- bch_err(c, "no journal entries found");
- return BCH_FSCK_REPAIR_IMPOSSIBLE;
+ if (list_empty(list)) {
+ bch_info(c, "journal read done, but no entries found");
+ return 0;
}
- list_for_each_entry(i, list, list) {
- struct bch_replicas_padded replicas;
- char buf[80];
-
- bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, i->devs);
-
- ret = jset_validate_entries(c, &i->j, READ);
- if (ret)
- goto fsck_err;
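+ /* *start_seq is one past the newest entry we read: */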
+ i = list_last_entry(list, struct journal_replay, list);
+ *start_seq = le64_to_cpu(i->j.seq) + 1;
- /*
- * If we're mounting in degraded mode - if we didn't read all
- * the devices - this is wrong:
- */
+ /*
+ * Find most recent flush entry, and ignore newer non flush entries -
+ * those entries will be blacklisted:
+ */
+ list_for_each_entry_safe_reverse(i, t, list, list) {
+ if (i->ignore)
+ continue;
- if (!degraded &&
- (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
- fsck_err_on(!bch2_replicas_marked(c, &replicas.e, false), c,
- "superblock not marked as containing replicas %s",
- (bch2_replicas_entry_to_text(&PBUF(buf),
- &replicas.e), buf)))) {
- ret = bch2_mark_replicas(c, &replicas.e);
- if (ret)
- return ret;
+ if (!JSET_NO_FLUSH(&i->j)) {
+ last_seq = le64_to_cpu(i->j.last_seq);
+ *blacklist_seq = le64_to_cpu(i->j.seq) + 1;
+ break;
}
- }
- i = list_last_entry(list, struct journal_replay, list);
+ journal_replay_free(c, i);
+ }
- ret = bch2_journal_set_seq(c,
- le64_to_cpu(i->j.last_seq),
- le64_to_cpu(i->j.seq));
- if (ret)
- return ret;
+ if (!last_seq) {
+ fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
+ return -1;
+ }
- mutex_lock(&j->blacklist_lock);
+ /* Drop blacklisted entries and entries older than last_seq: */
+ list_for_each_entry_safe(i, t, list, list) {
+ if (i->ignore)
+ continue;
- list_for_each_entry(i, list, list) {
- p = journal_seq_pin(j, le64_to_cpu(i->j.seq));
+ seq = le64_to_cpu(i->j.seq);
+ if (seq < last_seq) {
+ journal_replay_free(c, i);
+ continue;
+ }
- atomic_set(&p->count, 1);
- p->devs = i->devs;
+ if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
+ fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
+ "found blacklisted journal entry %llu", seq);
- if (bch2_journal_seq_blacklist_read(j, i)) {
- mutex_unlock(&j->blacklist_lock);
- return -ENOMEM;
+ journal_replay_free(c, i);
}
}
- mutex_unlock(&j->blacklist_lock);
-
- cur_seq = journal_last_seq(j);
- end_seq = le64_to_cpu(list_last_entry(list,
- struct journal_replay, list)->j.seq);
-
+ /* Check for missing entries: */
+ seq = last_seq;
list_for_each_entry(i, list, list) {
- struct jset_entry *entry;
- struct bkey_i *k, *_n;
- bool blacklisted;
-
- mutex_lock(&j->blacklist_lock);
- while (cur_seq < le64_to_cpu(i->j.seq) &&
- bch2_journal_seq_blacklist_find(j, cur_seq))
- cur_seq++;
-
- blacklisted = bch2_journal_seq_blacklist_find(j,
- le64_to_cpu(i->j.seq));
- mutex_unlock(&j->blacklist_lock);
-
- fsck_err_on(blacklisted, c,
- "found blacklisted journal entry %llu",
- le64_to_cpu(i->j.seq));
-
- fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c,
- "journal entries %llu-%llu missing! (replaying %llu-%llu)",
- cur_seq, le64_to_cpu(i->j.seq) - 1,
- journal_last_seq(j), end_seq);
-
- cur_seq = le64_to_cpu(i->j.seq) + 1;
-
- for_each_jset_key(k, _n, entry, &i->j)
- keys++;
- entries++;
- }
+ if (i->ignore)
+ continue;
- bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
- keys, entries, journal_cur_seq(j));
-fsck_err:
- return ret;
-}
+ BUG_ON(seq > le64_to_cpu(i->j.seq));
-/* journal replay: */
+ while (seq < le64_to_cpu(i->j.seq)) {
+ u64 missing_start, missing_end;
+ char buf1[200], buf2[200];
-int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
-{
- struct journal *j = &c->journal;
- struct journal_entry_pin_list *pin_list;
- struct bkey_i *k, *_n;
- struct jset_entry *entry;
- struct journal_replay *i, *n;
- int ret = 0;
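+ /* blacklisted sequence numbers are expected to be missing: */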
+ while (seq < le64_to_cpu(i->j.seq) &&
+ bch2_journal_seq_is_blacklisted(c, seq, false))
+ seq++;
- list_for_each_entry_safe(i, n, list, list) {
- j->replay_journal_seq = le64_to_cpu(i->j.seq);
+ if (seq == le64_to_cpu(i->j.seq))
+ break;
- for_each_jset_key(k, _n, entry, &i->j) {
+ missing_start = seq;
- if (entry->btree_id == BTREE_ID_ALLOC) {
- /*
- * allocation code handles replay for
- * BTREE_ID_ALLOC keys:
- */
- ret = bch2_alloc_replay_key(c, k);
- } else {
- /*
- * We might cause compressed extents to be
- * split, so we need to pass in a
- * disk_reservation:
- */
- struct disk_reservation disk_res =
- bch2_disk_reservation_init(c, 0);
+ while (seq < le64_to_cpu(i->j.seq) &&
+ !bch2_journal_seq_is_blacklisted(c, seq, false))
+ seq++;
- ret = bch2_btree_insert(c, entry->btree_id, k,
- &disk_res, NULL,
- BTREE_INSERT_NOFAIL|
- BTREE_INSERT_JOURNAL_REPLAY);
- }
+ if (i->list.prev != list) {
+ struct printbuf out = PBUF(buf1);
+ struct journal_replay *p = list_prev_entry(i, list);
- if (ret) {
- bch_err(c, "journal replay: error %d while replaying key",
- ret);
- goto err;
- }
+ bch2_journal_ptrs_to_text(&out, c, p);
+ pr_buf(&out, " size %llu", vstruct_sectors(&p->j, c->block_bits));
+ } else
+ sprintf(buf1, "(none)");
+ bch2_journal_ptrs_to_text(&PBUF(buf2), c, i);
- cond_resched();
+ missing_end = seq - 1;
+ fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
+ " prev at %s\n"
+ " next at %s",
+ missing_start, missing_end,
+ last_seq, *blacklist_seq - 1,
+ buf1, buf2);
}
- pin_list = journal_seq_pin(j, j->replay_journal_seq);
-
- if (atomic_dec_and_test(&pin_list->count))
- journal_wake(j);
+ seq++;
}
- j->replay_journal_seq = 0;
-
- bch2_journal_set_replay_done(j);
- bch2_journal_flush_all_pins(j);
- ret = bch2_journal_error(j);
-err:
- bch2_journal_entries_free(list);
- return ret;
-}
-
-/* journal write: */
-
-static void bch2_journal_add_btree_root(struct journal_buf *buf,
- enum btree_id id, struct bkey_i *k,
- unsigned level)
-{
- struct jset_entry *entry;
-
- entry = bch2_journal_add_entry_noreservation(buf, k->k.u64s);
- entry->type = BCH_JSET_ENTRY_btree_root;
- entry->btree_id = id;
- entry->level = level;
- memcpy_u64s(entry->_data, k, k->k.u64s);
-}
-
-static unsigned journal_dev_buckets_available(struct journal *j,
- struct journal_device *ja)
-{
- unsigned next = (ja->cur_idx + 1) % ja->nr;
- unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
-
- /*
- * Don't use the last bucket unless writing the new last_seq
- * will make another bucket available:
- */
- if (available &&
- journal_last_seq(j) <= ja->bucket_seq[ja->last_idx])
- --available;
-
- return available;
-}
-
-/* returns number of sectors available for next journal entry: */
-int bch2_journal_entry_sectors(struct journal *j)
-{
- struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct bch_dev *ca;
- unsigned sectors_available = UINT_MAX;
- unsigned i, nr_online = 0, nr_devs = 0;
-
- lockdep_assert_held(&j->lock);
-
- rcu_read_lock();
- for_each_member_device_rcu(ca, c, i,
- &c->rw_devs[BCH_DATA_JOURNAL]) {
- struct journal_device *ja = &ca->journal;
- unsigned buckets_this_device, sectors_this_device;
+ list_for_each_entry(i, list, list) {
+ struct jset_entry *entry;
+ struct bkey_i *k, *_n;
+ struct bch_replicas_padded replicas = {
+ .e.data_type = BCH_DATA_journal,
+ .e.nr_required = 1,
+ };
+ unsigned ptr;
+ char buf[80];
- if (!ja->nr)
+ if (i->ignore)
continue;
- buckets_this_device = journal_dev_buckets_available(j, ja);
- sectors_this_device = ja->sectors_free;
+ ret = jset_validate_entries(c, &i->j, READ);
+ if (ret)
+ goto fsck_err;
+
+ for (ptr = 0; ptr < i->nr_ptrs; ptr++)
+ replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev;
- nr_online++;
+ bch2_replicas_entry_sort(&replicas.e);
/*
- * We that we don't allocate the space for a journal entry
- * until we write it out - thus, account for it here:
+ * If we're mounting in degraded mode - if we didn't read all
+ * the devices - this is wrong:
*/
- if (j->prev_buf_sectors >= sectors_this_device) {
- if (!buckets_this_device)
- continue;
- buckets_this_device--;
- sectors_this_device = ca->mi.bucket_size;
+ if (!degraded &&
+ (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
+ fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
+ "superblock not marked as containing replicas %s",
+ (bch2_replicas_entry_to_text(&PBUF(buf),
+ &replicas.e), buf)))) {
+ ret = bch2_mark_replicas(c, &replicas.e);
+ if (ret)
+ return ret;
}
- sectors_this_device -= j->prev_buf_sectors;
-
- if (buckets_this_device)
- sectors_this_device = ca->mi.bucket_size;
-
- if (!sectors_this_device)
- continue;
-
- sectors_available = min(sectors_available,
- sectors_this_device);
- nr_devs++;
+ for_each_jset_key(k, _n, entry, &i->j)
+ keys++;
+ entries++;
}
- rcu_read_unlock();
- if (nr_online < c->opts.metadata_replicas_required)
- return -EROFS;
-
- if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas))
- return 0;
+ bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu",
+ keys, entries, *start_seq);
- return sectors_available;
+ if (*start_seq != *blacklist_seq)
+ bch_info(c, "dropped unflushed entries %llu-%llu",
+ *blacklist_seq, *start_seq - 1);
+fsck_err:
+ return ret;
}
+/* journal write: */
+
static void __journal_write_alloc(struct journal *j,
struct journal_buf *w,
struct dev_alloc_list *devs_sorted,
sectors > ja->sectors_free)
continue;
- bch2_dev_stripe_increment(c, ca, &j->wp.stripe);
+ bch2_dev_stripe_increment(ca, &j->wp.stripe);
bch2_bkey_append_ptr(&w->key,
(struct bch_extent_ptr) {
unsigned sectors)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_devs_mask devs;
struct journal_device *ja;
struct bch_dev *ca;
struct dev_alloc_list devs_sorted;
+ unsigned target = c->opts.metadata_target ?:
+ c->opts.foreground_target;
unsigned i, replicas = 0, replicas_want =
READ_ONCE(c->opts.metadata_replicas);
rcu_read_lock();
+retry:
+ devs = target_rw_devs(c, BCH_DATA_journal, target);
- devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
- &c->rw_devs[BCH_DATA_JOURNAL]);
+ devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs);
- spin_lock(&j->lock);
__journal_write_alloc(j, w, &devs_sorted,
sectors, &replicas, replicas_want);
if (sectors > ja->sectors_free &&
sectors <= ca->mi.bucket_size &&
- journal_dev_buckets_available(j, ja)) {
+ bch2_journal_dev_buckets_available(j, ja,
+ journal_space_discarded)) {
ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
ja->sectors_free = ca->mi.bucket_size;
+
+ /*
+ * ja->bucket_seq[ja->cur_idx] must always have
+ * something sensible:
+ */
+ ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq);
}
}
__journal_write_alloc(j, w, &devs_sorted,
sectors, &replicas, replicas_want);
-done:
- if (replicas >= replicas_want)
- j->prev_buf_sectors = 0;
- spin_unlock(&j->lock);
+ if (replicas < replicas_want && target) {
+ /* Retry from all devices: */
+ target = 0;
+ goto retry;
+ }
+done:
rcu_read_unlock();
+ BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX);
+
return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
}
unsigned new_size = READ_ONCE(j->buf_size_want);
void *new_buf;
- if (buf->size >= new_size)
+ if (buf->buf_size >= new_size)
return;
new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN);
if (!new_buf)
return;
- memcpy(new_buf, buf->data, buf->size);
- kvpfree(buf->data, buf->size);
- buf->data = new_buf;
- buf->size = new_size;
+ memcpy(new_buf, buf->data, buf->buf_size);
+
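+ /* swap under the journal lock so readers see a consistent buffer: */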
+ spin_lock(&j->lock);
+ swap(buf->data, new_buf);
+ swap(buf->buf_size, new_size);
+ spin_unlock(&j->lock);
+
+ kvpfree(new_buf, new_size);
+}
+
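+/* the oldest journal buffer that hasn't yet been written: */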
+static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
+{
+ return j->buf + j->reservations.unwritten_idx;
}
static void journal_write_done(struct closure *cl)
{
struct journal *j = container_of(cl, struct journal, io);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
- struct journal_buf *w = journal_prev_buf(j);
+ struct journal_buf *w = journal_last_unwritten_buf(j);
struct bch_devs_list devs =
bch2_bkey_devs(bkey_i_to_s_c(&w->key));
struct bch_replicas_padded replicas;
- u64 seq = le64_to_cpu(w->data->seq);
- u64 last_seq = le64_to_cpu(w->data->last_seq);
+ union journal_res_state old, new;
+ u64 v, seq, last_seq;
+ int err = 0;
bch2_time_stats_update(j->write_time, j->write_start_time);
if (!devs.nr) {
bch_err(c, "unable to write journal to sufficient devices");
- goto err;
+ err = -EIO;
+ } else {
+ bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, devs);
+ if (bch2_mark_replicas(c, &replicas.e))
+ err = -EIO;
}
- bch2_devlist_to_replicas(&replicas.e, BCH_DATA_JOURNAL, devs);
-
- if (bch2_mark_replicas(c, &replicas.e))
- goto err;
+ if (err)
+ bch2_fatal_error(c);
spin_lock(&j->lock);
- j->seq_ondisk = seq;
- j->last_seq_ondisk = last_seq;
+ seq = le64_to_cpu(w->data->seq);
+ last_seq = le64_to_cpu(w->data->last_seq);
if (seq >= j->pin.front)
journal_seq_pin(j, seq)->devs = devs;
+ j->seq_ondisk = seq;
+ if (err && (!j->err_seq || seq < j->err_seq))
+ j->err_seq = seq;
+
+ if (!JSET_NO_FLUSH(w->data)) {
+ j->flushed_seq_ondisk = seq;
+ j->last_seq_ondisk = last_seq;
+ }
+
/*
* Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
* more buckets:
* Must come before signaling write completion, for
* bch2_fs_journal_stop():
*/
- mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
-out:
+ journal_reclaim_kick(&c->journal);
+
/* also must come before signalling write completion: */
closure_debug_destroy(cl);
- BUG_ON(!j->reservations.prev_buf_unwritten);
- atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v,
- &j->reservations.counter);
+ v = atomic64_read(&j->reservations.counter);
+ do {
+ old.v = new.v = v;
+ BUG_ON(new.idx == new.unwritten_idx);
+
+ new.unwritten_idx++;
+ } while ((v = atomic64_cmpxchg(&j->reservations.counter,
+ old.v, new.v)) != old.v);
+
+ bch2_journal_space_available(j);
closure_wake_up(&w->wait);
journal_wake(j);
if (test_bit(JOURNAL_NEED_WRITE, &j->flags))
mod_delayed_work(system_freezable_wq, &j->write_work, 0);
spin_unlock(&j->lock);
- return;
-err:
- bch2_fatal_error(c);
- bch2_journal_halt(j);
- spin_lock(&j->lock);
- goto out;
+
+ if (new.unwritten_idx != new.idx &&
+ !journal_state_count(new, new.unwritten_idx))
+ closure_call(&j->io, bch2_journal_write, system_highpri_wq, NULL);
}
static void journal_write_endio(struct bio *bio)
struct bch_dev *ca = bio->bi_private;
struct journal *j = &ca->fs->journal;
- if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") ||
+ if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write error: %s",
+ bch2_blk_status_to_str(bio->bi_status)) ||
bch2_meta_write_fault("journal")) {
- struct journal_buf *w = journal_prev_buf(j);
+ struct journal_buf *w = journal_last_unwritten_buf(j);
unsigned long flags;
spin_lock_irqsave(&j->err_lock, flags);
percpu_ref_put(&ca->io_ref);
}
+static void do_journal_write(struct closure *cl)
+{
+ struct journal *j = container_of(cl, struct journal, io);
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ struct bch_dev *ca;
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ struct bch_extent_ptr *ptr;
+ struct bio *bio;
+ unsigned sectors = vstruct_sectors(w->data, c->block_bits);
+
+ extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
+ ca = bch_dev_bkey_exists(c, ptr->dev);
+ if (!percpu_ref_tryget(&ca->io_ref)) {
+ /* XXX: fix this */
+ bch_err(c, "missing device for journal write\n");
+ continue;
+ }
+
+ this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal],
+ sectors);
+
+ bio = ca->journal.bio;
+ bio_reset(bio);
+ bio_set_dev(bio, ca->disk_sb.bdev);
+ bio->bi_iter.bi_sector = ptr->offset;
+ bio->bi_end_io = journal_write_endio;
+ bio->bi_private = ca;
+ bio->bi_opf = REQ_OP_WRITE|REQ_SYNC|REQ_META;
+
+ BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
+ ca->prev_journal_sector = bio->bi_iter.bi_sector;
+
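+ /*
+ * Flush writes need FUA; the preflush goes on this bio unless it
+ * was already issued separately to every device:
+ */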
+ if (!JSET_NO_FLUSH(w->data))
+ bio->bi_opf |= REQ_FUA;
+ if (!JSET_NO_FLUSH(w->data) && !w->separate_flush)
+ bio->bi_opf |= REQ_PREFLUSH;
+
+ bch2_bio_map(bio, w->data, sectors << 9);
+
+ trace_journal_write(bio);
+ closure_bio_submit(bio, cl);
+
+ ca->journal.bucket_seq[ca->journal.cur_idx] =
+ le64_to_cpu(w->data->seq);
+ }
+
+ continue_at(cl, journal_write_done, system_highpri_wq);
+ return;
+}
+
void bch2_journal_write(struct closure *cl)
{
struct journal *j = container_of(cl, struct journal, io);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
- struct journal_buf *w = journal_prev_buf(j);
+ struct journal_buf *w = journal_last_unwritten_buf(j);
+ struct jset_entry *start, *end;
struct jset *jset;
struct bio *bio;
- struct bch_extent_ptr *ptr;
bool validate_before_checksum = false;
- unsigned i, sectors, bytes;
+ unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
+ int ret;
+
+ BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
journal_buf_realloc(j, w);
jset = w->data;
j->write_start_time = local_clock();
- mutex_lock(&c->btree_root_lock);
- for (i = 0; i < BTREE_ID_NR; i++) {
- struct btree_root *r = &c->btree_roots[i];
- if (r->alive)
- bch2_journal_add_btree_root(w, i, &r->key, r->level);
+ spin_lock(&j->lock);
+ if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
+ !w->must_flush &&
+ (jiffies - j->last_flush_write) < msecs_to_jiffies(j->write_delay_ms) &&
+ test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
+ w->noflush = true;
+ SET_JSET_NO_FLUSH(jset, true);
+ jset->last_seq = 0;
+
+ j->nr_noflush_writes++;
+ } else {
+ j->last_flush_write = jiffies;
+ j->nr_flush_writes++;
}
- c->btree_roots_dirty = false;
- mutex_unlock(&c->btree_root_lock);
+ spin_unlock(&j->lock);
+
+ /*
+ * New btree roots are set by journalling them; when the journal entry
+ * gets written we have to propagate them to c->btree_roots
+ *
+ * But, every journal entry we write has to contain all the btree roots
+ * (at least for now); so after we copy btree roots to c->btree_roots we
+ * have to get any missing btree roots and add them to this journal
+ * entry:
+ */
+
+ bch2_journal_entries_to_btree_roots(c, jset);
+
+ start = end = vstruct_last(jset);
+
+ end = bch2_btree_roots_to_journal_entries(c, jset->start, end);
+
+ bch2_journal_super_entries_add_common(c, &end,
+ le64_to_cpu(jset->seq));
+ u64s = (u64 *) end - (u64 *) start;
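+ /* these entries fit in the space reserved via j->entry_u64s_reserved: */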
+ BUG_ON(u64s > j->entry_u64s_reserved);
+
+ le32_add_cpu(&jset->u64s, u64s);
+ BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
journal_write_compact(jset);
- jset->read_clock = cpu_to_le16(c->bucket_clock[READ].hand);
- jset->write_clock = cpu_to_le16(c->bucket_clock[WRITE].hand);
jset->magic = cpu_to_le64(jset_magic(c));
-
jset->version = c->sb.version < bcachefs_metadata_version_new_versioning
? cpu_to_le32(BCH_JSET_VERSION_OLD)
: cpu_to_le32(c->sb.version);
SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
+ if (journal_entry_empty(jset))
+ j->last_empty_seq = le64_to_cpu(jset->seq);
+
if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
validate_before_checksum = true;
- if (le32_to_cpu(jset->version) <
- bcachefs_metadata_version_bkey_renumber)
+ if (le32_to_cpu(jset->version) <= bcachefs_metadata_version_inode_btree_change)
validate_before_checksum = true;
if (validate_before_checksum &&
- jset_validate_entries(c, jset, WRITE))
+ jset_validate_for_write(c, jset))
goto err;
bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
jset->encrypted_start,
vstruct_end(jset) - (void *) jset->encrypted_start);
jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
journal_nonce(jset), jset);
if (!validate_before_checksum &&
- jset_validate_entries(c, jset, WRITE))
+ jset_validate_for_write(c, jset))
goto err;
sectors = vstruct_sectors(jset, c->block_bits);
- BUG_ON(sectors > j->prev_buf_sectors);
+ BUG_ON(sectors > w->sectors);
- bytes = vstruct_bytes(w->data);
- memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
+ bytes = vstruct_bytes(jset);
+ memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
- if (journal_write_alloc(j, w, sectors)) {
- bch2_journal_halt(j);
+retry_alloc:
+ spin_lock(&j->lock);
+ ret = journal_write_alloc(j, w, sectors);
+
+ if (ret && j->can_discard) {
+ spin_unlock(&j->lock);
+ bch2_journal_do_discards(j);
+ goto retry_alloc;
+ }
+
+ /*
+ * write is allocated, no longer need to account for it in
+ * bch2_journal_space_available():
+ */
+ w->sectors = 0;
+
+ /*
+ * journal entry has been compacted and allocated, recalculate space
+ * available:
+ */
+ bch2_journal_space_available(j);
+ spin_unlock(&j->lock);
+
+ if (ret) {
bch_err(c, "Unable to allocate journal write");
bch2_fatal_error(c);
continue_at(cl, journal_write_done, system_highpri_wq);
if (c->opts.nochanges)
goto no_io;
- extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
- ca = bch_dev_bkey_exists(c, ptr->dev);
- if (!percpu_ref_tryget(&ca->io_ref)) {
- /* XXX: fix this */
- bch_err(c, "missing device for journal write\n");
- continue;
- }
-
- this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
- sectors);
-
- bio = ca->journal.bio;
- bio_reset(bio);
- bio_set_dev(bio, ca->disk_sb.bdev);
- bio->bi_iter.bi_sector = ptr->offset;
- bio->bi_iter.bi_size = sectors << 9;
- bio->bi_end_io = journal_write_endio;
- bio->bi_private = ca;
- bio_set_op_attrs(bio, REQ_OP_WRITE,
- REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
- bch2_bio_map(bio, jset);
-
- trace_journal_write(bio);
- closure_bio_submit(bio, cl);
+ for_each_rw_member(ca, c, i)
+ nr_rw_members++;
- ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
- }
+ if (nr_rw_members > 1)
+ w->separate_flush = true;
- for_each_rw_member(ca, c, i)
- if (journal_flushes_device(ca) &&
- !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
+ if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
+ for_each_rw_member(ca, c, i) {
percpu_ref_get(&ca->io_ref);
bio = ca->journal.bio;
bio->bi_private = ca;
closure_bio_submit(bio, cl);
}
+ }
+ bch2_bucket_seq_cleanup(c);
+
+ continue_at(cl, do_journal_write, system_highpri_wq);
+ return;
no_io:
bch2_bucket_seq_cleanup(c);