X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fjournal_io.c;h=107521e10ff9fffb3b3cc3fd646921e84964fe4d;hb=ddac1641ee1e2686c2211a8d671ea723634dfc89;hp=0cb1bc3c0df71f92410e39f757ef24d69b370be7;hpb=2ab2ab0f781ae750473763e8a042c900a982d399;p=bcachefs-tools-debian

diff --git a/libbcachefs/journal_io.c b/libbcachefs/journal_io.c
index 0cb1bc3..107521e 100644
--- a/libbcachefs/journal_io.c
+++ b/libbcachefs/journal_io.c
@@ -1,11 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
 #include "bcachefs.h"
 #include "alloc_background.h"
 #include "alloc_foreground.h"
-#include "btree_gc.h"
-#include "btree_update.h"
+#include "btree_io.h"
+#include "btree_update_interior.h"
 #include "buckets.h"
 #include "checksum.h"
+#include "disk_groups.h"
 #include "error.h"
+#include "io.h"
 #include "journal.h"
 #include "journal_io.h"
 #include "journal_reclaim.h"
@@ -14,10 +17,36 @@
 
 #include <trace/events/bcachefs.h>
 
+static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
+{
+	return (seq - c->journal_entries_base_seq) & (~0U >> 1);
+}
+
+static void __journal_replay_free(struct bch_fs *c,
+				  struct journal_replay *i)
+{
+	struct journal_replay **p =
+		genradix_ptr(&c->journal_entries,
+			     journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));
+
+	BUG_ON(*p != i);
+	*p = NULL;
+	kvpfree(i, offsetof(struct journal_replay, j) +
+		vstruct_bytes(&i->j));
+}
+
+static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
+{
+	i->ignore = true;
+
+	if (!c->opts.read_entire_journal)
+		__journal_replay_free(c, i);
+}
+
 struct journal_list {
 	struct closure		cl;
+	u64			last_seq;
 	struct mutex		lock;
-	struct list_head	*head;
 	int			ret;
 };
 
@@ -29,67 +58,107 @@ struct journal_list {
  * be replayed:
  */
 static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
-			     struct journal_list *jlist, struct jset *j)
+			     struct journal_ptr entry_ptr,
+			     struct journal_list *jlist, struct jset *j,
+			     bool bad)
 {
-	struct journal_replay *i, *pos;
-	struct list_head *where;
+	struct genradix_iter iter;
+	struct journal_replay **_i, *i, *dup;
+	struct journal_ptr *ptr;
 	size_t bytes = vstruct_bytes(j);
-	__le64 last_seq;
-	int ret;
-
-	last_seq = !list_empty(jlist->head)
-		? list_last_entry(jlist->head, struct journal_replay,
-				  list)->j.last_seq
-		: 0;
+	u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
+	int ret = JOURNAL_ENTRY_ADD_OK;
 
 	/* Is this entry older than the range we need? */
-	if (le64_to_cpu(j->seq) < le64_to_cpu(last_seq)) {
-		ret = JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
-		goto out;
-	}
+	if (!c->opts.read_entire_journal &&
+	    le64_to_cpu(j->seq) < jlist->last_seq)
+		return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
+
+	/*
+	 * genradixes are indexed by a ulong, not a u64, so we can't index them
+	 * by sequence number directly: Assume instead that they will all fall
+	 * within the range of +-2 billion of the first one we find.
+ */ + if (!c->journal_entries_base_seq) + c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); /* Drop entries we don't need anymore */ - list_for_each_entry_safe(i, pos, jlist->head, list) { - if (le64_to_cpu(i->j.seq) >= le64_to_cpu(j->last_seq)) - break; - list_del(&i->list); - kvpfree(i, offsetof(struct journal_replay, j) + - vstruct_bytes(&i->j)); + if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) { + genradix_for_each_from(&c->journal_entries, iter, _i, + journal_entry_radix_idx(c, jlist->last_seq)) { + i = *_i; + + if (!i || i->ignore) + continue; + + if (le64_to_cpu(i->j.seq) >= last_seq) + break; + journal_replay_free(c, i); + } } - list_for_each_entry_reverse(i, jlist->head, list) { - /* Duplicate? */ - if (le64_to_cpu(j->seq) == le64_to_cpu(i->j.seq)) { - fsck_err_on(bytes != vstruct_bytes(&i->j) || - memcmp(j, &i->j, bytes), c, + jlist->last_seq = max(jlist->last_seq, last_seq); + + _i = genradix_ptr_alloc(&c->journal_entries, + journal_entry_radix_idx(c, le64_to_cpu(j->seq)), + GFP_KERNEL); + if (!_i) + return -ENOMEM; + + /* + * Duplicate journal entries? If so we want the one that didn't have a + * checksum error: + */ + dup = *_i; + if (dup) { + if (dup->bad) { + /* we'll replace @dup: */ + } else if (bad) { + i = dup; + goto found; + } else { + fsck_err_on(bytes != vstruct_bytes(&dup->j) || + memcmp(j, &dup->j, bytes), c, "found duplicate but non identical journal entries (seq %llu)", le64_to_cpu(j->seq)); + i = dup; goto found; } + } + + i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); + if (!i) + return -ENOMEM; + + i->nr_ptrs = 0; + i->bad = bad; + i->ignore = false; + memcpy(&i->j, j, bytes); + + if (dup) { + i->nr_ptrs = dup->nr_ptrs; + memcpy(i->ptrs, dup->ptrs, sizeof(dup->ptrs)); + __journal_replay_free(c, dup); + } + - if (le64_to_cpu(j->seq) > le64_to_cpu(i->j.seq)) { - where = &i->list; - goto add; + *_i = i; +found: + for (ptr = i->ptrs; ptr < i->ptrs + i->nr_ptrs; ptr++) { + if (ptr->dev == ca->dev_idx) { + bch_err(c, "duplicate journal entry %llu on same device", + le64_to_cpu(i->j.seq)); + goto out; } } - where = jlist->head; -add: - i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); - if (!i) { - ret = -ENOMEM; + if (i->nr_ptrs >= ARRAY_SIZE(i->ptrs)) { + bch_err(c, "found too many copies of journal entry %llu", + le64_to_cpu(i->j.seq)); goto out; } - list_add(&i->list, where); - i->devs.nr = 0; - memcpy(&i->j, j, bytes); -found: - if (!bch2_dev_list_has_dev(i->devs, ca->dev_idx)) - bch2_dev_list_add_dev(&i->devs, ca->dev_idx); - else - fsck_err_on(1, c, "duplicate journal entries on same device"); - ret = JOURNAL_ENTRY_ADD_OK; + i->ptrs[i->nr_ptrs++] = entry_ptr; out: fsck_err: return ret; @@ -128,7 +197,7 @@ static void journal_entry_null_range(void *start, void *end) bch_err(c, "corrupt metadata before write:\n" \ msg, ##__VA_ARGS__); \ if (bch2_fs_inconsistent(c)) { \ - ret = BCH_FSCK_ERRORS_NOT_FIXED; \ + ret = -BCH_ERR_fsck_errors_not_fixed; \ goto fsck_err; \ } \ break; \ @@ -139,83 +208,130 @@ static void journal_entry_null_range(void *start, void *end) #define journal_entry_err_on(cond, c, msg, ...) \ ((cond) ? 
journal_entry_err(c, msg, ##__VA_ARGS__) : false) -static int journal_validate_key(struct bch_fs *c, struct jset *jset, +#define FSCK_DELETED_KEY 5 + +static int journal_validate_key(struct bch_fs *c, const char *where, struct jset_entry *entry, - struct bkey_i *k, enum bkey_type key_type, - const char *type, int write) + unsigned level, enum btree_id btree_id, + struct bkey_i *k, + unsigned version, int big_endian, int write) { void *next = vstruct_next(entry); - const char *invalid; - char buf[160]; + struct printbuf buf = PRINTBUF; int ret = 0; if (journal_entry_err_on(!k->k.u64s, c, - "invalid %s in journal: k->u64s 0", type)) { + "invalid key in %s at %s offset %zi/%u: k->u64s 0", + bch2_jset_entry_types[entry->type], where, + (u64 *) k - entry->_data, + le16_to_cpu(entry->u64s))) { entry->u64s = cpu_to_le16((u64 *) k - entry->_data); journal_entry_null_range(vstruct_next(entry), next); - return 0; + return FSCK_DELETED_KEY; } if (journal_entry_err_on((void *) bkey_next(k) > (void *) vstruct_next(entry), c, - "invalid %s in journal: extends past end of journal entry", - type)) { + "invalid key in %s at %s offset %zi/%u: extends past end of journal entry", + bch2_jset_entry_types[entry->type], where, + (u64 *) k - entry->_data, + le16_to_cpu(entry->u64s))) { entry->u64s = cpu_to_le16((u64 *) k - entry->_data); journal_entry_null_range(vstruct_next(entry), next); - return 0; + return FSCK_DELETED_KEY; } if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT, c, - "invalid %s in journal: bad format %u", - type, k->k.format)) { - le16_add_cpu(&entry->u64s, -k->k.u64s); + "invalid key in %s at %s offset %zi/%u: bad format %u", + bch2_jset_entry_types[entry->type], where, + (u64 *) k - entry->_data, + le16_to_cpu(entry->u64s), + k->k.format)) { + le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); memmove(k, bkey_next(k), next - (void *) bkey_next(k)); journal_entry_null_range(vstruct_next(entry), next); - return 0; + return FSCK_DELETED_KEY; } - if (JSET_BIG_ENDIAN(jset) != CPU_BIG_ENDIAN) - bch2_bkey_swab(key_type, NULL, bkey_to_packed(k)); + if (!write) + bch2_bkey_compat(level, btree_id, version, big_endian, + write, NULL, bkey_to_packed(k)); + + if (bch2_bkey_invalid(c, bkey_i_to_s_c(k), + __btree_node_type(level, btree_id), write, &buf)) { + printbuf_reset(&buf); + prt_printf(&buf, "invalid key in %s at %s offset %zi/%u:", + bch2_jset_entry_types[entry->type], where, + (u64 *) k - entry->_data, + le16_to_cpu(entry->u64s)); + prt_newline(&buf); + printbuf_indent_add(&buf, 2); + + bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k)); + prt_newline(&buf); + bch2_bkey_invalid(c, bkey_i_to_s_c(k), + __btree_node_type(level, btree_id), write, &buf); - invalid = bch2_bkey_invalid(c, key_type, bkey_i_to_s_c(k)); - if (invalid) { - bch2_bkey_val_to_text(c, key_type, buf, sizeof(buf), - bkey_i_to_s_c(k)); - mustfix_fsck_err(c, "invalid %s in journal: %s\n%s", - type, invalid, buf); + mustfix_fsck_err(c, "%s", buf.buf); - le16_add_cpu(&entry->u64s, -k->k.u64s); + le16_add_cpu(&entry->u64s, -((u16) k->k.u64s)); memmove(k, bkey_next(k), next - (void *) bkey_next(k)); journal_entry_null_range(vstruct_next(entry), next); - return 0; + + printbuf_exit(&buf); + return FSCK_DELETED_KEY; } + + if (write) + bch2_bkey_compat(level, btree_id, version, big_endian, + write, NULL, bkey_to_packed(k)); fsck_err: + printbuf_exit(&buf); return ret; } -static int journal_entry_validate_btree_keys(struct bch_fs *c, - struct jset *jset, +static int journal_entry_btree_keys_validate(struct bch_fs *c, + const char *where, 
struct jset_entry *entry, - int write) + unsigned version, int big_endian, int write) { - struct bkey_i *k; + struct bkey_i *k = entry->start; - vstruct_for_each(entry, k) { - int ret = journal_validate_key(c, jset, entry, k, - bkey_type(entry->level, - entry->btree_id), - "key", write); - if (ret) - return ret; + while (k != vstruct_last(entry)) { + int ret = journal_validate_key(c, where, entry, + entry->level, + entry->btree_id, + k, version, big_endian, write); + if (ret == FSCK_DELETED_KEY) + continue; + + k = bkey_next(k); } return 0; } -static int journal_entry_validate_btree_root(struct bch_fs *c, - struct jset *jset, +static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct bkey_i *k; + bool first = true; + + vstruct_for_each(entry, k) { + if (!first) { + prt_newline(out); + prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]); + } + prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level); + bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k)); + first = false; + } +} + +static int journal_entry_btree_root_validate(struct bch_fs *c, + const char *where, struct jset_entry *entry, - int write) + unsigned version, int big_endian, int write) { struct bkey_i *k = entry->start; int ret = 0; @@ -234,25 +350,36 @@ static int journal_entry_validate_btree_root(struct bch_fs *c, return 0; } - return journal_validate_key(c, jset, entry, k, BKEY_TYPE_BTREE, - "btree root", write); + return journal_validate_key(c, where, entry, 1, entry->btree_id, k, + version, big_endian, write); fsck_err: return ret; } -static int journal_entry_validate_prio_ptrs(struct bch_fs *c, - struct jset *jset, +static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + journal_entry_btree_keys_to_text(out, c, entry); +} + +static int journal_entry_prio_ptrs_validate(struct bch_fs *c, + const char *where, struct jset_entry *entry, - int write) + unsigned version, int big_endian, int write) { /* obsolete, don't care: */ return 0; } -static int journal_entry_validate_blacklist(struct bch_fs *c, - struct jset *jset, +static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ +} + +static int journal_entry_blacklist_validate(struct bch_fs *c, + const char *where, struct jset_entry *entry, - int write) + unsigned version, int big_endian, int write) { int ret = 0; @@ -264,10 +391,19 @@ fsck_err: return ret; } -static int journal_entry_validate_blacklist_v2(struct bch_fs *c, - struct jset *jset, +static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_blacklist *bl = + container_of(entry, struct jset_entry_blacklist, entry); + + prt_printf(out, "seq=%llu", le64_to_cpu(bl->seq)); +} + +static int journal_entry_blacklist_v2_validate(struct bch_fs *c, + const char *where, struct jset_entry *entry, - int write) + unsigned version, int big_endian, int write) { struct jset_entry_blacklist_v2 *bl_entry; int ret = 0; @@ -275,6 +411,7 @@ static int journal_entry_validate_blacklist_v2(struct bch_fs *c, if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2, c, "invalid journal seq blacklist entry: bad size")) { journal_entry_null_range(entry, vstruct_next(entry)); + goto out; } bl_entry = container_of(entry, struct jset_entry_blacklist_v2, entry); @@ -284,49 +421,261 @@ static int journal_entry_validate_blacklist_v2(struct bch_fs *c, "invalid journal 
seq blacklist entry: start > end")) { journal_entry_null_range(entry, vstruct_next(entry)); } +out: +fsck_err: + return ret; +} + +static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_blacklist_v2 *bl = + container_of(entry, struct jset_entry_blacklist_v2, entry); + + prt_printf(out, "start=%llu end=%llu", + le64_to_cpu(bl->start), + le64_to_cpu(bl->end)); +} + +static int journal_entry_usage_validate(struct bch_fs *c, + const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) +{ + struct jset_entry_usage *u = + container_of(entry, struct jset_entry_usage, entry); + unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); + int ret = 0; + + if (journal_entry_err_on(bytes < sizeof(*u), + c, + "invalid journal entry usage: bad size")) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } + +fsck_err: + return ret; +} + +static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_usage *u = + container_of(entry, struct jset_entry_usage, entry); + + prt_printf(out, "type=%s v=%llu", + bch2_fs_usage_types[u->entry.btree_id], + le64_to_cpu(u->v)); +} + +static int journal_entry_data_usage_validate(struct bch_fs *c, + const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) +{ + struct jset_entry_data_usage *u = + container_of(entry, struct jset_entry_data_usage, entry); + unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); + int ret = 0; + + if (journal_entry_err_on(bytes < sizeof(*u) || + bytes < sizeof(*u) + u->r.nr_devs, + c, + "invalid journal entry usage: bad size")) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } + +fsck_err: + return ret; +} + +static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_data_usage *u = + container_of(entry, struct jset_entry_data_usage, entry); + + bch2_replicas_entry_to_text(out, &u->r); + prt_printf(out, "=%llu", le64_to_cpu(u->v)); +} + +static int journal_entry_clock_validate(struct bch_fs *c, + const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) +{ + struct jset_entry_clock *clock = + container_of(entry, struct jset_entry_clock, entry); + unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); + int ret = 0; + + if (journal_entry_err_on(bytes != sizeof(*clock), + c, "invalid journal entry clock: bad size")) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } + + if (journal_entry_err_on(clock->rw > 1, + c, "invalid journal entry clock: bad rw")) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } + +fsck_err: + return ret; +} + +static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_clock *clock = + container_of(entry, struct jset_entry_clock, entry); + + prt_printf(out, "%s=%llu", clock->rw ? 
"write" : "read", le64_to_cpu(clock->time)); +} + +static int journal_entry_dev_usage_validate(struct bch_fs *c, + const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) +{ + struct jset_entry_dev_usage *u = + container_of(entry, struct jset_entry_dev_usage, entry); + unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64); + unsigned expected = sizeof(*u); + unsigned dev; + int ret = 0; + + if (journal_entry_err_on(bytes < expected, + c, "invalid journal entry dev usage: bad size (%u < %u)", + bytes, expected)) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } + + dev = le32_to_cpu(u->dev); + + if (journal_entry_err_on(!bch2_dev_exists2(c, dev), + c, "invalid journal entry dev usage: bad dev")) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } + + if (journal_entry_err_on(u->pad, + c, "invalid journal entry dev usage: bad pad")) { + journal_entry_null_range(entry, vstruct_next(entry)); + return ret; + } fsck_err: return ret; } +static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_dev_usage *u = + container_of(entry, struct jset_entry_dev_usage, entry); + unsigned i, nr_types = jset_entry_dev_usage_nr_types(u); + + prt_printf(out, "dev=%u", le32_to_cpu(u->dev)); + + for (i = 0; i < nr_types; i++) { + if (i < BCH_DATA_NR) + prt_printf(out, " %s", bch2_data_types[i]); + else + prt_printf(out, " (unknown data type %u)", i); + prt_printf(out, ": buckets=%llu sectors=%llu fragmented=%llu", + le64_to_cpu(u->d[i].buckets), + le64_to_cpu(u->d[i].sectors), + le64_to_cpu(u->d[i].fragmented)); + } + + prt_printf(out, " buckets_ec: %llu", le64_to_cpu(u->buckets_ec)); +} + +static int journal_entry_log_validate(struct bch_fs *c, + const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) +{ + return 0; +} + +static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry); + unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d); + + prt_printf(out, "%.*s", bytes, l->d); +} + +static int journal_entry_overwrite_validate(struct bch_fs *c, const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) +{ + return journal_entry_btree_keys_validate(c, where, entry, version, big_endian, write); +} + +static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + journal_entry_btree_keys_to_text(out, c, entry); +} + struct jset_entry_ops { - int (*validate)(struct bch_fs *, struct jset *, - struct jset_entry *, int); + int (*validate)(struct bch_fs *, const char *, + struct jset_entry *, unsigned, int, int); + void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *); }; static const struct jset_entry_ops bch2_jset_entry_ops[] = { #define x(f, nr) \ [BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \ - .validate = journal_entry_validate_##f, \ + .validate = journal_entry_##f##_validate, \ + .to_text = journal_entry_##f##_to_text, \ }, BCH_JSET_ENTRY_TYPES() #undef x }; -static int journal_entry_validate(struct bch_fs *c, struct jset *jset, - struct jset_entry *entry, int write) +int bch2_journal_entry_validate(struct bch_fs *c, const char *where, + struct jset_entry *entry, + unsigned version, int big_endian, int write) { - int ret = 0; + return 
entry->type < BCH_JSET_ENTRY_NR + ? bch2_jset_entry_ops[entry->type].validate(c, where, entry, + version, big_endian, write) + : 0; +} - if (entry->type >= BCH_JSET_ENTRY_NR) { - journal_entry_err(c, "invalid journal entry type %u", - entry->type); - journal_entry_null_range(entry, vstruct_next(entry)); - return 0; +void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c, + struct jset_entry *entry) +{ + if (entry->type < BCH_JSET_ENTRY_NR) { + prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]); + bch2_jset_entry_ops[entry->type].to_text(out, c, entry); + } else { + prt_printf(out, "(unknown type %u)", entry->type); } - - ret = bch2_jset_entry_ops[entry->type].validate(c, jset, entry, write); -fsck_err: - return ret; } static int jset_validate_entries(struct bch_fs *c, struct jset *jset, int write) { + char buf[100]; struct jset_entry *entry; int ret = 0; vstruct_for_each(jset, entry) { + scnprintf(buf, sizeof(buf), "jset %llu entry offset %zi/%u", + le64_to_cpu(jset->seq), + (u64 *) entry - jset->_data, + le32_to_cpu(jset->u64s)); + if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset), c, "journal entry extends past end of jset")) { @@ -334,7 +683,9 @@ static int jset_validate_entries(struct bch_fs *c, struct jset *jset, break; } - ret = journal_entry_validate(c, jset, entry, write); + ret = bch2_journal_entry_validate(c, buf, entry, + le32_to_cpu(jset->version), + JSET_BIG_ENDIAN(jset), write); if (ret) break; } @@ -343,6 +694,7 @@ fsck_err: } static int jset_validate(struct bch_fs *c, + struct bch_dev *ca, struct jset *jset, u64 sector, unsigned bucket_sectors_left, unsigned sectors_read, @@ -350,53 +702,83 @@ static int jset_validate(struct bch_fs *c, { size_t bytes = vstruct_bytes(jset); struct bch_csum csum; + unsigned version; int ret = 0; if (le64_to_cpu(jset->magic) != jset_magic(c)) return JOURNAL_ENTRY_NONE; - if (le32_to_cpu(jset->version) != BCACHE_JSET_VERSION) { - bch_err(c, "unknown journal entry version %u", - le32_to_cpu(jset->version)); - return BCH_FSCK_UNKNOWN_VERSION; + version = le32_to_cpu(jset->version); + if (journal_entry_err_on((version != BCH_JSET_VERSION_OLD && + version < bcachefs_metadata_version_min) || + version >= bcachefs_metadata_version_max, c, + "%s sector %llu seq %llu: unknown journal entry version %u", + ca ? ca->name : c->name, + sector, le64_to_cpu(jset->seq), + version)) { + /* don't try to continue: */ + return EINVAL; } + if (bytes > (sectors_read << 9) && + sectors_read < bucket_sectors_left) + return JOURNAL_ENTRY_REREAD; + if (journal_entry_err_on(bytes > bucket_sectors_left << 9, c, - "journal entry too big (%zu bytes), sector %lluu", - bytes, sector)) { - /* XXX: note we might have missing journal entries */ - return JOURNAL_ENTRY_BAD; + "%s sector %llu seq %llu: journal entry too big (%zu bytes)", + ca ? ca->name : c->name, + sector, le64_to_cpu(jset->seq), bytes)) { + ret = JOURNAL_ENTRY_BAD; + le32_add_cpu(&jset->u64s, + -((bytes - (bucket_sectors_left << 9)) / 8)); } - if (bytes > sectors_read << 9) - return JOURNAL_ENTRY_REREAD; + if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c, + "%s sector %llu seq %llu: journal entry with unknown csum type %llu", + ca ? 
ca->name : c->name, + sector, le64_to_cpu(jset->seq), + JSET_CSUM_TYPE(jset))) { + ret = JOURNAL_ENTRY_BAD; + goto csum_done; + } - if (fsck_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)), c, - "journal entry with unknown csum type %llu sector %lluu", - JSET_CSUM_TYPE(jset), sector)) - return JOURNAL_ENTRY_BAD; + if (write) + goto csum_done; csum = csum_vstruct(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset); if (journal_entry_err_on(bch2_crc_cmp(csum, jset->csum), c, - "journal checksum bad, sector %llu", sector)) { - /* XXX: retry IO, when we start retrying checksum errors */ - /* XXX: note we might have missing journal entries */ - return JOURNAL_ENTRY_BAD; - } + "%s sector %llu seq %llu: journal checksum bad", + ca ? ca->name : c->name, + sector, le64_to_cpu(jset->seq))) + ret = JOURNAL_ENTRY_BAD; - bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), + ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset), jset->encrypted_start, vstruct_end(jset) - (void *) jset->encrypted_start); - - if (journal_entry_err_on(le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c, - "invalid journal entry: last_seq > seq")) + bch2_fs_fatal_err_on(ret, c, + "error decrypting journal entry: %i", ret); +csum_done: + /* last_seq is ignored when JSET_NO_FLUSH is true */ + if (journal_entry_err_on(!JSET_NO_FLUSH(jset) && + le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq), c, + "invalid journal entry: last_seq > seq (%llu > %llu)", + le64_to_cpu(jset->last_seq), + le64_to_cpu(jset->seq))) { jset->last_seq = jset->seq; - - return 0; + return JOURNAL_ENTRY_BAD; + } fsck_err: return ret; } +static int jset_validate_for_write(struct bch_fs *c, struct jset *jset) +{ + unsigned sectors = vstruct_sectors(jset, c->block_bits); + + return jset_validate(c, NULL, jset, 0, sectors, sectors, WRITE) ?: + jset_validate_entries(c, jset, WRITE); +} + struct journal_read_buf { void *data; size_t size; @@ -425,7 +807,7 @@ static int journal_read_buf_realloc(struct journal_read_buf *b, static int journal_read_bucket(struct bch_dev *ca, struct journal_read_buf *buf, struct journal_list *jlist, - unsigned bucket, u64 *seq, bool *entries_found) + unsigned bucket) { struct bch_fs *c = ca->fs; struct journal_device *ja = &ca->journal; @@ -441,36 +823,43 @@ static int journal_read_bucket(struct bch_dev *ca, while (offset < end) { if (!sectors_read) { struct bio *bio; + unsigned nr_bvecs; reread: sectors_read = min_t(unsigned, end - offset, buf->size >> 9); + nr_bvecs = buf_pages(buf->data, sectors_read << 9); + + bio = bio_kmalloc(nr_bvecs, GFP_KERNEL); + bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ); - bio = bio_kmalloc(GFP_KERNEL, - buf_pages(buf->data, - sectors_read << 9)); - bio_set_dev(bio, ca->disk_sb.bdev); - bio->bi_iter.bi_sector = offset; - bio->bi_iter.bi_size = sectors_read << 9; - bio_set_op_attrs(bio, REQ_OP_READ, 0); - bch2_bio_map(bio, buf->data); + bio->bi_iter.bi_sector = offset; + bch2_bio_map(bio, buf->data, sectors_read << 9); ret = submit_bio_wait(bio); - bio_put(bio); + kfree(bio); if (bch2_dev_io_err_on(ret, ca, - "journal read from sector %llu", + "journal read error: sector %llu", offset) || - bch2_meta_read_fault("journal")) - return -EIO; + bch2_meta_read_fault("journal")) { + /* + * We don't error out of the recovery process + * here, since the relevant journal entry may be + * found on a different device, and missing or + * no journal entries will be handled later + */ + return 0; + } j = buf->data; } - ret = jset_validate(c, j, offset, + 
ret = jset_validate(c, ca, j, offset, end - offset, sectors_read, READ); switch (ret) { - case BCH_FSCK_OK: + case 0: + sectors = vstruct_sectors(j, c->block_bits); break; case JOURNAL_ENTRY_REREAD: if (vstruct_bytes(j) > buf->size) { @@ -483,12 +872,17 @@ reread: case JOURNAL_ENTRY_NONE: if (!saw_bad) return 0; - sectors = c->opts.block_size; + sectors = block_sectors(c); goto next_block; case JOURNAL_ENTRY_BAD: saw_bad = true; - sectors = c->opts.block_size; - goto next_block; + /* + * On checksum error we don't really trust the size + * field of the journal entry we read, so try reading + * again at next block boundary: + */ + sectors = block_sectors(c); + break; default: return ret; } @@ -505,23 +899,23 @@ reread: ja->bucket_seq[bucket] = le64_to_cpu(j->seq); mutex_lock(&jlist->lock); - ret = journal_entry_add(c, ca, jlist, j); + ret = journal_entry_add(c, ca, (struct journal_ptr) { + .dev = ca->dev_idx, + .bucket = bucket, + .bucket_offset = offset - + bucket_to_sector(ca, ja->buckets[bucket]), + .sector = offset, + }, jlist, j, ret != 0); mutex_unlock(&jlist->lock); switch (ret) { case JOURNAL_ENTRY_ADD_OK: - *entries_found = true; break; case JOURNAL_ENTRY_ADD_OUT_OF_RANGE: break; default: return ret; } - - if (le64_to_cpu(j->seq) > *seq) - *seq = le64_to_cpu(j->seq); - - sectors = vstruct_sectors(j, c->block_bits); next_block: pr_debug("next"); offset += sectors; @@ -534,139 +928,95 @@ next_block: static void bch2_journal_read_device(struct closure *cl) { -#define read_bucket(b) \ - ({ \ - bool entries_found = false; \ - ret = journal_read_bucket(ca, &buf, jlist, b, &seq, \ - &entries_found); \ - if (ret) \ - goto err; \ - __set_bit(b, bitmap); \ - entries_found; \ - }) - struct journal_device *ja = container_of(cl, struct journal_device, read); struct bch_dev *ca = container_of(ja, struct bch_dev, journal); + struct bch_fs *c = ca->fs; struct journal_list *jlist = container_of(cl->parent, struct journal_list, cl); - struct request_queue *q = bdev_get_queue(ca->disk_sb.bdev); + struct journal_replay *r, **_r; + struct genradix_iter iter; struct journal_read_buf buf = { NULL, 0 }; - - DECLARE_BITMAP(bitmap, ja->nr); - unsigned i, l, r; - u64 seq = 0; - int ret; + u64 min_seq = U64_MAX; + unsigned i; + int ret = 0; if (!ja->nr) goto out; - bitmap_zero(bitmap, ja->nr); ret = journal_read_buf_realloc(&buf, PAGE_SIZE); if (ret) goto err; pr_debug("%u journal buckets", ja->nr); - /* - * If the device supports discard but not secure discard, we can't do - * the fancy fibonacci hash/binary search because the live journal - * entries might not form a contiguous range: - */ - for (i = 0; i < ja->nr; i++) - read_bucket(i); - goto search_done; - - if (!blk_queue_nonrot(q)) - goto linear_scan; - - /* - * Read journal buckets ordered by golden ratio hash to quickly - * find a sequence of buckets with valid journal entries - */ for (i = 0; i < ja->nr; i++) { - l = (i * 2654435769U) % ja->nr; - - if (test_bit(l, bitmap)) - break; - - if (read_bucket(l)) - goto bsearch; + ret = journal_read_bucket(ca, &buf, jlist, i); + if (ret) + goto err; } - /* - * If that fails, check all the buckets we haven't checked - * already - */ - pr_debug("falling back to linear search"); -linear_scan: - for (l = find_first_zero_bit(bitmap, ja->nr); - l < ja->nr; - l = find_next_zero_bit(bitmap, ja->nr, l + 1)) - if (read_bucket(l)) - goto bsearch; - - /* no journal entries on this device? 
*/ - if (l == ja->nr) - goto out; -bsearch: - /* Binary search */ - r = find_next_bit(bitmap, ja->nr, l + 1); - pr_debug("starting binary search, l %u r %u", l, r); - - while (l + 1 < r) { - unsigned m = (l + r) >> 1; - u64 cur_seq = seq; - - read_bucket(m); + /* Find the journal bucket with the highest sequence number: */ + for (i = 0; i < ja->nr; i++) { + if (ja->bucket_seq[i] > ja->bucket_seq[ja->cur_idx]) + ja->cur_idx = i; - if (cur_seq != seq) - l = m; - else - r = m; + min_seq = min(ja->bucket_seq[i], min_seq); } -search_done: /* - * Find the journal bucket with the highest sequence number: - * * If there's duplicate journal entries in multiple buckets (which * definitely isn't supposed to happen, but...) - make sure to start * cur_idx at the last of those buckets, so we don't deadlock trying to * allocate */ - seq = 0; + while (ja->bucket_seq[ja->cur_idx] > min_seq && + ja->bucket_seq[ja->cur_idx] == + ja->bucket_seq[(ja->cur_idx + 1) % ja->nr]) + ja->cur_idx = (ja->cur_idx + 1) % ja->nr; - for (i = 0; i < ja->nr; i++) - if (ja->bucket_seq[i] >= seq && - ja->bucket_seq[i] != ja->bucket_seq[(i + 1) % ja->nr]) { - /* - * When journal_next_bucket() goes to allocate for - * the first time, it'll use the bucket after - * ja->cur_idx - */ - ja->cur_idx = i; - seq = ja->bucket_seq[i]; + ja->sectors_free = ca->mi.bucket_size; + + mutex_lock(&jlist->lock); + genradix_for_each(&c->journal_entries, iter, _r) { + r = *_r; + + if (!r) + continue; + + for (i = 0; i < r->nr_ptrs; i++) { + if (r->ptrs[i].dev == ca->dev_idx && + sector_to_bucket(ca, r->ptrs[i].sector) == ja->buckets[ja->cur_idx]) { + unsigned wrote = bucket_remainder(ca, r->ptrs[i].sector) + + vstruct_sectors(&r->j, c->block_bits); + + ja->sectors_free = min(ja->sectors_free, + ca->mi.bucket_size - wrote); + } + } + } + mutex_unlock(&jlist->lock); + + if (ja->bucket_seq[ja->cur_idx] && + ja->sectors_free == ca->mi.bucket_size) { + bch_err(c, "ja->sectors_free == ca->mi.bucket_size"); + bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr); + for (i = 0; i < 3; i++) { + unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr; + bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]); } + ja->sectors_free = 0; + } /* - * Set last_idx to indicate the entire journal is full and needs to be + * Set dirty_idx to indicate the entire journal is full and needs to be * reclaimed - journal reclaim will immediately reclaim whatever isn't * pinned when it first runs: */ - ja->last_idx = (ja->cur_idx + 1) % ja->nr; - - /* - * Read buckets in reverse order until we stop finding more journal - * entries: - */ - for (i = (ja->cur_idx + ja->nr - 1) % ja->nr; - i != ja->cur_idx; - i = (i + ja->nr - 1) % ja->nr) - if (!test_bit(i, bitmap) && - !read_bucket(i)) - break; + ja->discard_idx = ja->dirty_idx_ondisk = + ja->dirty_idx = (ja->cur_idx + 1) % ja->nr; out: + bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret); kvpfree(buf.data, buf.size); percpu_ref_put(&ca->io_ref); closure_return(cl); @@ -676,76 +1026,54 @@ err: jlist->ret = ret; mutex_unlock(&jlist->lock); goto out; -#undef read_bucket } -void bch2_journal_entries_free(struct list_head *list) +void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c, + struct journal_replay *j) { + unsigned i; - while (!list_empty(list)) { - struct journal_replay *i = - list_first_entry(list, struct journal_replay, list); - list_del(&i->list); - kvpfree(i, offsetof(struct journal_replay, j) + - vstruct_bytes(&i->j)); - } -} - -int bch2_journal_set_seq(struct bch_fs *c, u64 
last_seq, u64 end_seq) -{ - struct journal *j = &c->journal; - struct journal_entry_pin_list *p; - u64 seq, nr = end_seq - last_seq + 1; - - if (nr > j->pin.size) { - free_fifo(&j->pin); - init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL); - if (!j->pin.data) { - bch_err(c, "error reallocating journal fifo (%llu open entries)", nr); - return -ENOMEM; - } - } - - atomic64_set(&j->seq, end_seq); - j->last_seq_ondisk = last_seq; + for (i = 0; i < j->nr_ptrs; i++) { + struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev); + u64 offset; - j->pin.front = last_seq; - j->pin.back = end_seq + 1; + div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset); - fifo_for_each_entry_ptr(p, &j->pin, seq) { - INIT_LIST_HEAD(&p->list); - INIT_LIST_HEAD(&p->flushed); - atomic_set(&p->count, 0); - p->devs.nr = 0; + if (i) + prt_printf(out, " "); + prt_printf(out, "%u:%u:%u (sector %llu)", + j->ptrs[i].dev, + j->ptrs[i].bucket, + j->ptrs[i].bucket_offset, + j->ptrs[i].sector); } - - return 0; } -int bch2_journal_read(struct bch_fs *c, struct list_head *list) +int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq) { - struct journal *j = &c->journal; struct journal_list jlist; - struct journal_replay *i; - struct journal_entry_pin_list *p; + struct journal_replay *i, **_i, *prev = NULL; + struct genradix_iter radix_iter; struct bch_dev *ca; - u64 cur_seq, end_seq; unsigned iter; + struct printbuf buf = PRINTBUF; size_t keys = 0, entries = 0; bool degraded = false; + u64 seq, last_seq = 0; int ret = 0; closure_init_stack(&jlist.cl); mutex_init(&jlist.lock); - jlist.head = list; + jlist.last_seq = 0; jlist.ret = 0; for_each_member_device(ca, c, iter) { - if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_JOURNAL))) + if (!c->opts.fsck && + !(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal))) continue; - if ((ca->mi.state == BCH_MEMBER_STATE_RW || - ca->mi.state == BCH_MEMBER_STATE_RO) && + if ((ca->mi.state == BCH_MEMBER_STATE_rw || + ca->mi.state == BCH_MEMBER_STATE_ro) && percpu_ref_tryget(&ca->io_ref)) closure_call(&ca->journal.read, bch2_journal_read_device, @@ -760,263 +1088,223 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list) if (jlist.ret) return jlist.ret; - if (list_empty(list)){ - bch_err(c, "no journal entries found"); - return BCH_FSCK_REPAIR_IMPOSSIBLE; - } + *start_seq = 0; - list_for_each_entry(i, list, list) { - ret = jset_validate_entries(c, &i->j, READ); - if (ret) - goto fsck_err; + /* + * Find most recent flush entry, and ignore newer non flush entries - + * those entries will be blacklisted: + */ + genradix_for_each_reverse(&c->journal_entries, radix_iter, _i) { + i = *_i; - /* - * If we're mounting in degraded mode - if we didn't read all - * the devices - this is wrong: - */ + if (!i || i->ignore) + continue; - if (!degraded && - (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) || - fsck_err_on(!bch2_replicas_marked(c, BCH_DATA_JOURNAL, - i->devs), c, - "superblock not marked as containing replicas (type %u)", - BCH_DATA_JOURNAL))) { - ret = bch2_mark_replicas(c, BCH_DATA_JOURNAL, i->devs); - if (ret) - return ret; + if (!*start_seq) + *start_seq = le64_to_cpu(i->j.seq) + 1; + + if (!JSET_NO_FLUSH(&i->j)) { + last_seq = le64_to_cpu(i->j.last_seq); + *blacklist_seq = le64_to_cpu(i->j.seq) + 1; + break; } + + journal_replay_free(c, i); } - i = list_last_entry(list, struct journal_replay, list); + if (!*start_seq) { + bch_info(c, "journal read done, but no entries found"); + return 0; + } - ret = bch2_journal_set_seq(c, - 
le64_to_cpu(i->j.last_seq), - le64_to_cpu(i->j.seq)); - if (ret) - return ret; + if (!last_seq) { + fsck_err(c, "journal read done, but no entries found after dropping non-flushes"); + ret = -1; + goto err; + } - mutex_lock(&j->blacklist_lock); + /* Drop blacklisted entries and entries older than last_seq: */ + genradix_for_each(&c->journal_entries, radix_iter, _i) { + i = *_i; - list_for_each_entry(i, list, list) { - p = journal_seq_pin(j, le64_to_cpu(i->j.seq)); + if (!i || i->ignore) + continue; - atomic_set(&p->count, 1); - p->devs = i->devs; + seq = le64_to_cpu(i->j.seq); + if (seq < last_seq) { + journal_replay_free(c, i); + continue; + } - if (bch2_journal_seq_blacklist_read(j, i)) { - mutex_unlock(&j->blacklist_lock); - return -ENOMEM; + if (bch2_journal_seq_is_blacklisted(c, seq, true)) { + fsck_err_on(!JSET_NO_FLUSH(&i->j), c, + "found blacklisted journal entry %llu", seq); + + journal_replay_free(c, i); } } - mutex_unlock(&j->blacklist_lock); + /* Check for missing entries: */ + seq = last_seq; + genradix_for_each(&c->journal_entries, radix_iter, _i) { + i = *_i; - cur_seq = journal_last_seq(j); - end_seq = le64_to_cpu(list_last_entry(list, - struct journal_replay, list)->j.seq); + if (!i || i->ignore) + continue; - list_for_each_entry(i, list, list) { - struct jset_entry *entry; - struct bkey_i *k, *_n; - bool blacklisted; + BUG_ON(seq > le64_to_cpu(i->j.seq)); - mutex_lock(&j->blacklist_lock); - while (cur_seq < le64_to_cpu(i->j.seq) && - bch2_journal_seq_blacklist_find(j, cur_seq)) - cur_seq++; + while (seq < le64_to_cpu(i->j.seq)) { + u64 missing_start, missing_end; + struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF; - blacklisted = bch2_journal_seq_blacklist_find(j, - le64_to_cpu(i->j.seq)); - mutex_unlock(&j->blacklist_lock); + while (seq < le64_to_cpu(i->j.seq) && + bch2_journal_seq_is_blacklisted(c, seq, false)) + seq++; - fsck_err_on(blacklisted, c, - "found blacklisted journal entry %llu", - le64_to_cpu(i->j.seq)); + if (seq == le64_to_cpu(i->j.seq)) + break; - fsck_err_on(le64_to_cpu(i->j.seq) != cur_seq, c, - "journal entries %llu-%llu missing! (replaying %llu-%llu)", - cur_seq, le64_to_cpu(i->j.seq) - 1, - journal_last_seq(j), end_seq); + missing_start = seq; - cur_seq = le64_to_cpu(i->j.seq) + 1; + while (seq < le64_to_cpu(i->j.seq) && + !bch2_journal_seq_is_blacklisted(c, seq, false)) + seq++; - for_each_jset_key(k, _n, entry, &i->j) - keys++; - entries++; - } + if (prev) { + bch2_journal_ptrs_to_text(&buf1, c, prev); + prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits)); + } else + prt_printf(&buf1, "(none)"); + bch2_journal_ptrs_to_text(&buf2, c, i); - bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu", - keys, entries, journal_cur_seq(j)); -fsck_err: - return ret; -} + missing_end = seq - 1; + fsck_err(c, "journal entries %llu-%llu missing! 
(replaying %llu-%llu)\n" + " prev at %s\n" + " next at %s", + missing_start, missing_end, + last_seq, *blacklist_seq - 1, + buf1.buf, buf2.buf); + + printbuf_exit(&buf1); + printbuf_exit(&buf2); + } -/* journal replay: */ + prev = i; + seq++; + } -int bch2_journal_replay(struct bch_fs *c, struct list_head *list) -{ - struct journal *j = &c->journal; - struct journal_entry_pin_list *pin_list; - struct bkey_i *k, *_n; - struct jset_entry *entry; - struct journal_replay *i, *n; - int ret = 0; + genradix_for_each(&c->journal_entries, radix_iter, _i) { + struct jset_entry *entry; + struct bkey_i *k, *_n; + struct bch_replicas_padded replicas = { + .e.data_type = BCH_DATA_journal, + .e.nr_required = 1, + }; + unsigned ptr; + + i = *_i; + if (!i || i->ignore) + continue; - list_for_each_entry_safe(i, n, list, list) { + ret = jset_validate_entries(c, &i->j, READ); + if (ret) + goto err; - j->replay_journal_seq = le64_to_cpu(i->j.seq); + for (ptr = 0; ptr < i->nr_ptrs; ptr++) + replicas.e.devs[replicas.e.nr_devs++] = i->ptrs[ptr].dev; - for_each_jset_key(k, _n, entry, &i->j) { + bch2_replicas_entry_sort(&replicas.e); - if (entry->btree_id == BTREE_ID_ALLOC) { - /* - * allocation code handles replay for - * BTREE_ID_ALLOC keys: - */ - ret = bch2_alloc_replay_key(c, k->k.p); - } else { - /* - * We might cause compressed extents to be - * split, so we need to pass in a - * disk_reservation: - */ - struct disk_reservation disk_res = - bch2_disk_reservation_init(c, 0); + /* + * If we're mounting in degraded mode - if we didn't read all + * the devices - this is wrong: + */ - ret = bch2_btree_insert(c, entry->btree_id, k, - &disk_res, NULL, - BTREE_INSERT_NOFAIL| - BTREE_INSERT_JOURNAL_REPLAY); - } + printbuf_reset(&buf); + bch2_replicas_entry_to_text(&buf, &replicas.e); - if (ret) { - bch_err(c, "journal replay: error %d while replaying key", - ret); + if (!degraded && + fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c, + "superblock not marked as containing replicas %s", + buf.buf)) { + ret = bch2_mark_replicas(c, &replicas.e); + if (ret) goto err; - } - - cond_resched(); } - pin_list = journal_seq_pin(j, j->replay_journal_seq); - - if (atomic_dec_and_test(&pin_list->count)) - journal_wake(j); + for_each_jset_key(k, _n, entry, &i->j) + keys++; + entries++; } - j->replay_journal_seq = 0; + bch_info(c, "journal read done, %zu keys in %zu entries, seq %llu", + keys, entries, *start_seq); - bch2_journal_set_replay_done(j); - bch2_journal_flush_all_pins(j); - ret = bch2_journal_error(j); + if (*start_seq != *blacklist_seq) + bch_info(c, "dropped unflushed entries %llu-%llu", + *blacklist_seq, *start_seq - 1); err: - bch2_journal_entries_free(list); +fsck_err: + printbuf_exit(&buf); return ret; } /* journal write: */ -static void bch2_journal_add_btree_root(struct journal_buf *buf, - enum btree_id id, struct bkey_i *k, - unsigned level) -{ - struct jset_entry *entry; - - entry = bch2_journal_add_entry_noreservation(buf, k->k.u64s); - entry->type = BCH_JSET_ENTRY_btree_root; - entry->btree_id = id; - entry->level = level; - memcpy_u64s(entry->_data, k, k->k.u64s); -} - -static unsigned journal_dev_buckets_available(struct journal *j, - struct bch_dev *ca) -{ - struct journal_device *ja = &ca->journal; - unsigned next = (ja->cur_idx + 1) % ja->nr; - unsigned available = (ja->last_idx + ja->nr - next) % ja->nr; - - /* - * Hack to avoid a deadlock during journal replay: - * journal replay might require setting a new btree - * root, which requires writing another journal entry - - * thus, if the journal is 
full (and this happens when - * replaying the first journal bucket's entries) we're - * screwed. - * - * So don't let the journal fill up unless we're in - * replay: - */ - if (test_bit(JOURNAL_REPLAY_DONE, &j->flags)) - available = max((int) available - 2, 0); - - /* - * Don't use the last bucket unless writing the new last_seq - * will make another bucket available: - */ - if (ja->bucket_seq[ja->last_idx] >= journal_last_seq(j)) - available = max((int) available - 1, 0); - - return available; -} - -/* returns number of sectors available for next journal entry: */ -int bch2_journal_entry_sectors(struct journal *j) +static void __journal_write_alloc(struct journal *j, + struct journal_buf *w, + struct dev_alloc_list *devs_sorted, + unsigned sectors, + unsigned *replicas, + unsigned replicas_want) { struct bch_fs *c = container_of(j, struct bch_fs, journal); + struct journal_device *ja; struct bch_dev *ca; - struct bkey_s_extent e = bkey_i_to_s_extent(&j->key); - unsigned sectors_available = UINT_MAX; - unsigned i, nr_online = 0, nr_devs = 0; - - lockdep_assert_held(&j->lock); + unsigned i; - rcu_read_lock(); - for_each_member_device_rcu(ca, c, i, - &c->rw_devs[BCH_DATA_JOURNAL]) { - struct journal_device *ja = &ca->journal; - unsigned buckets_required = 0; + if (*replicas >= replicas_want) + return; - if (!ja->nr) + for (i = 0; i < devs_sorted->nr; i++) { + ca = rcu_dereference(c->devs[devs_sorted->devs[i]]); + if (!ca) continue; - sectors_available = min_t(unsigned, sectors_available, - ca->mi.bucket_size); + ja = &ca->journal; /* - * Note that we don't allocate the space for a journal entry - * until we write it out - thus, if we haven't started the write - * for the previous entry we have to make sure we have space for - * it too: + * Check that we can use this device, and aren't already using + * it: */ - if (bch2_extent_has_device(e.c, ca->dev_idx)) { - if (j->prev_buf_sectors > ja->sectors_free) - buckets_required++; + if (!ca->mi.durability || + ca->mi.state != BCH_MEMBER_STATE_rw || + !ja->nr || + bch2_bkey_has_device(bkey_i_to_s_c(&w->key), + ca->dev_idx) || + sectors > ja->sectors_free) + continue; - if (j->prev_buf_sectors + sectors_available > - ja->sectors_free) - buckets_required++; - } else { - if (j->prev_buf_sectors + sectors_available > - ca->mi.bucket_size) - buckets_required++; + bch2_dev_stripe_increment(ca, &j->wp.stripe); - buckets_required++; - } - - if (journal_dev_buckets_available(j, ca) >= buckets_required) - nr_devs++; - nr_online++; - } - rcu_read_unlock(); + bch2_bkey_append_ptr(&w->key, + (struct bch_extent_ptr) { + .offset = bucket_to_sector(ca, + ja->buckets[ja->cur_idx]) + + ca->mi.bucket_size - + ja->sectors_free, + .dev = ca->dev_idx, + }); - if (nr_online < c->opts.metadata_replicas_required) - return -EROFS; + ja->sectors_free -= sectors; + ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); - if (nr_devs < min_t(unsigned, nr_online, c->opts.metadata_replicas)) - return 0; + *replicas += ca->mi.durability; - return sectors_available; + if (*replicas >= replicas_want) + break; + } } /** @@ -1026,143 +1314,63 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w, unsigned sectors) { struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct bkey_s_extent e; - struct bch_extent_ptr *ptr; + struct bch_devs_mask devs; struct journal_device *ja; struct bch_dev *ca; struct dev_alloc_list devs_sorted; - unsigned i, replicas, replicas_want = + unsigned target = c->opts.metadata_target ?: + c->opts.foreground_target; + 
unsigned i, replicas = 0, replicas_want = READ_ONCE(c->opts.metadata_replicas); - spin_lock(&j->lock); - e = bkey_i_to_s_extent(&j->key); - - /* - * Drop any pointers to devices that have been removed, are no longer - * empty, or filled up their current journal bucket: - * - * Note that a device may have had a small amount of free space (perhaps - * one sector) that wasn't enough for the smallest possible journal - * entry - that's why we drop pointers to devices <= current free space, - * i.e. whichever device was limiting the current journal entry size. - */ - bch2_extent_drop_ptrs(e, ptr, ({ - ca = bch_dev_bkey_exists(c, ptr->dev); - - ca->mi.state != BCH_MEMBER_STATE_RW || - ca->journal.sectors_free <= sectors; - })); - - extent_for_each_ptr(e, ptr) { - ca = bch_dev_bkey_exists(c, ptr->dev); + rcu_read_lock(); +retry: + devs = target_rw_devs(c, BCH_DATA_journal, target); - BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW || - ca->journal.sectors_free <= sectors); - ca->journal.sectors_free -= sectors; - } + devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); - replicas = bch2_extent_nr_ptrs(e.c); + __journal_write_alloc(j, w, &devs_sorted, + sectors, &replicas, replicas_want); - rcu_read_lock(); - devs_sorted = bch2_wp_alloc_list(c, &j->wp, - &c->rw_devs[BCH_DATA_JOURNAL]); + if (replicas >= replicas_want) + goto done; for (i = 0; i < devs_sorted.nr; i++) { ca = rcu_dereference(c->devs[devs_sorted.devs[i]]); if (!ca) continue; - if (!ca->mi.durability) - continue; - ja = &ca->journal; - if (!ja->nr) - continue; - - if (replicas >= replicas_want) - break; - /* - * Check that we can use this device, and aren't already using - * it: - */ - if (bch2_extent_has_device(e.c, ca->dev_idx) || - !journal_dev_buckets_available(j, ca) || - sectors > ca->mi.bucket_size) - continue; + if (sectors > ja->sectors_free && + sectors <= ca->mi.bucket_size && + bch2_journal_dev_buckets_available(j, ja, + journal_space_discarded)) { + ja->cur_idx = (ja->cur_idx + 1) % ja->nr; + ja->sectors_free = ca->mi.bucket_size; - j->wp.next_alloc[ca->dev_idx] += U32_MAX; - bch2_wp_rescale(c, ca, &j->wp); + /* + * ja->bucket_seq[ja->cur_idx] must always have + * something sensible: + */ + ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); + } + } - ja->sectors_free = ca->mi.bucket_size - sectors; - ja->cur_idx = (ja->cur_idx + 1) % ja->nr; - ja->bucket_seq[ja->cur_idx] = le64_to_cpu(w->data->seq); + __journal_write_alloc(j, w, &devs_sorted, + sectors, &replicas, replicas_want); - extent_ptr_append(bkey_i_to_extent(&j->key), - (struct bch_extent_ptr) { - .offset = bucket_to_sector(ca, - ja->buckets[ja->cur_idx]), - .dev = ca->dev_idx, - }); - - replicas += ca->mi.durability; + if (replicas < replicas_want && target) { + /* Retry from all devices: */ + target = 0; + goto retry; } +done: rcu_read_unlock(); - j->prev_buf_sectors = 0; - - bkey_copy(&w->key, &j->key); - spin_unlock(&j->lock); - - if (replicas < c->opts.metadata_replicas_required) - return -EROFS; - - BUG_ON(!replicas); - - return 0; -} - -static void journal_write_compact(struct jset *jset) -{ - struct jset_entry *i, *next, *prev = NULL; - - /* - * Simple compaction, dropping empty jset_entries (from journal - * reservations that weren't fully used) and merging jset_entries that - * can be. 
- * - * If we wanted to be really fancy here, we could sort all the keys in - * the jset and drop keys that were overwritten - probably not worth it: - */ - vstruct_for_each_safe(jset, i, next) { - unsigned u64s = le16_to_cpu(i->u64s); - - /* Empty entry: */ - if (!u64s) - continue; - - /* Can we merge with previous entry? */ - if (prev && - i->btree_id == prev->btree_id && - i->level == prev->level && - i->type == prev->type && - i->type == BCH_JSET_ENTRY_btree_keys && - le16_to_cpu(prev->u64s) + u64s <= U16_MAX) { - memmove_u64s_down(vstruct_next(prev), - i->_data, - u64s); - le16_add_cpu(&prev->u64s, u64s); - continue; - } - - /* Couldn't merge, move i into new position (after prev): */ - prev = prev ? vstruct_next(prev) : jset->start; - if (i != prev) - memmove_u64s_down(prev, i, jset_u64s(u64s)); - } + BUG_ON(bkey_val_u64s(&w->key.k) > BCH_REPLICAS_MAX); - prev = prev ? vstruct_next(prev) : jset->start; - jset->u64s = cpu_to_le32((u64 *) prev - jset->_data); + return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS; } static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) @@ -1171,45 +1379,73 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) unsigned new_size = READ_ONCE(j->buf_size_want); void *new_buf; - if (buf->size >= new_size) + if (buf->buf_size >= new_size) return; new_buf = kvpmalloc(new_size, GFP_NOIO|__GFP_NOWARN); if (!new_buf) return; - memcpy(new_buf, buf->data, buf->size); - kvpfree(buf->data, buf->size); - buf->data = new_buf; - buf->size = new_size; + memcpy(new_buf, buf->data, buf->buf_size); + + spin_lock(&j->lock); + swap(buf->data, new_buf); + swap(buf->buf_size, new_size); + spin_unlock(&j->lock); + + kvpfree(new_buf, new_size); +} + +static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j) +{ + return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK); } static void journal_write_done(struct closure *cl) { struct journal *j = container_of(cl, struct journal, io); struct bch_fs *c = container_of(j, struct bch_fs, journal); - struct journal_buf *w = journal_prev_buf(j); - struct bch_devs_list devs = - bch2_extent_devs(bkey_i_to_s_c_extent(&w->key)); - u64 seq = le64_to_cpu(w->data->seq); - u64 last_seq = le64_to_cpu(w->data->last_seq); + struct journal_buf *w = journal_last_unwritten_buf(j); + struct bch_replicas_padded replicas; + union journal_res_state old, new; + u64 v, seq; + int err = 0; - bch2_time_stats_update(j->write_time, j->write_start_time); + bch2_time_stats_update(!JSET_NO_FLUSH(w->data) + ? 
j->flush_write_time + : j->noflush_write_time, j->write_start_time); - if (!devs.nr) { + if (!w->devs_written.nr) { bch_err(c, "unable to write journal to sufficient devices"); - goto err; + err = -EIO; + } else { + bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal, + w->devs_written); + if (bch2_mark_replicas(c, &replicas.e)) + err = -EIO; } - if (bch2_mark_replicas(c, BCH_DATA_JOURNAL, devs)) - goto err; + if (err) + bch2_fatal_error(c); spin_lock(&j->lock); - j->seq_ondisk = seq; - j->last_seq_ondisk = last_seq; + seq = le64_to_cpu(w->data->seq); if (seq >= j->pin.front) - journal_seq_pin(j, seq)->devs = devs; + journal_seq_pin(j, seq)->devs = w->devs_written; + + if (!err) { + if (!JSET_NO_FLUSH(w->data)) { + j->flushed_seq_ondisk = seq; + j->last_seq_ondisk = w->last_seq; + + bch2_do_discards(c); + closure_wake_up(&c->freelist_wait); + } + } else if (!j->err_seq || seq < j->err_seq) + j->err_seq = seq; + + j->seq_ondisk = seq; /* * Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard @@ -1218,41 +1454,59 @@ static void journal_write_done(struct closure *cl) * Must come before signaling write completion, for * bch2_fs_journal_stop(): */ - mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0); -out: + if (j->watermark) + journal_reclaim_kick(&c->journal); + /* also must come before signalling write completion: */ closure_debug_destroy(cl); - BUG_ON(!j->reservations.prev_buf_unwritten); - atomic64_sub(((union journal_res_state) { .prev_buf_unwritten = 1 }).v, - &j->reservations.counter); + v = atomic64_read(&j->reservations.counter); + do { + old.v = new.v = v; + BUG_ON(journal_state_count(new, new.unwritten_idx)); + + new.unwritten_idx++; + } while ((v = atomic64_cmpxchg(&j->reservations.counter, + old.v, new.v)) != old.v); + + bch2_journal_space_available(j); closure_wake_up(&w->wait); journal_wake(j); - if (test_bit(JOURNAL_NEED_WRITE, &j->flags)) - mod_delayed_work(system_freezable_wq, &j->write_work, 0); + if (!journal_state_count(new, new.unwritten_idx) && + journal_last_unwritten_seq(j) <= journal_cur_seq(j)) { + closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL); + } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) && + new.cur_entry_offset < JOURNAL_ENTRY_CLOSED_VAL) { + struct journal_buf *buf = journal_cur_buf(j); + long delta = buf->expires - jiffies; + + /* + * We don't close a journal entry to write it while there's + * previous entries still in flight - the current journal entry + * might want to be written now: + */ + + mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta)); + } + spin_unlock(&j->lock); - return; -err: - bch2_fatal_error(c); - bch2_journal_halt(j); - spin_lock(&j->lock); - goto out; } static void journal_write_endio(struct bio *bio) { struct bch_dev *ca = bio->bi_private; struct journal *j = &ca->fs->journal; + struct journal_buf *w = journal_last_unwritten_buf(j); + unsigned long flags; - if (bch2_dev_io_err_on(bio->bi_status, ca, "journal write") || + if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s", + le64_to_cpu(w->data->seq), + bch2_blk_status_to_str(bio->bi_status)) || bch2_meta_write_fault("journal")) { - struct journal_buf *w = journal_prev_buf(j); - unsigned long flags; - spin_lock_irqsave(&j->err_lock, flags); - bch2_extent_drop_device(bkey_i_to_s_extent(&w->key), ca->dev_idx); + bch2_dev_list_drop_dev(&w->devs_written, ca->dev_idx); spin_unlock_irqrestore(&j->err_lock, flags); } @@ -1260,128 +1514,222 @@ static void journal_write_endio(struct bio 
*bio) percpu_ref_put(&ca->io_ref); } +static void do_journal_write(struct closure *cl) +{ + struct journal *j = container_of(cl, struct journal, io); + struct bch_fs *c = container_of(j, struct bch_fs, journal); + struct bch_dev *ca; + struct journal_buf *w = journal_last_unwritten_buf(j); + struct bch_extent_ptr *ptr; + struct bio *bio; + unsigned sectors = vstruct_sectors(w->data, c->block_bits); + + extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) { + ca = bch_dev_bkey_exists(c, ptr->dev); + if (!percpu_ref_tryget(&ca->io_ref)) { + /* XXX: fix this */ + bch_err(c, "missing device for journal write\n"); + continue; + } + + this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_journal], + sectors); + + bio = ca->journal.bio; + bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META); + bio->bi_iter.bi_sector = ptr->offset; + bio->bi_end_io = journal_write_endio; + bio->bi_private = ca; + + BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector); + ca->prev_journal_sector = bio->bi_iter.bi_sector; + + if (!JSET_NO_FLUSH(w->data)) + bio->bi_opf |= REQ_FUA; + if (!JSET_NO_FLUSH(w->data) && !w->separate_flush) + bio->bi_opf |= REQ_PREFLUSH; + + bch2_bio_map(bio, w->data, sectors << 9); + + trace_journal_write(bio); + closure_bio_submit(bio, cl); + + ca->journal.bucket_seq[ca->journal.cur_idx] = + le64_to_cpu(w->data->seq); + } + + continue_at(cl, journal_write_done, c->io_complete_wq); + return; +} + void bch2_journal_write(struct closure *cl) { struct journal *j = container_of(cl, struct journal, io); struct bch_fs *c = container_of(j, struct bch_fs, journal); struct bch_dev *ca; - struct journal_buf *w = journal_prev_buf(j); + struct journal_buf *w = journal_last_unwritten_buf(j); + struct jset_entry *start, *end; struct jset *jset; struct bio *bio; - struct bch_extent_ptr *ptr; - unsigned i, sectors, bytes; + struct printbuf journal_debug_buf = PRINTBUF; + bool validate_before_checksum = false; + unsigned i, sectors, bytes, u64s, nr_rw_members = 0; + int ret; + + BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb)); journal_buf_realloc(j, w); jset = w->data; j->write_start_time = local_clock(); - mutex_lock(&c->btree_root_lock); - for (i = 0; i < BTREE_ID_NR; i++) { - struct btree_root *r = &c->btree_roots[i]; - if (r->alive) - bch2_journal_add_btree_root(w, i, &r->key, r->level); + spin_lock(&j->lock); + if (bch2_journal_error(j) || + w->noflush || + (!w->must_flush && + (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) && + test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) { + w->noflush = true; + SET_JSET_NO_FLUSH(jset, true); + jset->last_seq = 0; + w->last_seq = 0; + + j->nr_noflush_writes++; + } else { + j->last_flush_write = jiffies; + j->nr_flush_writes++; } - c->btree_roots_dirty = false; - mutex_unlock(&c->btree_root_lock); + spin_unlock(&j->lock); + + /* + * New btree roots are set by journalling them; when the journal entry + * gets written we have to propagate them to c->btree_roots + * + * But, every journal entry we write has to contain all the btree roots + * (at least for now); so after we copy btree roots to c->btree_roots we + * have to get any missing btree roots and add them to this journal + * entry: + */ + + bch2_journal_entries_to_btree_roots(c, jset); + + start = end = vstruct_last(jset); - journal_write_compact(jset); + end = bch2_btree_roots_to_journal_entries(c, jset->start, end); + + bch2_journal_super_entries_add_common(c, &end, + le64_to_cpu(jset->seq)); + u64s = (u64 *) end - (u64 *) start; + BUG_ON(u64s > j->entry_u64s_reserved); + + 
+	BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
 
-	jset->read_clock	= cpu_to_le16(c->bucket_clock[READ].hand);
-	jset->write_clock	= cpu_to_le16(c->bucket_clock[WRITE].hand);
 	jset->magic		= cpu_to_le64(jset_magic(c));
-	jset->version		= cpu_to_le32(BCACHE_JSET_VERSION);
+	jset->version		= c->sb.version < bcachefs_metadata_version_bkey_renumber
+		? cpu_to_le32(BCH_JSET_VERSION_OLD)
+		: cpu_to_le32(c->sb.version);
 
 	SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
 	SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
 
-	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
-	    jset_validate_entries(c, jset, WRITE))
+	if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
+		j->last_empty_seq = le64_to_cpu(jset->seq);
+
+	if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
+		validate_before_checksum = true;
+
+	if (le32_to_cpu(jset->version) < bcachefs_metadata_version_current)
+		validate_before_checksum = true;
+
+	if (validate_before_checksum &&
+	    jset_validate_for_write(c, jset))
 		goto err;
 
-	bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
+	ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
 		    jset->encrypted_start,
 		    vstruct_end(jset) - (void *) jset->encrypted_start);
+	if (bch2_fs_fatal_err_on(ret, c,
+			"error encrypting journal entry: %i", ret))
+		goto err;
 
 	jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
 				  journal_nonce(jset), jset);
 
-	if (!bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)) &&
-	    jset_validate_entries(c, jset, WRITE))
+	if (!validate_before_checksum &&
+	    jset_validate_for_write(c, jset))
 		goto err;
 
 	sectors = vstruct_sectors(jset, c->block_bits);
-	BUG_ON(sectors > j->prev_buf_sectors);
+	BUG_ON(sectors > w->sectors);
 
-	bytes = vstruct_bytes(w->data);
-	memset((void *) w->data + bytes, 0, (sectors << 9) - bytes);
+	bytes = vstruct_bytes(jset);
+	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
 
-	if (journal_write_alloc(j, w, sectors)) {
-		bch2_journal_halt(j);
-		bch_err(c, "Unable to allocate journal write");
-		bch2_fatal_error(c);
-		continue_at(cl, journal_write_done, system_highpri_wq);
-		return;
+retry_alloc:
+	spin_lock(&j->lock);
+	ret = journal_write_alloc(j, w, sectors);
+
+	if (ret && j->can_discard) {
+		spin_unlock(&j->lock);
+		bch2_journal_do_discards(j);
+		goto retry_alloc;
 	}
 
+	if (ret)
+		__bch2_journal_debug_to_text(&journal_debug_buf, j);
+
 	/*
-	 * XXX: we really should just disable the entire journal in nochanges
-	 * mode
+	 * write is allocated, no longer need to account for it in
+	 * bch2_journal_space_available():
 	 */
-	if (c->opts.nochanges)
-		goto no_io;
-
-	extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
-		ca = bch_dev_bkey_exists(c, ptr->dev);
-		if (!percpu_ref_tryget(&ca->io_ref)) {
-			/* XXX: fix this */
-			bch_err(c, "missing device for journal write\n");
-			continue;
-		}
+	w->sectors = 0;
 
-		this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_JOURNAL],
-			     sectors);
+	/*
+	 * journal entry has been compacted and allocated, recalculate space
+	 * available:
+	 */
+	bch2_journal_space_available(j);
+	spin_unlock(&j->lock);
 
-		bio = ca->journal.bio;
-		bio_reset(bio);
-		bio_set_dev(bio, ca->disk_sb.bdev);
-		bio->bi_iter.bi_sector	= ptr->offset;
-		bio->bi_iter.bi_size	= sectors << 9;
-		bio->bi_end_io		= journal_write_endio;
-		bio->bi_private		= ca;
-		bio_set_op_attrs(bio, REQ_OP_WRITE,
-				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
-		bch2_bio_map(bio, jset);
+	if (ret) {
+		bch_err(c, "Unable to allocate journal write:\n%s",
+			journal_debug_buf.buf);
+		printbuf_exit(&journal_debug_buf);
+		bch2_fatal_error(c);
+		continue_at(cl, journal_write_done, c->io_complete_wq);
+		return;
+	}
 
-		trace_journal_write(bio);
-		closure_bio_submit(bio, cl);
+	w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
 
-		ca->journal.bucket_seq[ca->journal.cur_idx] = le64_to_cpu(w->data->seq);
-	}
+	if (c->opts.nochanges)
+		goto no_io;
 
 	for_each_rw_member(ca, c, i)
-		if (journal_flushes_device(ca) &&
-		    !bch2_extent_has_device(bkey_i_to_s_c_extent(&w->key), i)) {
+		nr_rw_members++;
+
+	if (nr_rw_members > 1)
+		w->separate_flush = true;
+
+	if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
+		for_each_rw_member(ca, c, i) {
 			percpu_ref_get(&ca->io_ref);
 
 			bio = ca->journal.bio;
-			bio_reset(bio);
-			bio_set_dev(bio, ca->disk_sb.bdev);
-			bio->bi_opf	= REQ_OP_FLUSH;
+			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
 			bio->bi_end_io		= journal_write_endio;
 			bio->bi_private		= ca;
 			closure_bio_submit(bio, cl);
 		}
+	}
+
+	continue_at(cl, do_journal_write, c->io_complete_wq);
+	return;
 no_io:
-	extent_for_each_ptr(bkey_i_to_s_extent(&j->key), ptr)
-		ptr->offset += sectors;
-
-	bch2_bucket_seq_cleanup(c);
-
-	continue_at(cl, journal_write_done, system_highpri_wq);
+	continue_at(cl, journal_write_done, c->io_complete_wq);
 	return;
 err:
-	bch2_inconsistent_error(c);
-	continue_at(cl, journal_write_done, system_highpri_wq);
+	bch2_fatal_error(c);
+	continue_at(cl, journal_write_done, c->io_complete_wq);
 }
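
A note on the completion path in the first hunk above: journal_write_done() retires the buffer that just hit disk without holding j->lock across the state change itself, because the journal's reservation state is packed into a single 64-bit word and advanced with an atomic64_cmpxchg() retry loop. The following minimal sketch restates that pattern in portable C11 atomics; the union layout, field widths, and all names here are illustrative stand-ins, not the actual bcachefs definitions:

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative stand-in for the packed journal reservation state word. */
union res_state {
	uint64_t v;
	struct {
		uint64_t cur_entry_offset:20;	/* write offset in current buf */
		uint64_t idx:2;			/* buffer being filled */
		uint64_t unwritten_idx:2;	/* oldest buffer not yet on disk */
		uint64_t counts:40;		/* per-buffer reservation counts */
	};
};

/*
 * Retire the just-written buffer: load the state word, bump
 * unwritten_idx, and retry if another CPU changed any field of the
 * word in between - the same shape as the atomic64_cmpxchg() loop in
 * journal_write_done().
 */
static void retire_written_buf(_Atomic uint64_t *state)
{
	union res_state old, new;

	old.v = atomic_load(state);
	do {
		new.v = old.v;
		new.unwritten_idx++;	/* 2-bit field wraps modulo 4 */
	} while (!atomic_compare_exchange_weak(state, &old.v, new.v));
}

The compare-and-swap loop, rather than a plain atomic add, is what makes this safe: concurrent journal reservations update other fields of the same word, and on any collision the loop simply re-reads the word (atomic_compare_exchange_weak refreshes old.v on failure) and recomputes the update.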