return ret;
}
-static int journal_entry_validate_btree_keys(struct bch_fs *c,
+static int journal_entry_btree_keys_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return 0;
}
-static int journal_entry_validate_btree_root(struct bch_fs *c,
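+/*
+ * An entry may carry multiple keys; the caller prints the entry type once,
+ * so each key after the first gets a newline and a repeated type prefix to
+ * keep the lines aligned.
+ */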
+static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct bkey_i *k;
+ bool first = true;
+
+ vstruct_for_each(entry, k) {
+ if (!first) {
+ printbuf_newline(out);
+ pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ }
+ pr_buf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
+ bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
+ first = false;
+ }
+}
+
+static int journal_entry_btree_root_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
-static int journal_entry_validate_prio_ptrs(struct bch_fs *c,
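+/* A btree root entry is printed the same way as a btree keys entry: */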
+static void journal_entry_btree_root_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ journal_entry_btree_keys_to_text(out, c, entry);
+}
+
+static int journal_entry_prio_ptrs_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return 0;
}
-static int journal_entry_validate_blacklist(struct bch_fs *c,
+static void journal_entry_prio_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
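+	/* obsolete entry type, nothing to print */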
+}
+
+static int journal_entry_blacklist_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
-static int journal_entry_validate_blacklist_v2(struct bch_fs *c,
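+/* Blacklist entries record journal sequence numbers to be ignored on replay: */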
+static void journal_entry_blacklist_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_blacklist *bl =
+ container_of(entry, struct jset_entry_blacklist, entry);
+
+ pr_buf(out, "seq=%llu", le64_to_cpu(bl->seq));
+}
+
+static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
-static int journal_entry_validate_usage(struct bch_fs *c,
+static void journal_entry_blacklist_v2_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_blacklist_v2 *bl =
+ container_of(entry, struct jset_entry_blacklist_v2, entry);
+
+ pr_buf(out, "start=%llu end=%llu",
+ le64_to_cpu(bl->start),
+ le64_to_cpu(bl->end));
+}
+
+static int journal_entry_usage_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
-static int journal_entry_validate_data_usage(struct bch_fs *c,
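+/* In usage entries the btree_id field holds the fs usage type: */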
+static void journal_entry_usage_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+
+ pr_buf(out, "type=%s v=%llu",
+ bch2_fs_usage_types[u->entry.btree_id],
+ le64_to_cpu(u->v));
+}
+
+static int journal_entry_data_usage_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
-static int journal_entry_validate_clock(struct bch_fs *c,
+static void journal_entry_data_usage_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+
+ bch2_replicas_entry_to_text(out, &u->r);
+ pr_buf(out, "=%llu", le64_to_cpu(u->v));
+}
+
+static int journal_entry_clock_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
-static int journal_entry_validate_dev_usage(struct bch_fs *c,
+static void journal_entry_clock_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_clock *clock =
+ container_of(entry, struct jset_entry_clock, entry);
+
+ pr_buf(out, "%s=%llu", clock->rw ? "write" : "read", le64_to_cpu(clock->time));
+}
+
+static int journal_entry_dev_usage_validate(struct bch_fs *c,
const char *where,
struct jset_entry *entry,
unsigned version, int big_endian, int write)
return ret;
}
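+/*
+ * Per-device usage: a bucket/sector/fragmented triple for each data type,
+ * plus the erasure coded and unavailable bucket counts:
+ */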
+static void journal_entry_dev_usage_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_dev_usage *u =
+ container_of(entry, struct jset_entry_dev_usage, entry);
+ unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);
+
+ pr_buf(out, "dev=%u", le32_to_cpu(u->dev));
+
+ for (i = 0; i < nr_types; i++) {
+ if (i < BCH_DATA_NR)
+ pr_buf(out, " %s", bch2_data_types[i]);
+ else
+ pr_buf(out, " (unknown data type %u)", i);
+ pr_buf(out, ": buckets=%llu sectors=%llu fragmented=%llu",
+ le64_to_cpu(u->d[i].buckets),
+ le64_to_cpu(u->d[i].sectors),
+ le64_to_cpu(u->d[i].fragmented));
+ }
+
+ pr_buf(out, " buckets_ec: %llu buckets_unavailable: %llu",
+ le64_to_cpu(u->buckets_ec),
+ le64_to_cpu(u->buckets_unavailable));
+}
+
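+/*
+ * Log entries hold free-form messages; any payload is valid, and the string
+ * is not guaranteed to be NUL terminated, hence the strnlen() bound when
+ * printing:
+ */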
+static int journal_entry_log_validate(struct bch_fs *c,
+ const char *where,
+ struct jset_entry *entry,
+ unsigned version, int big_endian, int write)
+{
+ return 0;
+}
+
+static void journal_entry_log_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ struct jset_entry_log *l = container_of(entry, struct jset_entry_log, entry);
+ unsigned bytes = vstruct_bytes(entry) - offsetof(struct jset_entry_log, d);
+
+ bch_scnmemcpy(out, l->d, strnlen(l->d, bytes));
+}
+
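+/* validate/to_text ops for each jset entry type: */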
struct jset_entry_ops {
int (*validate)(struct bch_fs *, const char *,
struct jset_entry *, unsigned, int, int);
+ void (*to_text)(struct printbuf *, struct bch_fs *, struct jset_entry *);
};
static const struct jset_entry_ops bch2_jset_entry_ops[] = {
#define x(f, nr) \
[BCH_JSET_ENTRY_##f] = (struct jset_entry_ops) { \
- .validate = journal_entry_validate_##f, \
+ .validate = journal_entry_##f##_validate, \
+ .to_text = journal_entry_##f##_to_text, \
},
BCH_JSET_ENTRY_TYPES()
#undef x
: 0;
}
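+/*
+ * Print an entry as "type: contents" via the ops table; entry types we don't
+ * know about are printed by number:
+ */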
+void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ if (entry->type < BCH_JSET_ENTRY_NR) {
+ pr_buf(out, "%s: ", bch2_jset_entry_types[entry->type]);
+ bch2_jset_entry_ops[entry->type].to_text(out, c, entry);
+ } else {
+ pr_buf(out, "(unknown type %u)", entry->type);
+ }
+}
+
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
int write)
{
case JOURNAL_ENTRY_NONE:
if (!saw_bad)
return 0;
- sectors = c->opts.block_size;
+ sectors = block_sectors(c);
goto next_block;
case JOURNAL_ENTRY_BAD:
saw_bad = true;
* field of the journal entry we read, so try reading
* again at next block boundary:
*/
- sectors = c->opts.block_size;
+ sectors = block_sectors(c);
break;
default:
return ret;
struct journal_device *ja =
container_of(cl, struct journal_device, read);
struct bch_dev *ca = container_of(ja, struct bch_dev, journal);
+ struct bch_fs *c = ca->fs;
struct journal_list *jlist =
container_of(cl->parent, struct journal_list, cl);
struct journal_read_buf buf = { NULL, 0 };
u64 min_seq = U64_MAX;
unsigned i;
- int ret;
+ int ret = 0;
if (!ja->nr)
goto out;
ja->discard_idx = ja->dirty_idx_ondisk =
ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
out:
+ bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
kvpfree(buf.data, buf.size);
percpu_ref_put(&ca->io_ref);
closure_return(cl);
u64 v, seq;
int err = 0;
- bch2_time_stats_update(j->write_time, j->write_start_time);
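+	/* flush and noflush writes are accounted in separate time stats: */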
+ bch2_time_stats_update(!JSET_NO_FLUSH(w->data)
+ ? j->flush_write_time
+ : j->noflush_write_time, j->write_start_time);
if (!w->devs_written.nr) {
bch_err(c, "unable to write journal to sufficient devices");
if (seq >= j->pin.front)
journal_seq_pin(j, seq)->devs = w->devs_written;
-	j->seq_ondisk = seq;
-	if (err && (!j->err_seq || seq < j->err_seq))
-		j->err_seq = seq;
-	if (!JSET_NO_FLUSH(w->data)) {
-		j->flushed_seq_ondisk = seq;
-		j->last_seq_ondisk = w->last_seq;
-	}
+	if (!err) {
+		j->seq_ondisk = seq;
+
+		if (!JSET_NO_FLUSH(w->data)) {
+			j->flushed_seq_ondisk = seq;
+			j->last_seq_ondisk = w->last_seq;
+		}
+	} else if (!j->err_seq || seq < j->err_seq)
+		j->err_seq = seq;
/*
* Updating last_seq_ondisk may let bch2_journal_reclaim_work() discard
spin_lock(&j->lock);
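+	/*
+	 * A write may skip the flush if it was already marked noflush, or if
+	 * nothing requires a flush and less than journal_flush_delay has
+	 * passed since the last flush write:
+	 */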
if (c->sb.features & (1ULL << BCH_FEATURE_journal_no_flush) &&
- !w->must_flush &&
- (jiffies - j->last_flush_write) < msecs_to_jiffies(j->write_delay_ms) &&
- test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
+ (w->noflush ||
+ (!w->must_flush &&
+ (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
+ test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
w->noflush = true;
SET_JSET_NO_FLUSH(jset, true);
jset->last_seq = 0;
SET_JSET_BIG_ENDIAN(jset, CPU_BIG_ENDIAN);
SET_JSET_CSUM_TYPE(jset, bch2_meta_checksum_type(c));
- if (journal_entry_empty(jset))
+ if (!JSET_NO_FLUSH(jset) && journal_entry_empty(jset))
j->last_empty_seq = le64_to_cpu(jset->seq);
if (bch2_csum_type_is_encryption(JSET_CSUM_TYPE(jset)))
w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
- if (c->opts.nochanges)
+ if (test_bit(JOURNAL_NOCHANGES, &j->flags))
goto no_io;
for_each_rw_member(ca, c, i)
}
}
- bch2_bucket_seq_cleanup(c);
-
continue_at(cl, do_journal_write, c->io_complete_wq);
return;
no_io:
- bch2_bucket_seq_cleanup(c);
-
continue_at(cl, journal_write_done, c->io_complete_wq);
return;
err:
- bch2_inconsistent_error(c);
+ bch2_fatal_error(c);
continue_at(cl, journal_write_done, c->io_complete_wq);
}