+/* for -o reconstruct_alloc: drop all alloc btree keys read from the journal: */
+static void drop_alloc_keys(struct journal_keys *keys)
+{
+ size_t src, dst;
+
+ for (src = 0, dst = 0; src < keys->nr; src++)
+ if (keys->d[src].btree_id != BTREE_ID_alloc)
+ keys->d[dst++] = keys->d[src];
+
+ keys->nr = dst;
+}
+
+/* iterate over keys read from the journal: */
+
+static int __journal_key_cmp(enum btree_id l_btree_id,
+ unsigned l_level,
+ struct bpos l_pos,
+ struct journal_key *r)
+{
+ return (cmp_int(l_btree_id, r->btree_id) ?:
+ cmp_int(l_level, r->level) ?:
+ bpos_cmp(l_pos, r->k->k.p));
+}
+
+static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
+{
+ return (cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->k->k.p, r->k->k.p));
+}
+
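+/*
+ * Binary search for the first key >= the search position; the BUG_ONs
+ * below assert that we found the leftmost such index:
+ */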
+static size_t journal_key_search(struct journal_keys *journal_keys,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ size_t l = 0, r = journal_keys->nr, m;
+
+ while (l < r) {
+ m = l + ((r - l) >> 1);
+ if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
+ l = m + 1;
+ else
+ r = m;
+ }
+
+ BUG_ON(l < journal_keys->nr &&
+ __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);
+
+ BUG_ON(l &&
+ __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);
+
+ return l;
+}
+
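+/*
+ * When a new key is inserted at @idx, shift any live iterator positioned
+ * past that point - and an iterator at exactly @idx, if it already
+ * returned a key at or after the new key's position:
+ */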
+static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
+{
+ struct bkey_i *n = iter->keys->d[idx].k;
+ struct btree_and_journal_iter *biter =
+ container_of(iter, struct btree_and_journal_iter, journal);
+
+ if (iter->idx > idx ||
+ (iter->idx == idx &&
+ biter->last &&
+ bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
+ iter->idx++;
+}
+
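+/*
+ * Insert a key into the sorted journal keys array, replacing any existing
+ * key at the same position and growing the array if it's full:
+ */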
+int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bkey_i *k)
+{
+ struct journal_key n = {
+ .btree_id = id,
+ .level = level,
+ .k = k,
+ .allocated = true
+ };
+ struct journal_keys *keys = &c->journal_keys;
+ struct journal_iter *iter;
+ unsigned idx = journal_key_search(keys, id, level, k->k.p);
+
+ if (idx < keys->nr &&
+ journal_key_cmp(&n, &keys->d[idx]) == 0) {
+ if (keys->d[idx].allocated)
+ kfree(keys->d[idx].k);
+ keys->d[idx] = n;
+ return 0;
+ }
+
+ if (keys->nr == keys->size) {
+ struct journal_keys new_keys = {
+ .nr = keys->nr,
+ .size = max_t(size_t, keys->size, 8) * 2,
+ .journal_seq_base = keys->journal_seq_base,
+ };
+
+ new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
+ if (!new_keys.d) {
+ bch_err(c, "%s: error allocating new key array (size %zu)",
+ __func__, new_keys.size);
+ return -ENOMEM;
+ }
+
+ memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
+ kvfree(keys->d);
+ *keys = new_keys;
+ }
+
+ array_insert_item(keys->d, keys->nr, idx, n);
+
+ list_for_each_entry(iter, &c->journal_iters, list)
+ journal_iter_fix(c, iter, idx);
+
+ return 0;
+}
+
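+/*
+ * Deletes are represented as whiteouts: bkey_init() marks the key deleted,
+ * and only the bkey header needs to be allocated since there's no value:
+ */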
+int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
+ unsigned level, struct bpos pos)
+{
+ struct bkey_i *whiteout =
+ kmalloc(sizeof(struct bkey), GFP_KERNEL);
+ int ret;
+
+ if (!whiteout) {
+ bch_err(c, "%s: error allocating new key", __func__);
+ return -ENOMEM;
+ }
+
+ bkey_init(&whiteout->k);
+ whiteout->k.p = pos;
+
+ ret = bch2_journal_key_insert(c, id, level, whiteout);
+ if (ret)
+ kfree(whiteout);
+ return ret;
+}
+
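+/*
+ * Peek the key at the iterator's current position, or NULL once we've
+ * moved past the keys for this btree/level:
+ */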
+static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
+{
+ struct journal_key *k = iter->idx < iter->keys->nr
+ ? iter->keys->d + iter->idx : NULL;
+
+ if (k &&
+ k->btree_id == iter->btree_id &&
+ k->level == iter->level)
+ return k->k;
+
+ iter->idx = iter->keys->nr;
+ return NULL;
+}
+
+static void bch2_journal_iter_advance(struct journal_iter *iter)
+{
+ if (iter->idx < iter->keys->nr)
+ iter->idx++;
+}
+
+static void bch2_journal_iter_exit(struct journal_iter *iter)
+{
+ list_del(&iter->list);
+}
+
+static void bch2_journal_iter_init(struct bch_fs *c,
+ struct journal_iter *iter,
+ enum btree_id id, unsigned level,
+ struct bpos pos)
+{
+ iter->btree_id = id;
+ iter->level = level;
+ iter->keys = &c->journal_keys;
+ iter->idx = journal_key_search(&c->journal_keys, id, level, pos);
+ list_add(&iter->list, &c->journal_iters);
+}
+
+static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
+{
+ return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
+ iter->b, &iter->unpacked);
+}
+
+static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
+{
+ bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
+}
+
+void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
+{
+ switch (iter->last) {
+ case none:
+ break;
+ case btree:
+ bch2_journal_iter_advance_btree(iter);
+ break;
+ case journal:
+ bch2_journal_iter_advance(&iter->journal);
+ break;
+ }
+
+ iter->last = none;
+}
+
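+/*
+ * Peek the next key from the btree node and the journal, returning
+ * whichever sorts first; on a position collision the journal key wins,
+ * since it's the newer version:
+ */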
+struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
+{
+ struct bkey_s_c ret;
+
+ while (1) {
+ struct bkey_s_c btree_k =
+ bch2_journal_iter_peek_btree(iter);
+ struct bkey_s_c journal_k =
+ bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));
+
+ if (btree_k.k && journal_k.k) {
+ int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);
+
+ if (!cmp)
+ bch2_journal_iter_advance_btree(iter);
+
+ iter->last = cmp < 0 ? btree : journal;
+ } else if (btree_k.k) {
+ iter->last = btree;
+ } else if (journal_k.k) {
+ iter->last = journal;
+ } else {
+ iter->last = none;
+ return bkey_s_c_null;
+ }
+
+ ret = iter->last == journal ? journal_k : btree_k;
+
+ if (iter->b &&
+ bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
+ iter->journal.idx = iter->journal.keys->nr;
+ iter->last = none;
+ return bkey_s_c_null;
+ }
+
+ if (!bkey_deleted(ret.k))
+ break;
+
+ bch2_btree_and_journal_iter_advance(iter);
+ }
+
+ return ret;
+}
+
+struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
+{
+ bch2_btree_and_journal_iter_advance(iter);
+
+ return bch2_btree_and_journal_iter_peek(iter);
+}
+
+void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
+{
+ bch2_journal_iter_exit(&iter->journal);
+}
+
+void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
+ struct bch_fs *c,
+ struct btree *b)
+{
+ memset(iter, 0, sizeof(*iter));
+
+ iter->b = b;
+ bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
+ bch2_journal_iter_init(c, &iter->journal,
+ b->c.btree_id, b->c.level, b->data->min_key);
+}
+
+/* Walk btree, overlaying keys from the journal: */
+
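+/*
+ * Note: @iter is passed by value, so prefetching advances a copy and
+ * leaves the caller's iterator position untouched:
+ */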
+static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
+ struct btree_and_journal_iter iter)
+{
+ unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
+ struct bkey_s_c k;
+ struct bkey_buf tmp;
+
+ BUG_ON(!b->c.level);
+
+ bch2_bkey_buf_init(&tmp);
+
+ while (i < nr &&
+ (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ bch2_bkey_buf_reassemble(&tmp, c, k);
+
+ bch2_btree_node_prefetch(c, NULL, tmp.k,
+ b->c.btree_id, b->c.level - 1);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+ i++;
+ }
+
+ bch2_bkey_buf_exit(&tmp, c);
+}
+
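+/*
+ * Depth-first walk: key_fn is called on every key in the node; on interior
+ * nodes we also recurse into each child, prefetching upcoming children
+ * before descending:
+ */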
+static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
+ struct journal_keys *journal_keys,
+ enum btree_id btree_id,
+ btree_walk_node_fn node_fn,
+ btree_walk_key_fn key_fn)
+{
+ struct btree_and_journal_iter iter;
+ struct bkey_s_c k;
+ struct bkey_buf tmp;
+ struct btree *child;
+ int ret = 0;
+
+ bch2_bkey_buf_init(&tmp);
+ bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
+
+ while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+ ret = key_fn(c, btree_id, b->c.level, k);
+ if (ret)
+ break;
+
+ if (b->c.level) {
+ bch2_bkey_buf_reassemble(&tmp, c, k);
+
+ bch2_btree_and_journal_iter_advance(&iter);
+
+ child = bch2_btree_node_get_noiter(c, tmp.k,
+ b->c.btree_id, b->c.level - 1,
+ false);
+
+ ret = PTR_ERR_OR_ZERO(child);
+ if (ret)
+ break;
+
+ btree_and_journal_iter_prefetch(c, b, iter);
+
+ ret = (node_fn ? node_fn(c, child) : 0) ?:
+ bch2_btree_and_journal_walk_recurse(c, child,
+ journal_keys, btree_id, node_fn, key_fn);
+ six_unlock_read(&child->c.lock);
+
+ if (ret)
+ break;
+ } else {
+ bch2_btree_and_journal_iter_advance(&iter);
+ }
+ }
+
+ bch2_btree_and_journal_iter_exit(&iter);
+ bch2_bkey_buf_exit(&tmp, c);
+ return ret;
+}
+
+int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
+ enum btree_id btree_id,
+ btree_walk_node_fn node_fn,
+ btree_walk_key_fn key_fn)
+{
+ struct btree *b = c->btree_roots[btree_id].b;
+ int ret = 0;
+
+ if (btree_node_fake(b))
+ return 0;
+
+ six_lock_read(&b->c.lock, NULL, NULL);
+ ret = (node_fn ? node_fn(c, b) : 0) ?:
+ bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
+ node_fn, key_fn) ?:
+ key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
+ six_unlock_read(&b->c.lock);
+
+ return ret;
+}
+
+/* sort and dedup all keys in the journal: */
+
+void bch2_journal_entries_free(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct journal_replay *i =
+ list_first_entry(list, struct journal_replay, list);
+ list_del(&i->list);
+ kvpfree(i, offsetof(struct journal_replay, j) +
+ vstruct_bytes(&i->j));
+ }
+}
+
+/*
+ * When keys compare equal, oldest compares first:
+ */
+static int journal_sort_key_cmp(const void *_l, const void *_r)
+{
+ const struct journal_key *l = _l;
+ const struct journal_key *r = _r;
+
+ return cmp_int(l->btree_id, r->btree_id) ?:
+ cmp_int(l->level, r->level) ?:
+ bpos_cmp(l->k->k.p, r->k->k.p) ?:
+ cmp_int(l->journal_seq, r->journal_seq) ?:
+ cmp_int(l->journal_offset, r->journal_offset);
+}
+
+void bch2_journal_keys_free(struct journal_keys *keys)
+{
+ struct journal_key *i;
+
+ for (i = keys->d; i < keys->d + keys->nr; i++)
+ if (i->allocated)
+ kfree(i->k);
+
+ kvfree(keys->d);
+ keys->d = NULL;
+ keys->nr = 0;
+}
+
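+/*
+ * Gather all keys from the journal entries into one flat array, sort, and
+ * dedup so that only the newest version of each key remains:
+ */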
+static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
+{
+ struct journal_replay *i;
+ struct jset_entry *entry;
+ struct bkey_i *k, *_n;
+ struct journal_keys keys = { NULL };
+ struct journal_key *src, *dst;
+ size_t nr_keys = 0;
+
+ if (list_empty(journal_entries))
+ return keys;
+
+ list_for_each_entry(i, journal_entries, list) {
+ if (i->ignore)
+ continue;
+
+ if (!keys.journal_seq_base)
+ keys.journal_seq_base = le64_to_cpu(i->j.seq);
+
+ for_each_jset_key(k, _n, entry, &i->j)
+ nr_keys++;
+ }
+
+ keys.size = roundup_pow_of_two(nr_keys);
+
+ keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
+ if (!keys.d)
+ goto err;
+
+ list_for_each_entry(i, journal_entries, list) {
+ if (i->ignore)
+ continue;
+
+ BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);
+
+ for_each_jset_key(k, _n, entry, &i->j)
+ keys.d[keys.nr++] = (struct journal_key) {
+ .btree_id = entry->btree_id,
+ .level = entry->level,
+ .k = k,
+ .journal_seq = le64_to_cpu(i->j.seq) -
+ keys.journal_seq_base,
+ .journal_offset = k->_data - i->j._data,
+ };
+ }
+
+ sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);
+
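+ /*
+ * The sort puts older versions of a key first, so dedup by skipping
+ * ahead to the newest version of each key:
+ */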
+ src = dst = keys.d;
+ while (src < keys.d + keys.nr) {
+ while (src + 1 < keys.d + keys.nr &&
+ src[0].btree_id == src[1].btree_id &&
+ src[0].level == src[1].level &&
+ !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
+ src++;
+
+ *dst++ = *src++;
+ }
+
+ keys.nr = dst - keys.d;
+err:
+ return keys;
+}
+
+/* journal replay: */
+
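+/*
+ * Advance the replay position to @seq, dropping journal pins on all
+ * entries before it:
+ */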
+static void replay_now_at(struct journal *j, u64 seq)
+{
+ BUG_ON(seq < j->replay_journal_seq);
+ BUG_ON(seq > j->replay_journal_seq_end);
+
+ while (j->replay_journal_seq < seq)
+ bch2_journal_pin_put(j, j->replay_journal_seq++);
+}
+
+static int __bch2_journal_replay_key(struct btree_trans *trans,
+ enum btree_id id, unsigned level,
+ struct bkey_i *k)
+{
+ struct btree_iter *iter;
+ int ret;
+
+ iter = bch2_trans_get_node_iter(trans, id, k->k.p,
+ BTREE_MAX_DEPTH, level,
+ BTREE_ITER_INTENT);
+
+ /*
+ * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
+ * extent_handle_overwrites() and extent_update_to_keys() - but we don't
+ * want that here; journal replay is supposed to treat extents like
+ * regular keys:
+ */
+ BUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);
+
+ ret = bch2_btree_iter_traverse(iter) ?:
+ bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+}
+
+static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
+{
+ unsigned commit_flags = BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_LAZY_RW;
+
+ if (!k->allocated)
+ commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;
+
+ return bch2_trans_do(c, NULL, NULL, commit_flags,
+ __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
+}
+
+static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
+{
+ struct btree_iter *iter;
+ int ret;
+
+ iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
+ BTREE_ITER_CACHED|
+ BTREE_ITER_CACHED_NOFILL|
+ BTREE_ITER_INTENT);
+ ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
+ bch2_trans_iter_put(trans, iter);
+ return ret;
+}
+
+static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
+{
+ return bch2_trans_do(c, NULL, NULL,
+ BTREE_INSERT_NOFAIL|
+ BTREE_INSERT_USE_RESERVE|
+ BTREE_INSERT_LAZY_RW|
+ BTREE_INSERT_JOURNAL_REPLAY,
+ __bch2_alloc_replay_key(&trans, k));
+}
+
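+/*
+ * Replay sort order: interior node updates first (descending level), then
+ * by journal sequence, so updates are applied in the order they were
+ * originally committed:
+ */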
+static int journal_sort_seq_cmp(const void *_l, const void *_r)
+{
+ const struct journal_key *l = _l;
+ const struct journal_key *r = _r;
+
+ return cmp_int(r->level, l->level) ?:
+ cmp_int(l->journal_seq, r->journal_seq) ?:
+ cmp_int(l->btree_id, r->btree_id) ?:
+ bpos_cmp(l->k->k.p, r->k->k.p);
+}
+
+static int bch2_journal_replay(struct bch_fs *c,
+ struct journal_keys keys)
+{
+ struct journal *j = &c->journal;
+ struct journal_key *i;
+ u64 seq;
+ int ret;
+
+ sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
+
+ if (keys.nr)
+ replay_now_at(j, keys.journal_seq_base);
+
+ seq = j->replay_journal_seq;
+
+ /*
+ * First replay updates to the alloc btree - these will only update the
+ * btree key cache:
+ */
+ for_each_journal_key(keys, i) {
+ cond_resched();
+
+ if (!i->level && i->btree_id == BTREE_ID_alloc) {
+ j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
+ ret = bch2_alloc_replay_key(c, i->k);
+ if (ret)
+ goto err;
+ }
+ }
+
+ /*
+ * Next replay updates to interior btree nodes:
+ */
+ for_each_journal_key(keys, i) {
+ cond_resched();
+
+ if (i->level) {
+ j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
+ ret = bch2_journal_replay_key(c, i);
+ if (ret)
+ goto err;
+ }
+ }
+
+ /*
+ * Now that the btree is in a consistent state, we can start journal
+ * reclaim (which will flush entries from the btree key cache back
+ * to the btree):
+ */
+ set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
+ set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
+ journal_reclaim_kick(j);
+
+ j->replay_journal_seq = seq;
+
+ /*
+ * Now replay leaf node updates:
+ */
+ for_each_journal_key(keys, i) {
+ cond_resched();
+
+ if (i->level || i->btree_id == BTREE_ID_alloc)
+ continue;
+
+ replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+
+ ret = bch2_journal_replay_key(c, i);
+ if (ret)
+ goto err;
+ }
+
+ replay_now_at(j, j->replay_journal_seq_end);
+ j->replay_journal_seq = 0;
+
+ bch2_journal_set_replay_done(j);
+ bch2_journal_flush_all_pins(j);
+ return bch2_journal_error(j);
+err:
+ bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
+ ret, bch2_btree_ids[i->btree_id], i->level);
+ return ret;
+}
+
+/* journal replay early: */
+
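+/*
+ * Apply the non-key journal entries - btree roots, usage counters,
+ * blacklists and clocks - which must be processed before the btree can be
+ * used:
+ */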
+static int journal_replay_entry_early(struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ int ret = 0;
+
+ switch (entry->type) {
+ case BCH_JSET_ENTRY_btree_root: {
+ struct btree_root *r;
+
+ if (entry->btree_id >= BTREE_ID_NR) {
+ bch_err(c, "filesystem has unknown btree type %u",
+ entry->btree_id);
+ return -EINVAL;
+ }
+
+ r = &c->btree_roots[entry->btree_id];
+
+ if (entry->u64s) {
+ r->level = entry->level;
+ bkey_copy(&r->key, &entry->start[0]);
+ r->error = 0;
+ } else {
+ r->error = -EIO;
+ }
+ r->alive = true;
+ break;
+ }
+ case BCH_JSET_ENTRY_usage: {
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+
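+ /* usage entries reuse the btree_id and level fields as type/index: */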
+ switch (entry->btree_id) {
+ case FS_USAGE_RESERVED:
+ if (entry->level < BCH_REPLICAS_MAX)
+ c->usage_base->persistent_reserved[entry->level] =
+ le64_to_cpu(u->v);
+ break;
+ case FS_USAGE_INODES:
+ c->usage_base->nr_inodes = le64_to_cpu(u->v);
+ break;
+ case FS_USAGE_KEY_VERSION:
+ atomic64_set(&c->key_version,
+ le64_to_cpu(u->v));
+ break;
+ }
+
+ break;
+ }
+ case BCH_JSET_ENTRY_data_usage: {
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+
+ ret = bch2_replicas_set_usage(c, &u->r,
+ le64_to_cpu(u->v));
+ break;
+ }
+ case BCH_JSET_ENTRY_dev_usage: {
+ struct jset_entry_dev_usage *u =
+ container_of(entry, struct jset_entry_dev_usage, entry);
+ struct bch_dev *ca = bch_dev_bkey_exists(c, u->dev);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
+ sizeof(struct jset_entry_dev_usage_type);
+ unsigned i;
+
+ ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);
+ ca->usage_base->buckets_unavailable = le64_to_cpu(u->buckets_unavailable);
+
+ for (i = 0; i < nr_types; i++) {
+ ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
+ ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
+ ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);
+ }
+
+ break;
+ }
+ case BCH_JSET_ENTRY_blacklist: {
+ struct jset_entry_blacklist *bl_entry =
+ container_of(entry, struct jset_entry_blacklist, entry);
+
+ ret = bch2_journal_seq_blacklist_add(c,
+ le64_to_cpu(bl_entry->seq),
+ le64_to_cpu(bl_entry->seq) + 1);
+ break;
+ }
+ case BCH_JSET_ENTRY_blacklist_v2: {
+ struct jset_entry_blacklist_v2 *bl_entry =
+ container_of(entry, struct jset_entry_blacklist_v2, entry);
+
+ ret = bch2_journal_seq_blacklist_add(c,
+ le64_to_cpu(bl_entry->start),
+ le64_to_cpu(bl_entry->end) + 1);
+ break;
+ }
+ case BCH_JSET_ENTRY_clock: {
+ struct jset_entry_clock *clock =
+ container_of(entry, struct jset_entry_clock, entry);
+
+ atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
+ }
+ }
+
+ return ret;
+}
+
+static int journal_replay_early(struct bch_fs *c,
+ struct bch_sb_field_clean *clean,
+ struct list_head *journal)
+{
+ struct journal_replay *i;
+ struct jset_entry *entry;
+ int ret;
+
+ if (clean) {
+ for (entry = clean->start;
+ entry != vstruct_end(&clean->field);
+ entry = vstruct_next(entry)) {
+ ret = journal_replay_entry_early(c, entry);
+ if (ret)
+ return ret;
+ }
+ } else {
+ list_for_each_entry(i, journal, list) {
+ if (i->ignore)
+ continue;
+
+ vstruct_for_each(&i->j, entry) {
+ ret = journal_replay_entry_early(c, entry);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+
+ bch2_fs_usage_initialize(c);
+
+ return 0;
+}
+
+/* sb clean section: */
+