+ ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
+out:
+ bch2_trans_iter_exit(trans, &iter);
+
+ return ret;
+}
+
+/*
+ * Workqueue handler for an asynchronously queued btree node rewrite:
+ * runs the rewrite in a transaction, then releases the resources the
+ * queuing path acquired (the node_rewrite write ref and the request
+ * allocation itself).
+ */
+static void async_btree_node_rewrite_work(struct work_struct *work)
+{
+ struct async_btree_rewrite *a =
+ container_of(work, struct async_btree_rewrite, work);
+ struct bch_fs *c = a->c;
+ int ret;
+
+ /* Do the actual rewrite inside a transaction context. */
+ ret = bch2_trans_do(c, NULL, NULL, 0,
+ async_btree_node_rewrite_trans(trans, a));
+ if (ret)
+ bch_err_fn(c, ret);
+ /* Drop the write ref taken when this rewrite was queued. */
+ bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
+ /* This handler owns the request; free it now that we're done. */
+ kfree(a);
+}
+
+void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
+{
+ struct async_btree_rewrite *a;
+ int ret;
+
+ a = kmalloc(sizeof(*a), GFP_NOFS);
+ if (!a) {
+ bch_err(c, "%s: error allocating memory", __func__);
+ return;
+ }
+
+ a->c = c;
+ a->btree_id = b->c.btree_id;
+ a->level = b->c.level;
+ a->pos = b->key.k.p;
+ a->seq = b->data->keys.seq;
+ INIT_WORK(&a->work, async_btree_node_rewrite_work);
+
+ if (unlikely(!test_bit(BCH_FS_MAY_GO_RW, &c->flags))) {
+ mutex_lock(&c->pending_node_rewrites_lock);
+ list_add(&a->list, &c->pending_node_rewrites);
+ mutex_unlock(&c->pending_node_rewrites_lock);
+ return;
+ }
+
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_node_rewrite)) {
+ if (test_bit(BCH_FS_STARTED, &c->flags)) {
+ bch_err(c, "%s: error getting c->writes ref", __func__);
+ kfree(a);
+ return;