+/* Deferred btree updates: */
+
+static void deferred_update_flush(struct journal *j,
+				  struct journal_entry_pin *pin,
+				  u64 seq)
+{
+	struct bch_fs *c = container_of(j, struct bch_fs, journal);
+	struct deferred_update *d =
+		container_of(pin, struct deferred_update, journal);
+	u64 tmp[32];
+	struct bkey_i *k = (void *) tmp;
+	unsigned gen;
+	int ret;
+
+	/*
+	 * The key may not fit in the on-stack buffer: we're called from
+	 * journal reclaim and can't return an error, so allocation failure
+	 * isn't handled yet - hence the BUG_ON():
+	 */
+	if (d->allocated_u64s > ARRAY_SIZE(tmp)) {
+		k = kmalloc(d->allocated_u64s * sizeof(u64), GFP_NOFS);
+
+		BUG_ON(!k); /* XXX */
+	}
+
+	spin_lock(&d->lock);
+	gen = d->gen;
+
+	if (journal_pin_active(&d->journal)) {
+		BUG_ON(d->k.k.u64s > d->allocated_u64s);
+
+		/* Copy the key out so we can insert without holding d->lock: */
+		bkey_copy(k, &d->k);
+		spin_unlock(&d->lock);
+
+		ret = bch2_btree_insert(c, d->btree_id, k, NULL, NULL,
+					BTREE_INSERT_NOFAIL);
+		bch2_fs_fatal_err_on(ret && !bch2_journal_error(j), c,
+			"error flushing deferred btree update: %i", ret);
+
+		spin_lock(&d->lock);
+	}
+
+	/*
+	 * If d->gen changed, the key was updated again while we weren't
+	 * holding d->lock and the pin now covers a newer journal entry -
+	 * leave it for the next flush:
+	 */
+	if (gen == d->gen)
+		bch2_journal_pin_drop(j, &d->journal);
+	spin_unlock(&d->lock);
+
+	if (k != (void *) tmp)
+		kfree(k);
+}
+
+static enum btree_insert_ret
+btree_insert_key_deferred(struct btree_insert *trans,
+			  struct btree_insert_entry *insert)
+{
+	struct bch_fs *c = trans->c;
+	struct journal *j = &c->journal;
+	struct deferred_update *d = insert->d;
+
+	BUG_ON(trans->flags & BTREE_INSERT_JOURNAL_REPLAY);
+	BUG_ON(insert->k->u64s > d->allocated_u64s);
+
+	/*
+	 * The key is journalled now; the btree update itself is deferred
+	 * until the journal pin is flushed:
+	 */
+	__btree_journal_key(trans, d->btree_id, insert->k);
+
+	spin_lock(&d->lock);
+	d->gen++;
+	bkey_copy(&d->k, insert->k);
+	spin_unlock(&d->lock);
+
+	bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
+				deferred_update_flush);
+
+	return BTREE_INSERT_OK;
+}
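+
+/*
+ * Why the gen counter: deferred_update_flush() drops d->lock around the
+ * btree insert, so the key can be overwritten in the meantime and the pin
+ * rearmed against a newer journal entry. An illustrative interleaving:
+ *
+ *	flush				update
+ *	gen = d->gen
+ *	bkey_copy(k, &d->k); unlock
+ *					d->gen++; bkey_copy(&d->k, ...)
+ *					pin moved to the new journal seq
+ *	bch2_btree_insert(...)
+ *	relock; gen != d->gen
+ *	-> keep the pin: the newer key still has to be flushed
+ */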
+
+void bch2_deferred_update_free(struct bch_fs *c,
+			       struct deferred_update *d)
+{
+	/* Write out the final version of the key and release the pin: */
+	deferred_update_flush(&c->journal, &d->journal, 0);
+
+	BUG_ON(journal_pin_active(&d->journal));
+
+	bch2_journal_pin_flush(&c->journal, &d->journal);
+	kfree(d);
+}
+
+struct deferred_update *
+bch2_deferred_update_alloc(struct bch_fs *c,
+			   enum btree_id btree_id,
+			   unsigned u64s)
+{
+	struct deferred_update *d;
+
+	BUG_ON(u64s > U8_MAX);
+
+	d = kmalloc(offsetof(struct deferred_update, k) +
+		    u64s * sizeof(u64), GFP_NOFS);
+	BUG_ON(!d);
+
+	/* Zero the header; the key itself lives past offsetof(..., k): */
+	memset(d, 0, offsetof(struct deferred_update, k));
+
+	spin_lock_init(&d->lock);
+	d->allocated_u64s = u64s;
+	d->btree_id = btree_id;
+
+	return d;
+}
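+
+/*
+ * Usage sketch - illustrative only; the btree ID and size below are made
+ * up. A caller that repeatedly overwrites a single key (an inode, say)
+ * allocates one deferred_update up front and points its insert entries'
+ * ->d at it; each update then costs only a journal write, and the btree
+ * node is dirtied once per flush instead of once per update:
+ *
+ *	struct deferred_update *d =
+ *		bch2_deferred_update_alloc(c, BTREE_ID_INODES, 64);
+ *
+ *	(updates go through btree_insert_key_deferred() via insert->d)
+ *
+ *	bch2_deferred_update_free(c, d);
+ */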
+
+/* struct btree_insert operations: */
+
+/*
+ * We sort transaction entries so that if multiple iterators point to the same
+ * leaf node they'll be adjacent:
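+ *
+ * For example (illustrative): entries destined for leaves A, A, B, C, A
+ * sort to A, A, A, B, C, so per-node work - write locking, for instance -
+ * can be done once per run of adjacent entries rather than once per entry.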
+ */