+ ssize_t idx = iter - trans->iters;
+
+ BUG_ON(idx < 0 || idx >= trans->nr_iters);
+ BUG_ON(!(trans->iters_live & (1ULL << idx)));
+
+ return idx;
+}
+
+void bch2_trans_iter_put(struct btree_trans *trans,
+			 struct btree_iter *iter)
+{
+	/*
+	 * Mark @iter as no longer in use by this transaction iteration;
+	 * it stays linked, so it can be reused if the transaction restarts.
+	 */
+	trans->iters_live &= ~(1ULL << btree_trans_iter_idx(trans, iter));
+}
+
+void bch2_trans_iter_free(struct btree_trans *trans,
+			  struct btree_iter *iter)
+{
+	ssize_t idx = btree_trans_iter_idx(trans, iter);
+	u64 mask = 1ULL << idx;
+
+	/* Drop @iter entirely: clear it from both bitmaps, then unlink: */
+	trans->iters_live	&= ~mask;
+	trans->iters_linked	&= ~mask;
+	bch2_btree_iter_unlink(iter);
+}
+
+/*
+ * Grow the transaction's iterator and update arrays to @new_size entries.
+ *
+ * Falls back to the (BTREE_ITER_MAX sized) mempool if kmalloc fails.  All
+ * btree node locks are dropped first, since reallocation can block.
+ *
+ * Returns 0 on success, or -EINTR if iterators were live across the
+ * reallocation - their addresses changed, so the transaction must restart.
+ */
+static int btree_trans_realloc_iters(struct btree_trans *trans,
+				     unsigned new_size)
+{
+	void *new_iters, *new_updates;
+	unsigned i;
+
+	BUG_ON(new_size > BTREE_ITER_MAX);
+
+	if (new_size <= trans->size)
+		return 0;
+
+	BUG_ON(trans->used_mempool);
+
+	bch2_trans_unlock(trans);
+
+	/* Iterators and updates share one allocation; updates follow iters: */
+	new_iters = kmalloc(sizeof(struct btree_iter) * new_size +
+			    sizeof(struct btree_insert_entry) * (new_size + 4),
+			    GFP_NOFS);
+	if (new_iters)
+		goto success;
+
+	new_iters = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
+	new_size = BTREE_ITER_MAX;
+
+	trans->used_mempool = true;
+success:
+	new_updates = new_iters + sizeof(struct btree_iter) * new_size;
+
+	memcpy(new_iters, trans->iters,
+	       sizeof(struct btree_iter) * trans->nr_iters);
+	memcpy(new_updates, trans->updates,
+	       sizeof(struct btree_insert_entry) * trans->nr_updates);
+
+	if (trans->iters != trans->iters_onstack)
+		kfree(trans->iters);
+
+	trans->iters = new_iters;
+	trans->updates = new_updates;
+	trans->size = new_size;
+
+	/* The copied iterators' link pointers are stale - reset, then relink: */
+	for (i = 0; i < trans->nr_iters; i++)
+		trans->iters[i].next = &trans->iters[i];
+
+	if (trans->iters_linked) {
+		unsigned first_linked = __ffs64(trans->iters_linked);
+
+		for (i = first_linked + 1; i < trans->nr_iters; i++)
+			if (trans->iters_linked & (1ULL << i))
+				bch2_btree_iter_link(&trans->iters[first_linked],
+						     &trans->iters[i]);
+	}
+
+	btree_trans_verify(trans);
+
+	/* Live iterators' addresses just changed - caller must restart: */
+	if (trans->iters_live) {
+		trans_restart();
+		return -EINTR;
+	}
+
+	return 0;
+}
+
+/*
+ * Preallocate the maximum number of iterators up front, so later
+ * iterator allocations within this transaction won't need to realloc.
+ *
+ * NOTE(review): return value ignored - presumably callers preload before
+ * any iterators are live, so the -EINTR restart path can't trigger here;
+ * confirm against callers.
+ */
+void bch2_trans_preload_iters(struct btree_trans *trans)
+{
+	btree_trans_realloc_iters(trans, BTREE_ITER_MAX);
+}
+
+/*
+ * Get the iterator with identity @iter_id for this transaction, reusing it
+ * if it already exists (e.g. after a transaction restart), otherwise
+ * allocating a free slot - growing the iterator array if necessary.
+ *
+ * Returns the iterator, or an ERR_PTR if growing the array failed or
+ * forced a transaction restart.
+ */
+static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
+						 unsigned btree_id,
+						 unsigned flags, u64 iter_id)
+{
+	struct btree_iter *iter;
+	int idx;
+
+	BUG_ON(trans->nr_iters > BTREE_ITER_MAX);
+
+	/* First look for an existing iterator with this id, for reuse: */
+	for (idx = 0; idx < trans->nr_iters; idx++)
+		if (trans->iters[idx].id == iter_id)
+			goto found;
+	idx = -1;
+found:
+	if (idx < 0) {
+		idx = ffz(trans->iters_linked);
+		if (idx < trans->nr_iters)
+			goto got_slot;
+
+		BUG_ON(trans->nr_iters > trans->size);
+
+		if (trans->nr_iters == trans->size) {
+			int ret = btree_trans_realloc_iters(trans,
+							trans->size * 2);
+			if (ret)
+				return ERR_PTR(ret);
+		}
+
+		idx = trans->nr_iters++;
+		BUG_ON(trans->nr_iters > trans->size);
+got_slot:
+		iter = &trans->iters[idx];
+		iter->id = iter_id;
+
+		bch2_btree_iter_init(iter, trans->c, btree_id, POS_MIN, flags);
+	} else {
+		/* Reusing an existing iterator - just refresh its flags: */
+		iter = &trans->iters[idx];
+
+		iter->flags &= ~(BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+		iter->flags |= flags & (BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+	}
+
+	BUG_ON(trans->iters_live & (1ULL << idx));
+	trans->iters_live |= 1ULL << idx;
+
+	/* 1ULL, not 1: the bitmaps are u64 and idx may be >= 32: */
+	if (trans->iters_linked &&
+	    !(trans->iters_linked & (1ULL << idx)))
+		bch2_btree_iter_link(&trans->iters[__ffs64(trans->iters_linked)],
+				     iter);
+
+	trans->iters_linked |= 1ULL << idx;
+
+	btree_trans_verify(trans);
+
+	BUG_ON(iter->btree_id != btree_id);
+	BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
+
+	return iter;
+}
+
+struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
+					 enum btree_id btree_id,
+					 struct bpos pos, unsigned flags,
+					 u64 iter_id)
+{
+	struct btree_iter *iter;
+
+	/* Get (or reuse) the iterator, then position it at @pos: */
+	iter = __btree_trans_get_iter(trans, btree_id, flags, iter_id);
+	if (!IS_ERR(iter))
+		bch2_btree_iter_set_pos(iter, pos);
+
+	return iter;
+}
+
+struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
+					  struct btree_iter *src,
+					  u64 iter_id)
+{
+	struct btree_iter *iter;
+
+	/* Allocate a new iterator on @src's btree, then copy @src into it: */
+	iter = __btree_trans_get_iter(trans, src->btree_id,
+				      src->flags, iter_id);
+	if (!IS_ERR(iter))
+		bch2_btree_iter_copy(iter, src);
+
+	return iter;
+}
+
+/*
+ * Bump allocator for memory that lives for the duration of the current
+ * transaction iteration; the whole buffer is reset on transaction begin.
+ */
+void *bch2_trans_kmalloc(struct btree_trans *trans,
+			 size_t size)
+{
+	size_t needed = trans->mem_top + size;
+	void *p;
+
+	if (needed > trans->mem_bytes) {
+		bool had_mem = trans->mem_bytes != 0;
+		size_t new_bytes = roundup_pow_of_two(needed);
+		void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
+
+		if (!new_mem)
+			return ERR_PTR(-ENOMEM);
+
+		trans->mem = new_mem;
+		trans->mem_bytes = new_bytes;
+
+		/*
+		 * krealloc() may have moved the buffer: if we'd already
+		 * handed out pointers into the old one, the transaction
+		 * must restart:
+		 */
+		if (had_mem) {
+			trans_restart();
+			return ERR_PTR(-EINTR);
+		}
+	}
+
+	p = trans->mem + trans->mem_top;
+	trans->mem_top += size;
+	return p;
+}
+
+/*
+ * Unlock every linked iterator in the transaction.
+ *
+ * Returns -EIO if any iterator had seen an error, 0 otherwise.
+ */
+int bch2_trans_unlock(struct btree_trans *trans)
+{
+	/*
+	 * u64, not unsigned: iters_linked is a 64 bit bitmap, and
+	 * truncating it would silently skip iterators with idx >= 32:
+	 */
+	u64 iters = trans->iters_linked;
+	int ret = 0;
+
+	while (iters) {
+		unsigned idx = __ffs64(iters);
+		struct btree_iter *iter = &trans->iters[idx];
+
+		if (iter->flags & BTREE_ITER_ERROR)
+			ret = -EIO;
+
+		__bch2_btree_iter_unlock(iter);
+		iters ^= 1ULL << idx;
+	}
+
+	return ret;
+}
+
+/*
+ * Reset transaction state for the next iteration: prune iterators that
+ * weren't used last time around, then clear the live set, pending updates,
+ * and the bump allocator.
+ */
+void __bch2_trans_begin(struct btree_trans *trans)
+{
+	u64 linked_not_live;
+	unsigned idx;
+
+	btree_trans_verify(trans);
+
+	/*
+	 * On transaction restart, the transaction isn't required to allocate
+	 * all the same iterators as on the last iteration:
+	 *
+	 * Unlink any iterators it didn't use this iteration, assuming it got
+	 * further (allocated an iter with a higher idx) than where the iter
+	 * was originally allocated:
+	 */
+	while (1) {
+		linked_not_live = trans->iters_linked & ~trans->iters_live;
+		if (!linked_not_live)
+			break;
+
+		idx = __ffs64(linked_not_live);
+		if (1ULL << idx > trans->iters_live)
+			break;
+
+		/* 1ULL, not 1: idx comes from __ffs64() and may be >= 32: */
+		trans->iters_linked ^= 1ULL << idx;
+		bch2_btree_iter_unlink(&trans->iters[idx]);
+	}
+
+	trans->iters_live = 0;
+	trans->nr_updates = 0;
+	trans->mem_top = 0;
+
+	btree_trans_verify(trans);
+}
+
+/*
+ * Initialize @trans for use with filesystem @c.
+ *
+ * Only the fields before iters_onstack are zeroed - the onstack iterator
+ * and update arrays don't need clearing, since nr_iters/nr_updates start
+ * at zero.  Starts out using the onstack arrays; btree_trans_realloc_iters()
+ * switches to a heap allocation if more are needed.
+ */
+void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c)
+{
+	memset(trans, 0, offsetof(struct btree_trans, iters_onstack));
+
+	trans->c = c;
+	trans->size = ARRAY_SIZE(trans->iters_onstack);
+	trans->iters = trans->iters_onstack;
+	trans->updates = trans->updates_onstack;
+}
+
+int bch2_trans_exit(struct btree_trans *trans)
+{
+ int ret = bch2_trans_unlock(trans);
+
+ kfree(trans->mem);
+ if (trans->used_mempool)
+ mempool_free(trans->iters, &trans->c->btree_iters_pool);
+ else if (trans->iters != trans->iters_onstack)
+ kfree(trans->iters);
+ trans->mem = (void *) 0x1;
+ trans->iters = (void *) 0x1;
+ return ret;