X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_iter.c;h=eff7630a8e6ecf3184d1879804e5fd974c0f28f2;hb=bd9e0153342c51390ec655b4e78eda1aa1c32a84;hp=f524e4b394c3feff8db5c5fe623dd3ae24ae333a;hpb=7f102ee83d83fd918783ca542fac1574f9b2c623;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_iter.c b/libbcachefs/btree_iter.c
index f524e4b..eff7630 100644
--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c
@@ -5,6 +5,7 @@
 #include "bkey_buf.h"
 #include "btree_cache.h"
 #include "btree_iter.h"
+#include "btree_journal_iter.h"
 #include "btree_key_cache.h"
 #include "btree_locking.h"
 #include "btree_update.h"
@@ -12,13 +13,12 @@
 #include "error.h"
 #include "extents.h"
 #include "journal.h"
-#include "recovery.h"
 #include "replicas.h"
-#include "subvolume.h"
+#include "snapshot.h"
+#include "trace.h"
 
 #include <linux/random.h>
 #include <linux/prefetch.h>
-#include <trace/events/bcachefs.h>
 
 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
@@ -35,21 +35,6 @@ static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
 
 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
 
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline int bch2_trans_cond_resched(struct btree_trans *trans)
-{
-	if (need_resched() || race_fault()) {
-		bch2_trans_unlock(trans);
-		schedule();
-		return bch2_trans_relock(trans);
-	} else {
-		return 0;
-	}
-}
-
 static inline int __btree_path_cmp(const struct btree_path *l,
 				   enum btree_id	r_btree_id,
 				   bool			r_cached,
@@ -241,7 +226,7 @@ static void bch2_btree_path_verify(struct btree_trans *trans,
 	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
 		if (!path->l[i].b) {
 			BUG_ON(!path->cached &&
-			       c->btree_roots[path->btree_id].b->c.level > i);
+			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
 			break;
 		}
 
@@ -377,7 +362,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
 
 	bch2_bpos_to_text(&buf, pos);
 
 	panic("not locked: %s %s%s\n",
-	      bch2_btree_ids[id], buf.buf,
+	      bch2_btree_id_str(id), buf.buf,
" cached" : ""); } @@ -503,7 +488,6 @@ fixup_done: if (!bch2_btree_node_iter_end(node_iter) && iter_current_key_modified && b->c.level) { - struct bset_tree *t; struct bkey_packed *k, *k2, *p; k = bch2_btree_node_iter_peek_all(node_iter, b); @@ -652,9 +636,8 @@ void bch2_btree_path_level_init(struct btree_trans *trans, BUG_ON(path->cached); EBUG_ON(!btree_path_pos_in_node(path, b)); - EBUG_ON(b->c.lock.state.seq & 1); - path->l[b->c.level].lock_seq = b->c.lock.state.seq; + path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock); path->l[b->c.level].b = b; __btree_path_level_init(path, b->c.level); } @@ -704,7 +687,7 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree *b) if (t != BTREE_NODE_UNLOCKED) { btree_node_unlock(trans, path, b->c.level); - six_lock_increment(&b->c.lock, t); + six_lock_increment(&b->c.lock, (enum six_lock_type) t); mark_btree_node_locked(trans, path, b->c.level, t); } @@ -736,7 +719,7 @@ static inline int btree_path_lock_root(struct btree_trans *trans, unsigned long trace_ip) { struct bch_fs *c = trans->c; - struct btree *b, **rootp = &c->btree_roots[path->btree_id].b; + struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b; enum six_lock_type lock_type; unsigned i; int ret; @@ -780,7 +763,8 @@ static inline int btree_path_lock_root(struct btree_trans *trans, for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++) path->l[i].b = NULL; - mark_btree_node_locked(trans, path, path->level, lock_type); + mark_btree_node_locked(trans, path, path->level, + (enum btree_node_locked_type) lock_type); bch2_btree_path_level_init(trans, path, b); return 0; } @@ -952,7 +936,8 @@ static __always_inline int btree_path_down(struct btree_trans *trans, if (btree_node_read_locked(path, level + 1)) btree_node_unlock(trans, path, level + 1); - mark_btree_node_locked(trans, path, level, lock_type); + mark_btree_node_locked(trans, path, level, + (enum btree_node_locked_type) lock_type); path->level = level; bch2_btree_path_level_init(trans, path, b); @@ -1024,7 +1009,7 @@ retry_all: /* * We used to assert that all paths had been traversed here * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since - * path->Should_be_locked is not set yet, we we might have unlocked and + * path->should_be_locked is not set yet, we might have unlocked and * then failed to relock a path - that's fine. 
 	 */
 err:
@@ -1357,14 +1342,14 @@ static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *p
 	__bch2_path_free(trans, path);
 }
 
-void bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
+void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
 {
 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
 	      trans->restart_count, restart_count,
 	      (void *) trans->last_begin_ip);
 }
 
-void bch2_trans_in_restart_error(struct btree_trans *trans)
+void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
 {
 	panic("in transaction restart: %s, last restarted by %pS\n",
 	      bch2_err_str(trans->restarted),
@@ -1386,7 +1371,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 		struct bkey_s_c old = { &i->old_k, i->old_v };
 
 		prt_printf(buf, "update: btree=%s cached=%u %pS",
-			   bch2_btree_ids[i->btree_id],
+			   bch2_btree_id_str(i->btree_id),
 			   i->cached,
 			   (void *) i->ip_allocated);
 		prt_newline(buf);
@@ -1402,7 +1387,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 
 	trans_for_each_wb_update(trans, wb) {
 		prt_printf(buf, "update: btree=%s wb=1 %pS",
-			   bch2_btree_ids[wb->btree],
+			   bch2_btree_id_str(wb->btree),
 			   (void *) i->ip_allocated);
 		prt_newline(buf);
 
@@ -1431,7 +1416,7 @@ void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
 		   path->idx, path->ref, path->intent_ref,
 		   path->preserve ? 'P' : ' ',
 		   path->should_be_locked ? 'S' : ' ',
-		   bch2_btree_ids[path->btree_id],
+		   bch2_btree_id_str(path->btree_id),
 		   path->level);
 	bch2_bpos_to_text(out, path->pos);
 
@@ -1442,7 +1427,7 @@ void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
 	prt_newline(out);
 }
 
-noinline __cold
+static noinline __cold
 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
 				bool nosort)
 {
@@ -1462,7 +1447,7 @@ void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
 	__bch2_trans_paths_to_text(out, trans, false);
 }
 
-noinline __cold
+static noinline __cold
 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
 {
 	struct printbuf buf = PRINTBUF;
@@ -1509,7 +1494,7 @@ static void bch2_trans_update_max_paths(struct btree_trans *trans)
 static noinline void btree_path_overflow(struct btree_trans *trans)
 {
 	bch2_dump_trans_paths_updates(trans);
-	panic("trans path oveflow\n");
+	panic("trans path overflow\n");
 }
 
 static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
@@ -1871,9 +1856,9 @@ static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
 		: NULL;
 }
 
-struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
-				       struct btree_iter *iter,
-				       struct bpos end_pos)
+static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
+					      struct btree_iter *iter,
+					      struct bpos end_pos)
 {
 	struct bkey_i *k;
 
@@ -2062,8 +2047,12 @@ out:
 }
 
 /**
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
+ * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
+ * iterator's current position
+ * @iter:	iterator to peek from
+ * @end:	search limit: returns keys less than or equal to @end
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
 {
@@ -2200,10 +2189,13 @@ end:
 }
 
 /**
- * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
- * to iterator's current position, returning keys from every level of the btree.
- * For keys at different levels of the btree that compare equal, the key from
- * the lower level (leaf) is returned first.
+ * bch2_btree_iter_peek_all_levels() - returns the first key greater than or
+ * equal to iterator's current position, returning keys from every level of the
+ * btree. For keys at different levels of the btree that compare equal, the key
+ * from the lower level (leaf) is returned first.
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
 {
@@ -2294,8 +2286,11 @@ out_no_locked:
 }
 
 /**
- * bch2_btree_iter_next: returns first key greater than iterator's current
+ * bch2_btree_iter_next() - returns first key greater than iterator's current
  * position
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 {
@@ -2306,8 +2301,11 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 }
 
 /**
- * bch2_btree_iter_peek_prev: returns first key less than or equal to
+ * bch2_btree_iter_peek_prev() - returns first key less than or equal to
  * iterator's current position
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 {
@@ -2430,8 +2428,11 @@ out_no_locked:
 }
 
 /**
- * bch2_btree_iter_prev: returns first key less than iterator's current
+ * bch2_btree_iter_prev() - returns first key less than iterator's current
  * position
+ * @iter:	iterator to peek from
+ *
+ * Returns:	key if found, or an error extractable with bkey_err().
  */
 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
 {
@@ -2736,19 +2737,9 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
 	iter->key_cache_path = NULL;
 }
 
-static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
-					  struct btree_iter *iter,
-					  unsigned btree_id, struct bpos pos,
-					  unsigned flags)
-{
-	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
-			       bch2_btree_iter_flags(trans, btree_id, flags),
-			       _RET_IP_);
-}
-
 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
 			  struct btree_iter *iter,
-			  unsigned btree_id, struct bpos pos,
+			  enum btree_id btree_id, struct bpos pos,
 			  unsigned flags)
 {
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
@@ -2764,9 +2755,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
 			       unsigned depth,
 			       unsigned flags)
 {
-       flags |= BTREE_ITER_NOT_EXTENTS;
-       flags |= __BTREE_ITER_ALL_SNAPSHOTS;
-       flags |= BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_NOT_EXTENTS;
+	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_ALL_SNAPSHOTS;
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos,
 				   locks_want, depth,
 				   __bch2_btree_iter_flags(trans, btree_id, flags),
@@ -2794,6 +2785,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 	unsigned new_top = trans->mem_top + size;
 	size_t old_bytes = trans->mem_bytes;
 	size_t new_bytes = roundup_pow_of_two(new_top);
+	int ret;
 	void *new_mem;
 	void *p;
 
@@ -2801,15 +2793,27 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 
 	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
 
-	new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
-	if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
-		new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
-		new_bytes = BTREE_TRANS_MEM_MAX;
-		kfree(trans->mem);
-	}
+	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
+	if (unlikely(!new_mem)) {
+		bch2_trans_unlock(trans);
+
+		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
+		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
+			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
+			new_bytes = BTREE_TRANS_MEM_MAX;
+			kfree(trans->mem);
+		}
+
+		if (!new_mem)
+			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
 
-	if (!new_mem)
-		return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
+		trans->mem = new_mem;
+		trans->mem_bytes = new_bytes;
+
+		ret = bch2_trans_relock(trans);
+		if (ret)
+			return ERR_PTR(ret);
+	}
 
 	trans->mem = new_mem;
 	trans->mem_bytes = new_bytes;
@@ -2843,6 +2847,8 @@ static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
  * bch2_trans_begin() - reset a transaction after a interrupted attempt
  * @trans: transaction to reset
  *
+ * Returns:	current restart counter, to be used with trans_was_restarted()
+ *
  * While iterating over nodes or updating nodes a attempt to lock a btree node
  * may return BCH_ERR_transaction_restart when the trylock fails. When this
  * occurs bch2_trans_begin() should be called and the transaction retried.
@@ -2850,6 +2856,7 @@ static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	u64 now;
 
 	bch2_trans_reset_updates(trans);
@@ -2878,13 +2885,14 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 			path->preserve = false;
 	}
 
+	now = local_clock();
 	if (!trans->restarted &&
 	    (need_resched() ||
-	     local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
-		bch2_trans_unlock(trans);
-		cond_resched();
-		bch2_trans_relock(trans);
+	     now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
+		drop_locks_do(trans, (cond_resched(), 0));
+		now = local_clock();
 	}
+	trans->last_begin_time = now;
 
 	if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
 		bch2_trans_reset_srcu_lock(trans);
@@ -2895,26 +2903,26 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 		trans->notrace_relock_fail = false;
 	}
 
-	trans->last_begin_time = local_clock();
 	return trans->restart_count;
 }
 
-static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
+static struct btree_trans *bch2_trans_alloc(struct bch_fs *c)
 {
-	size_t paths_bytes	= sizeof(struct btree_path) * BTREE_ITER_MAX;
-	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
-	void *p = NULL;
-
-	BUG_ON(trans->used_mempool);
+	struct btree_trans *trans;
 
-#ifdef __KERNEL__
-	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
-#endif
-	if (!p)
-		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
+	if (IS_ENABLED(__KERNEL__)) {
+		trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
+		if (trans)
+			return trans;
+	}
 
-	trans->paths		= p; p += paths_bytes;
-	trans->updates		= p; p += updates_bytes;
+	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
+	/*
+	 * paths need to be zeroed, bch2_check_for_deadlock looks at
+	 * paths in other threads
+	 */
+	memset(&trans->paths, 0, sizeof(trans->paths));
+	return trans;
 }
 
 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
@@ -2934,13 +2942,16 @@ unsigned bch2_trans_get_fn_idx(const char *fn)
 	return i;
 }
 
-void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx)
+struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	__acquires(&c->btree_trans_barrier)
 {
+	struct btree_trans *trans;
 	struct btree_transaction_stats *s;
 
 	bch2_assert_btree_nodes_not_locked();
 
+	trans = bch2_trans_alloc(c);
+
 	memset(trans, 0, sizeof(*trans));
 	trans->c		= c;
 	trans->fn		= fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
@@ -2952,8 +2963,6 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
 		!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
 	closure_init_stack(&trans->ref);
 
-	bch2_trans_alloc_paths(trans, c);
-
 	s = btree_trans_stats(trans);
 	if (s && s->max_mem) {
 		unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
@@ -2979,7 +2988,7 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
 		struct btree_trans *pos;
 
-		mutex_lock(&c->btree_trans_lock);
+		seqmutex_lock(&c->btree_trans_lock);
 		list_for_each_entry(pos, &c->btree_trans_list, list) {
 			/*
 			 * We'd much prefer to be stricter here and completely
@@ -2997,8 +3006,10 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
 		}
 		list_add_tail(&trans->list, &c->btree_trans_list);
 list_add_done:
-		mutex_unlock(&c->btree_trans_lock);
+		seqmutex_unlock(&c->btree_trans_lock);
 	}
+
+	return trans;
 }
 
 static void
 check_btree_paths_leaked(struct btree_trans *trans)
@@ -3016,14 +3027,14 @@ leaked:
 	trans_for_each_path(trans, path)
 		if (path->ref)
 			printk(KERN_ERR "  btree %s %pS\n",
-			       bch2_btree_ids[path->btree_id],
+			       bch2_btree_id_str(path->btree_id),
 			       (void *) path->ip_allocated);
 	/* Be noisy about this: */
 	bch2_fatal_error(c);
 #endif
 }
 
-void bch2_trans_exit(struct btree_trans *trans)
+void bch2_trans_put(struct btree_trans *trans)
 	__releases(&c->btree_trans_barrier)
 {
 	struct btree_insert_entry *i;
@@ -3032,6 +3043,12 @@ void bch2_trans_exit(struct btree_trans *trans)
 
 	bch2_trans_unlock(trans);
 
+	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+		seqmutex_lock(&c->btree_trans_lock);
+		list_del(&trans->list);
+		seqmutex_unlock(&c->btree_trans_lock);
+	}
+
 	closure_sync(&trans->ref);
 
 	if (s)
@@ -3043,12 +3060,6 @@ void bch2_trans_exit(struct btree_trans *trans)
 
 	check_btree_paths_leaked(trans);
 
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
-		mutex_lock(&c->btree_trans_lock);
-		list_del(&trans->list);
-		mutex_unlock(&c->btree_trans_lock);
-	}
-
 	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
 
 	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
@@ -3069,18 +3080,11 @@ void bch2_trans_exit(struct btree_trans *trans)
 	else
 		kfree(trans->mem);
 
-#ifdef __KERNEL__
-	/*
-	 * Userspace doesn't have a real percpu implementation:
-	 */
-	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
-#endif
-
-	if (trans->paths)
-		mempool_free(trans->paths, &c->btree_paths_pool);
-
-	trans->mem = (void *) 0x1;
-	trans->paths = (void *) 0x1;
+	/* Userspace doesn't have a real percpu implementation: */
+	if (IS_ENABLED(__KERNEL__))
+		trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
+	if (trans)
+		mempool_free(trans, &c->btree_trans_pool);
 }
 
 static void __maybe_unused
@@ -3098,7 +3102,7 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
 	prt_tab(out);
 	prt_printf(out, "%px %c l=%u %s:", b,
 		   b->cached ? 'c' : 'b',
-		   b->level, bch2_btree_ids[b->btree_id]);
+		   b->level, bch2_btree_id_str(b->btree_id));
 	bch2_bpos_to_text(out, btree_node_pos(b));
 
 	prt_tab(out);
@@ -3111,7 +3115,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 	struct btree_path *path;
 	struct btree_bkey_cached_common *b;
 	static char lock_types[] = { 'r', 'i', 'w' };
-	unsigned l;
+	unsigned l, idx;
 
 	if (!out->nr_tabstops) {
 		printbuf_tabstop_push(out, 16);
@@ -3120,7 +3124,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 
 	prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);
 
-	trans_for_each_path(trans, path) {
+	trans_for_each_path_safe(trans, path, idx) {
		if (!path->nodes_locked)
			continue;
 
@@ -3128,7 +3132,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 			path->idx,
 			path->cached ? 'c' : 'b',
 			path->level,
-			bch2_btree_ids[path->btree_id]);
+			bch2_btree_id_str(path->btree_id));
 		bch2_bpos_to_text(out, path->pos);
 		prt_newline(out);
 
@@ -3158,6 +3162,17 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 void bch2_fs_btree_iter_exit(struct bch_fs *c)
 {
 	struct btree_transaction_stats *s;
+	struct btree_trans *trans;
+	int cpu;
+
+	trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
+	if (trans)
+		panic("%s leaked btree_trans\n", trans->fn);
+
+	if (c->btree_trans_bufs)
+		for_each_possible_cpu(cpu)
+			kfree(per_cpu_ptr(c->btree_trans_bufs, cpu)->trans);
+	free_percpu(c->btree_trans_bufs);
 
 	for (s = c->btree_transaction_stats;
 	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
@@ -3169,13 +3184,12 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
 	if (c->btree_trans_barrier_initialized)
 		cleanup_srcu_struct(&c->btree_trans_barrier);
 	mempool_exit(&c->btree_trans_mem_pool);
-	mempool_exit(&c->btree_paths_pool);
+	mempool_exit(&c->btree_trans_pool);
 }
 
 int bch2_fs_btree_iter_init(struct bch_fs *c)
 {
 	struct btree_transaction_stats *s;
-	unsigned nr = BTREE_ITER_MAX;
 	int ret;
 
 	for (s = c->btree_transaction_stats;
@@ -3186,11 +3200,14 @@ int bch2_fs_btree_iter_init(struct bch_fs *c)
 	}
 
 	INIT_LIST_HEAD(&c->btree_trans_list);
-	mutex_init(&c->btree_trans_lock);
+	seqmutex_init(&c->btree_trans_lock);
+
+	c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
+	if (!c->btree_trans_bufs)
+		return -ENOMEM;
 
-	ret   = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
-			sizeof(struct btree_path) * nr +
-			sizeof(struct btree_insert_entry) * nr) ?:
+	ret   = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
+			sizeof(struct btree_trans)) ?:
 	      mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
 					BTREE_TRANS_MEM_MAX) ?:
 	      init_srcu_struct(&c->btree_trans_barrier);