// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "extents.h"
#include "journal.h"
#include "replicas.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);

	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);

	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	/*
	 * Pointer values below 128 are the BTREE_ITER_NO_NODE_* sentinels,
	 * not real btree node pointers:
	 */
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[l].b >= 128;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}
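/*
 * Why extent iterators search from the successor of iter->pos: extent keys
 * are indexed by their end position, so an extent ending exactly at
 * iter->pos - which does not contain iter->pos - would otherwise satisfy
 * the search. Illustrative example (made-up offsets): an extent covering
 * [8, 16) has k.p.offset == 16; with iter->pos.offset == 8 we search for
 * offset 9, skipping a [0, 8) extent whose k.p.offset == 8.
 */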
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
					      struct btree *b)
{
	return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
}

static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
{
	return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->c.btree_id &&
		!btree_iter_pos_before_node(iter, b) &&
		!btree_iter_pos_after_node(iter, b);
}

/* Btree node locking: */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	bch2_btree_node_unlock_write_inlined(b, iter);
}

void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[b->c.level].b == b &&
		    btree_node_read_locked(linked, b->c.level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	atomic64_sub(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
	atomic64_add(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
}
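/*
 * A minimal sketch of the dance above, written with a hypothetical
 * six_lock_readers_add() helper (the real code manipulates the lock word
 * directly with atomic64_sub()/atomic64_add()):
 *
 *	six_lock_readers_add(&b->c.lock, -readers);
 *	six_lock_write(&b->c.lock);
 *	six_lock_readers_add(&b->c.lock, readers);
 *
 * Only our own transaction's read locks are subtracted, and since this
 * thread is the one blocking here, that count can't change underneath us;
 * holding the intent lock keeps anyone else from racing to the write lock
 * while the reader count is temporarily doctored.
 */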
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
	     btree_node_lock_increment(iter->trans, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	} else {
		return false;
	}
}
static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
{
	struct btree *b = iter->l[level].b;

	EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(iter, level))
		return false;

	if (btree_node_intent_locked(iter, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(iter, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(iter, b, level) &&
	    btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(iter, level);
	return true;
}
static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
					unsigned long trace_ip)
{
	unsigned l = iter->level;
	int fail_idx = -1;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(iter, l)
		      : bch2_btree_node_relock(iter, l))) {
			(upgrade
			 ? trace_node_upgrade_fail
			 : trace_node_relock_fail)(iter->trans->ip, trace_ip,
					iter->btree_id, &iter->real_pos,
					l, iter->l[l].lock_seq,
					is_btree_node(iter, l)
					? 0
					: (unsigned long) iter->l[l].b,
					is_btree_node(iter, l)
					? iter->l[l].b->c.lock.state.seq
					: 0);

			fail_idx = l;
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		}

		l++;
	} while (l < iter->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_iter_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	while (fail_idx >= 0) {
		btree_node_unlock(iter, fail_idx);
		iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
		--fail_idx;
	}

	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
		iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);

	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  enum btree_iter_type type)
{
	return type != BTREE_ITER_CACHED
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}
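/*
 * Summary of the deadlock-avoidance ordering enforced by the checks below
 * (restating the inline comments): a transaction may only block taking a
 * lock if, across all its iterators, locks are acquired in a globally
 * consistent order -
 *
 *  1. by btree id, ascending
 *  2. within a btree, cached iterators before non-cached
 *  3. interior nodes before their descendants (level, descending)
 *  4. within a level, by key, ascending
 *
 * If an already-held lock orders after the one we want, we can't safely
 * block: we record the offending iterator and force a transaction restart.
 */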
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level, struct btree_iter *iter,
			    enum six_lock_type type,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter *linked, *deadlock_iter = NULL;
	u64 start_time = local_clock();
	unsigned reason = 9;
	bool ret;

	/* Check if it's safe to block: */
	trans_for_each_iter(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			deadlock_iter = linked;
			reason = 1;
		}

		if (linked->btree_id != iter->btree_id) {
			if (linked->btree_id > iter->btree_id) {
				deadlock_iter = linked;
				reason = 3;
			}
			continue;
		}

		/*
		 * Within the same btree, cached iterators come before non
		 * cached iterators:
		 */
		if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
			if (btree_iter_is_cached(iter)) {
				deadlock_iter = linked;
				reason = 4;
			}
			continue;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			deadlock_iter = linked;
			reason = 5;
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 btree_iter_type(linked))) <= 0) {
			deadlock_iter = linked;
			reason = 7;
			BUG_ON(trans->in_traverse_all);
		}
	}

	if (unlikely(deadlock_iter)) {
		trace_trans_restart_would_deadlock(iter->trans->ip, ip,
				trans->in_traverse_all, reason,
				deadlock_iter->btree_id,
				btree_iter_type(deadlock_iter),
				&deadlock_iter->real_pos,
				iter->btree_id,
				btree_iter_type(iter),
				&pos);
		return false;
	}

	if (six_trylock_type(&b->c.lock, type))
		return true;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking_iter_idx = iter->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= iter->btree_id;
	trans->locking_level	= level;
	trans->locking		= b;
#endif

	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking = NULL;
#endif
	if (ret)
		bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
				       start_time);
	return ret;
}
/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
	unsigned l;

	if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
		BUG_ON(iter->nodes_locked);
		return;
	}

	for (l = 0; is_btree_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
			continue;

		BUG_ON(btree_lock_want(iter, l) !=
		       btree_node_locked_type(iter, l));
	}
}

void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_verify_locks(iter);
}
#else
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
	return btree_iter_get_locks(iter, false, trace_ip);
}

bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
			       unsigned new_locks_want)
{
	struct btree_iter *linked;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	if (btree_iter_get_locks(iter, true, _THIS_IP_))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_iter_traverse_all().
	 */
	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    btree_iter_type(linked) == btree_iter_type(iter) &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_iter_get_locks(linked, true, _THIS_IP_);
		}

	return false;
}
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(iter->locks_want < new_locks_want);

	iter->locks_want = new_locks_want;

	while (iter->nodes_locked &&
	       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
		if (l > iter->level) {
			btree_node_unlock(iter, l);
		} else {
			if (btree_node_intent_locked(iter, l)) {
				six_lock_downgrade(&iter->l[l].b->c.lock);
				iter->nodes_intent_locked ^= 1 << l;
			}
			break;
		}
	}

	bch2_btree_trans_verify_locks(iter->trans);
}

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_downgrade(iter);
}
/* Btree transaction locking: */

static inline bool btree_iter_should_be_locked(struct btree_trans *trans,
					       struct btree_iter *iter)
{
	return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
		iter->should_be_locked;
}

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (!bch2_btree_iter_relock(iter, _RET_IP_) &&
		    btree_iter_should_be_locked(trans, iter)) {
			trace_trans_restart_relock(trans->ip, _RET_IP_,
					iter->btree_id, &iter->real_pos);
			return false;
		}
	return true;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		__bch2_btree_iter_unlock(iter);
}
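/*
 * Typical caller pattern (an illustrative sketch, not code from this file):
 * a transaction that has to block on something external drops its locks
 * first, then revalidates them afterwards, restarting the transaction if
 * relock fails:
 *
 *	bch2_trans_unlock(trans);
 *	wait_on_something_slow();
 *	if (!bch2_trans_relock(trans))
 *		return -EINTR;
 */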
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(iter, 0);

	if (!bch2_btree_node_relock(iter, 0))
		return;

	ck = (void *) iter->l[0].b;
	BUG_ON(ck->key.btree_id != iter->btree_id ||
	       bkey_cmp(ck->key.pos, iter->pos));

	if (!locked)
		btree_node_unlock(iter, 0);
}
static void bch2_btree_iter_verify_level(struct btree_iter *iter,
					 unsigned level)
{
	struct btree_iter_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100], buf3[100];
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &iter->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(iter, level);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		if (!level)
			bch2_btree_iter_verify_cached(iter);
		return;
	}

	BUG_ON(iter->level < iter->min_depth);

	if (!btree_iter_node(iter, level))
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	BUG_ON(!btree_iter_pos_in_node(iter, l->b));

	/*
	 * node iterators don't use leaf node iterator:
	 */
	if (btree_iter_type(iter) == BTREE_ITER_NODES &&
	    level <= iter->min_depth)
		goto unlock;

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 *
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
	p = level || btree_node_type_is_extents(iter->btree_id)
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
		msg = "after";
		goto err;
	}
unlock:
	if (!locked)
		btree_node_unlock(iter, level);
	return;
err:
	strcpy(buf2, "(none)");
	strcpy(buf3, "(none)");

	bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&PBUF(buf3), &uk);
	}

	panic("iterator should be %s key at level %u:\n"
	      "iter pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1, buf2, buf3);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	enum btree_iter_type type = btree_iter_type(iter);
	unsigned i;

	EBUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(type == BTREE_ITER_NODES &&
	       !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(type != BTREE_ITER_NODES &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	bch2_btree_iter_verify_locks(iter);

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		bch2_btree_iter_verify_level(iter, i);
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	enum btree_iter_type type = btree_iter_type(iter);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((type == BTREE_ITER_KEYS ||
		type == BTREE_ITER_CACHED) &&
	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
		bkey_cmp(iter->pos, iter->k.p) > 0));
}

void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
{
	struct btree_iter *iter;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_iter_with_node(trans, b, iter)
		bch2_btree_iter_verify_level(iter, b->c.level);
}

#else

static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}

#endif
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}
static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_iter_level *l = &iter->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_iter_fix_key_modified(linked, b, where);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		goto fixup_done;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    (b->c.level ||
	     btree_node_type_is_extents(iter->btree_id))) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}

	if (!b->c.level &&
	    node_iter == &iter->l[0].iter &&
	    iter_current_key_modified)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_iter *linked;

	if (node_iter != &iter->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
						  struct btree_iter_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	/*
	 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
	 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
	 * being overwritten but doesn't change k->size. But this is ok, because
	 * those keys are never written out, we just have to avoid a spurious
	 * assertion here:
	 */
	if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
							struct btree_iter_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(iter, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
						    struct btree_iter_level *l)
{
	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
	return k;
}

static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
						    struct btree_iter_level *l)
{
	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
	return k;
}

static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
					     struct btree_iter_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->c.level + 1;
	if (!btree_iter_node(iter, plevel))
		return;

	parent_locked = btree_node_locked(iter, plevel);

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	l = &iter->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_deleted(k) ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf1[100];
		char buf2[100];
		char buf3[100];
		char buf4[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_dump_btree_node(iter->trans->c, l->b);
		bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
		bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
		bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
		panic("parent iter doesn't point to new node:\n"
		      "iter pos %s %s\n"
		      "iter key %s\n"
		      "new node %s-%s\n",
		      bch2_btree_ids[iter->btree_id], buf1,
		      buf2, buf3, buf4);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
				     unsigned level)
{
	struct btree_iter_level *l = &iter->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);

	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
	iter->l[b->c.level].b = b;
	__btree_iter_init(iter, b->c.level);
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	enum btree_node_locked_type t;
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
		    btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->c.level));

			t = btree_lock_want(linked, b->c.level);
			if (t != BTREE_NODE_UNLOCKED) {
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(linked, b->c.level, t);
			}

			btree_iter_node_set(linked, b);
		}
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->c.level;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
			btree_node_unlock(linked, level);
			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__btree_iter_init(linked, b->c.level);
}
static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	/* if the root was freed/reused while we slept, don't take this lock: */
	return b == *rootp ? 0 : -1;
}

static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = iter->trans->c;
	struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		iter->level = READ_ONCE(b->c.level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
					      iter, lock_type,
					      lock_root_check_fn, rootp,
					      trace_ip)))
			return -EINTR;

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			iter->l[iter->level].b = b;
			for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
static void btree_iter_prefetch(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);

	bch2_bkey_buf_init(&tmp);

	while (nr--) {
		if (!bch2_btree_node_relock(iter, iter->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
					 iter->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level);

	bch2_bkey_buf_exit(&tmp, c);
}
static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
					    unsigned plevel, struct btree *b)
{
	struct btree_iter_level *l = &iter->l[plevel];
	bool locked = btree_node_locked(iter, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(iter, plevel);
}
static __always_inline int btree_iter_down(struct btree_iter *iter,
					   unsigned long trace_ip)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	bch2_bkey_buf_init(&tmp);
	bch2_bkey_buf_unpack(&tmp, c, l->b,
			 bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(iter, level + 1, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);

	iter->level = level;
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static int btree_iter_traverse_one(struct btree_iter *, unsigned long);

static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
				     unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	u8 sorted[BTREE_ITER_MAX];
	int i, nr_sorted = 0;
	bool relock_fail;

	if (trans->in_traverse_all)
		return -EINTR;

	trans->in_traverse_all = true;
retry_all:
	nr_sorted = 0;
	relock_fail = false;

	trans_for_each_iter(trans, iter) {
		if (!bch2_btree_iter_relock(iter, _THIS_IP_))
			relock_fail = true;
		sorted[nr_sorted++] = iter->idx;
	}

	if (!relock_fail) {
		trans->in_traverse_all = false;
		return 0;
	}

#define btree_iter_cmp_by_idx(_l, _r)				\
		btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])

	bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx

	for (i = nr_sorted - 2; i >= 0; --i) {
		struct btree_iter *iter1 = trans->iters + sorted[i];
		struct btree_iter *iter2 = trans->iters + sorted[i + 1];

		if (iter1->btree_id == iter2->btree_id &&
		    iter1->locks_want < iter2->locks_want)
			__bch2_btree_iter_upgrade(iter1, iter2->locks_want);
		else if (!iter1->locks_want && iter2->locks_want)
			__bch2_btree_iter_upgrade(iter1, 1);
	}

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	for (i = 0; i < nr_sorted; i++) {
		unsigned idx = sorted[i];

		/*
		 * successfully traversing one iterator can cause another to be
		 * unlinked, in btree_key_cache_fill()
		 */
		if (!(trans->iters_linked & (1ULL << idx)))
			continue;

		ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
		if (ret)
			goto retry_all;
	}

	if (hweight64(trans->iters_live) > 1)
		ret = -EINTR;
	else
		trans_for_each_iter(trans, iter)
			if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
				ret = -EINTR;
				break;
			}
out:
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_trans_traverse_all(trans->ip, trace_ip);
	return ret;
}

int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
	return __btree_iter_traverse_all(trans, 0, _RET_IP_);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
					unsigned l, int check_pos)
{
	if (!is_btree_node(iter, l) ||
	    !bch2_btree_node_relock(iter, l))
		return false;

	if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
		return false;
	if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
		return false;
	return true;
}

static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
						     int check_pos)
{
	unsigned l = iter->level;

	while (btree_iter_node(iter, l) &&
	       !btree_iter_good_node(iter, l, check_pos)) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
	}

	return l;
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_iter_traverse_one(struct btree_iter *iter,
				   unsigned long trace_ip)
{
	unsigned depth_want = iter->level;
	int ret = 0;

	/*
	 * if we need interior nodes locked, call btree_iter_relock() to make
	 * sure we walk back up enough that we lock them:
	 */
	if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
	    iter->locks_want > 1)
		bch2_btree_iter_relock(iter, _THIS_IP_);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		ret = bch2_btree_iter_traverse_cached(iter);
		goto out;
	}

	if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
		goto out;

	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		goto out;

	iter->level = btree_iter_up_until_good_node(iter, 0);

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		ret = btree_iter_node(iter, iter->level)
			? btree_iter_down(iter, trace_ip)
			: btree_iter_lock_root(iter, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * Got to the end of the btree (in
				 * BTREE_ITER_NODES mode)
				 */
				ret = 0;
				goto out;
			}

			iter->level = depth_want;

			if (ret == -EIO) {
				iter->flags |= BTREE_ITER_ERROR;
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_ERROR;
			} else {
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_DOWN;
			}
			goto out;
		}
	}

	iter->uptodate = BTREE_ITER_NEED_PEEK;
out:
	trace_iter_traverse(iter->trans->ip, trace_ip,
			    iter->btree_id, &iter->real_pos, ret);
	bch2_btree_iter_verify(iter);
	return ret;
}
static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	ret =   bch2_trans_cond_resched(trans) ?:
		btree_iter_traverse_one(iter, _RET_IP_);
	if (unlikely(ret))
		ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);

	return ret;
}

/*
 * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
 * for internal btree iterator users
 *
 * bch2_btree_iter_traverse sets iter->real_pos to iter->pos,
 * btree_iter_traverse() does not:
 */
static inline int __must_check
btree_iter_traverse(struct btree_iter *iter)
{
	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
		? __bch2_btree_iter_traverse(iter)
		: 0;
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));

	ret = btree_iter_traverse(iter);
	if (ret)
		return ret;

	iter->should_be_locked = true;
	return 0;
}
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);

	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);

	iter->pos = iter->real_pos = b->key.k.p;

	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return b;
}
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);

	/* already got to end? */
	if (!btree_iter_node(iter, iter->level))
		return NULL;

	bch2_trans_cond_resched(iter->trans);

	btree_node_unlock(iter, iter->level);
	iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
	iter->level++;

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;

	/* got to end? */
	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		btree_iter_set_search_pos(iter, bpos_successor(iter->pos));

		/* Unlock to avoid screwing up our lock invariants: */
		btree_node_unlock(iter, iter->level);

		iter->level = iter->min_depth;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		bch2_btree_iter_verify(iter);

		ret = btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->l[iter->level].b;
	}

	iter->pos = iter->real_pos = b->key.k.p;

	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return b;
}
/* Iterate across keys (in leaf nodes only) */

static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
	struct bpos old_pos = iter->real_pos;
	int cmp = bpos_cmp(new_pos, iter->real_pos);
	unsigned l = iter->level;

	if (!cmp)
		goto out;

	iter->real_pos = new_pos;
	iter->should_be_locked = false;

	if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
		btree_node_unlock(iter, 0);
		iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		return;
	}

	l = btree_iter_up_until_good_node(iter, cmp);

	if (btree_iter_node(iter, l)) {
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
			__btree_iter_init(iter, l);

		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(iter, l);
	}
out:
	if (l != iter->level)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	else
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	bch2_btree_iter_verify(iter);
#ifdef CONFIG_BCACHEFS_DEBUG
	trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
				  iter->btree_id,
				  &old_pos, &new_pos, l);
#endif
}
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = bpos_cmp(pos, POS_MAX) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = bpos_cmp(pos, POS_MIN) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
	struct bpos next_pos = iter->l[0].b->key.k.p;
	bool ret = bpos_cmp(next_pos, POS_MAX) != 0;

	/*
	 * Typically, we don't want to modify iter->pos here, since that
	 * indicates where we searched from - unless we got to the end of the
	 * btree, in that case we want iter->pos to reflect that:
	 */
	if (ret)
		btree_iter_set_search_pos(iter, bpos_successor(next_pos));
	else
		bch2_btree_iter_set_pos(iter, POS_MAX);

	return ret;
}

static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
	struct bpos next_pos = iter->l[0].b->data->min_key;
	bool ret = bpos_cmp(next_pos, POS_MIN) != 0;

	if (ret)
		btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
	else
		bch2_btree_iter_set_pos(iter, POS_MIN);

	return ret;
}
static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
					       enum btree_id btree_id, struct bpos pos)
{
	struct btree_insert_entry *i;

	trans_for_each_update2(trans, i)
		if ((cmp_int(btree_id,	i->iter->btree_id) ?:
		     bkey_cmp(pos,	i->k->k.p)) <= 0) {
			if (btree_id ==	i->iter->btree_id)
				return i->k;
			break;
		}

	return NULL;
}
static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates)
{
	struct bpos search_key = btree_iter_search_key(iter);
	struct bkey_i *next_update;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);
start:
	next_update = with_updates
		? btree_trans_peek_updates(iter->trans, iter->btree_id, search_key)
		: NULL;
	btree_iter_set_search_pos(iter, search_key);

	while (1) {
		ret = btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = btree_iter_level_peek(iter, &iter->l[0]);

		if (next_update &&
		    bpos_cmp(next_update->k.p, iter->real_pos) <= 0)
			k = bkey_i_to_s_c(next_update);

		if (likely(k.k)) {
			if (bkey_deleted(k.k)) {
				search_key = bkey_successor(iter, k.k->p);
				goto start;
			}

			break;
		}

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should be monotonically increasing, and always be equal to
	 * the key we just returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
		iter->pos = k.k->p;
	else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;
	return k;
}
/**
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return __btree_iter_peek(iter, false);
}

/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);
}
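/*
 * Illustrative sketch (not code from this file) of the peek/next pattern
 * for walking every key in a btree:
 *
 *	for (k = bch2_btree_iter_peek(iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(iter)) {
 *		// process k
 *	}
 *
 * This is essentially what the for_each_btree_key() helpers expand to.
 */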
struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
{
	return __btree_iter_peek(iter, true);
}

struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_with_updates(iter);
}
/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	btree_iter_set_search_pos(iter, iter->pos);

	while (1) {
		ret = btree_iter_traverse(iter);
		if (unlikely(ret)) {
			k = bkey_s_c_err(ret);
			goto no_key;
		}

		k = btree_iter_level_peek(iter, l);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
		     : bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0))
			k = btree_iter_level_prev(iter, l);

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_prev_leaf(iter)) {
			k = bkey_s_c_null;
			goto no_key;
		}
	}

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);

	/* Extents can straddle iter->pos: */
	if (bkey_cmp(k.k->p, iter->pos) < 0)
		iter->pos = k.k->p;
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;
	return k;
no_key:
	/*
	 * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
	 * then we errored going to the previous leaf - make sure it's
	 * consistent with iter->pos:
	 */
	bkey_init(&iter->k);
	iter->k.p = iter->pos;
	goto out;
}
/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
{
	struct bkey_s_c k;
	struct bpos pos, next_start;

	/* keys & holes can't span inode numbers: */
	if (iter->pos.offset == KEY_OFFSET_MAX) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bkey_successor(iter, iter->pos));
	}

	pos = iter->pos;
	k = bch2_btree_iter_peek(iter);
	iter->pos = pos;

	if (bkey_err(k))
		return k;

	if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0)
		return k;

	next_start = k.k ? bkey_start_pos(k.k) : POS_MAX;

	bkey_init(&iter->k);
	iter->k.p = iter->pos;
	bch2_key_resize(&iter->k,
			min_t(u64, KEY_SIZE_MAX,
			      (next_start.inode == iter->pos.inode
			       ? next_start.offset
			       : KEY_OFFSET_MAX) -
			      iter->pos.offset));

	EBUG_ON(!iter->k.size);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return (struct bkey_s_c) { &iter->k, NULL };
}
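/*
 * Example of the hole synthesis above (illustrative numbers): with iter->pos
 * at inode 1, offset 10, and the next extent starting at offset 50 in the
 * same inode, we return a synthetic deleted key spanning [10, 50) - i.e.
 * iter->k.p.offset == 50, iter->k.size == 40 - with a NULL value pointer.
 * Callers iterating slots thus see holes as explicit keys.
 */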
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return __bch2_btree_iter_peek_slot_extents(iter);

	ret = btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	k = btree_iter_level_peek_all(iter, l, &iter->k);

	EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);

	if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
		/* hole */
		bkey_init(&iter->k);
		iter->k.p = iter->pos;
		k = (struct bkey_s_c) { &iter->k, NULL };
	}

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return k;
}
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
{
	struct bkey_cached *ck;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
	bch2_btree_iter_verify(iter);

	ret = btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	ck = (void *) iter->l[0].b;

	EBUG_ON(iter->btree_id != ck->key.btree_id ||
		bkey_cmp(iter->pos, ck->key.pos));
	BUG_ON(!ck->valid);

	iter->should_be_locked = true;

	return bkey_i_to_s_c(ck->k);
}
static inline void bch2_btree_iter_init(struct btree_trans *trans,
			struct btree_iter *iter, enum btree_id btree_id)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	iter->trans			= trans;
	iter->uptodate			= BTREE_ITER_NEED_TRAVERSE;
	iter->btree_id			= btree_id;
	iter->real_pos			= POS_MIN;
	iter->level			= 0;
	iter->min_depth			= 0;
	iter->locks_want		= 0;
	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= BTREE_ITER_NO_NODE_INIT;

	prefetch(c->btree_roots[btree_id].b);
}
/* new transactional stuff: */

static inline void __bch2_trans_iter_free(struct btree_trans *trans,
					  unsigned idx)
{
	__bch2_btree_iter_unlock(&trans->iters[idx]);
	trans->iters_linked		&= ~(1ULL << idx);
	trans->iters_live		&= ~(1ULL << idx);
	trans->iters_touched		&= ~(1ULL << idx);
}

int bch2_trans_iter_put(struct btree_trans *trans,
			struct btree_iter *iter)
{
	int ret;

	if (IS_ERR_OR_NULL(iter))
		return 0;

	BUG_ON(trans->iters + iter->idx != iter);
	BUG_ON(!btree_iter_live(trans, iter));

	ret = btree_iter_err(iter);

	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
	    !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
		__bch2_trans_iter_free(trans, iter->idx);

	trans->iters_live	&= ~(1ULL << iter->idx);
	return ret;
}

int bch2_trans_iter_free(struct btree_trans *trans,
			 struct btree_iter *iter)
{
	if (IS_ERR_OR_NULL(iter))
		return 0;

	set_btree_iter_dontneed(trans, iter);

	return bch2_trans_iter_put(trans, iter);
}
noinline __cold
static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
{
	struct btree_iter *iter;
	struct btree_insert_entry *i;
	char buf[100];

	trans_for_each_iter(trans, iter)
		printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
		       bch2_btree_ids[iter->btree_id],
		       (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf),
		       btree_iter_live(trans, iter) ? " live" : "",
		       (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
		       iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
		       (void *) iter->ip_allocated);

	trans_for_each_update(trans, i) {
		char buf[300];

		bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
		printk(KERN_ERR "update: btree %s %s\n",
		       bch2_btree_ids[i->iter->btree_id], buf);
	}
	panic("trans iter overflow\n");
}

static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
{
	unsigned idx;

	if (unlikely(trans->iters_linked ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
		btree_trans_iter_alloc_fail(trans);

	idx = __ffs64(~trans->iters_linked);

	trans->iters_linked	|= 1ULL << idx;
	trans->iters[idx].idx	 = idx;
	trans->iters[idx].flags  = 0;
	return &trans->iters[idx];
}
static inline void btree_iter_copy(struct btree_iter *dst,
				   struct btree_iter *src)
{
	unsigned i, idx = dst->idx;

	*dst = *src;
	dst->idx = idx;
	dst->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
			BTREE_ITER_SET_POS_AFTER_COMMIT);

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(dst, i))
			six_lock_increment(&dst->l[i].b->c.lock,
					   __btree_lock_want(dst, i));
}
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 unsigned btree_id, struct bpos pos,
					 unsigned locks_want,
					 unsigned depth,
					 unsigned flags)
{
	struct btree_iter *iter, *best = NULL;
	struct bpos real_pos, pos_min = POS_MIN;

	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
	    btree_node_type_is_extents(btree_id) &&
	    !(flags & BTREE_ITER_NOT_EXTENTS) &&
	    !(flags & BTREE_ITER_ALL_SNAPSHOTS))
		flags |= BTREE_ITER_IS_EXTENTS;

	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
		pos.snapshot = btree_type_has_snapshots(btree_id)
			? U32_MAX : 0;

	real_pos = pos;

	if ((flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		real_pos = bpos_nosnap_successor(pos);

	trans_for_each_iter(trans, iter) {
		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
			continue;

		if (iter->btree_id != btree_id)
			continue;

		if (best) {
			int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
					   bpos_diff(iter->real_pos, real_pos));

			if (cmp < 0 ||
			    ((cmp == 0 && btree_iter_keep(trans, iter))))
				continue;
		}

		best = iter;
	}

	if (!best) {
		iter = btree_trans_iter_alloc(trans);
		bch2_btree_iter_init(trans, iter, btree_id);
	} else if (btree_iter_keep(trans, best)) {
		iter = btree_trans_iter_alloc(trans);
		btree_iter_copy(iter, best);
	} else {
		iter = best;
	}

	trans->iters_live	|= 1ULL << iter->idx;
	trans->iters_touched	|= 1ULL << iter->idx;

	iter->flags = flags;

	iter->snapshot = pos.snapshot;

	/*
	 * If the iterator has locks_want greater than requested, we explicitly
	 * do not downgrade it here - on transaction restart because btree node
	 * split needs to upgrade locks, we might be putting/getting the
	 * iterator again. Downgrading iterators only happens via an explicit
	 * bch2_trans_downgrade().
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > iter->locks_want) {
		iter->locks_want = locks_want;
		btree_iter_get_locks(iter, true, _THIS_IP_);
	}

	while (iter->level != depth) {
		btree_node_unlock(iter, iter->level);
		iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
		iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
		if (iter->level < depth)
			iter->level++;
		else
			iter->level--;
	}

	iter->min_depth	= depth;

	bch2_btree_iter_set_pos(iter, pos);
	btree_iter_set_search_pos(iter, real_pos);

	trace_trans_get_iter(_RET_IP_, trans->ip,
			     btree_id,
			     &real_pos, locks_want, iter->uptodate,
			     best ? &best->real_pos	: &pos_min,
			     best ? best->locks_want	: U8_MAX,
			     best ? best->uptodate	: U8_MAX);

	return iter;
}
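/*
 * Illustrative sketch of the full transaction/iterator lifecycle (not code
 * from this file; error handling trimmed). bch2_trans_get_iter() is assumed
 * to be the usual header wrapper around __bch2_trans_get_iter():
 *
 *	struct btree_trans trans;
 *	struct btree_iter *iter;
 *	struct bkey_s_c k;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, pos, 0);
 *	k = bch2_btree_iter_peek(iter);
 *	// ... use k ...
 *	bch2_trans_iter_put(&trans, iter);
 *	bch2_trans_exit(&trans);
 */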
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos pos,
					    unsigned locks_want,
					    unsigned depth,
					    unsigned flags)
{
	struct btree_iter *iter =
		__bch2_trans_get_iter(trans, btree_id, pos,
				      locks_want, depth,
				      BTREE_ITER_NODES|
				      BTREE_ITER_NOT_EXTENTS|
				      BTREE_ITER_ALL_SNAPSHOTS|
				      flags);

	BUG_ON(bkey_cmp(iter->pos, pos));
	BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->level	!= depth);
	BUG_ON(iter->min_depth	!= depth);
	iter->ip_allocated = _RET_IP_;

	return iter;
}
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
					  struct btree_iter *src)
{
	struct btree_iter *iter;

	iter = btree_trans_iter_alloc(trans);
	btree_iter_copy(iter, src);

	trans->iters_live |= 1ULL << iter->idx;

	/*
	 * We don't need to preserve this iter since it's cheap to copy it
	 * again - this will cause trans_iter_put() to free it right away:
	 */
	set_btree_iter_dontneed(trans, iter);

	return iter;
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size_t new_top = trans->mem_top + size;
	void *p;

	if (new_top > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;

		WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

		new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			trans->used_mempool = true;
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
			return ERR_PTR(-EINTR);
		}
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
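/*
 * Illustrative usage (not code from this file): allocations live until the
 * transaction is reset, so callers don't free them. A -EINTR return means
 * the buffer was reallocated and the transaction must be restarted:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	if (IS_ERR(k))
 *		return PTR_ERR(k);
 *	bkey_init(&k->k);
 */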
inline void bch2_trans_unlink_iters(struct btree_trans *trans)
{
	u64 iters = trans->iters_linked &
		~trans->iters_touched &
		~trans->iters_live;

	while (iters) {
		unsigned idx = __ffs64(iters);

		iters &= ~(1ULL << idx);
		__bch2_trans_iter_free(trans, idx);
	}
}
void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
				 BTREE_ITER_SET_POS_AFTER_COMMIT);

	bch2_trans_unlink_iters(trans);

	trans->iters_touched &= trans->iters_live;

	trans->nr_updates		= 0;
	trans->nr_updates2		= 0;
	trans->mem_top			= 0;

	trans->hooks			= NULL;
	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	if (!(flags & TRANS_RESET_NOUNLOCK))
		bch2_trans_cond_resched(trans);

	if (!(flags & TRANS_RESET_NOTRAVERSE) &&
	    trans->iters_linked)
		bch2_btree_iter_traverse_all(trans);
}
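/*
 * Note: callers normally reach this via bch2_trans_begin(), assumed here to
 * be a thin wrapper for bch2_trans_reset(trans, 0) - resetting is what makes
 * a btree_trans reusable across transaction restarts.
 */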
static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
{
	size_t iters_bytes	= sizeof(struct btree_iter) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);

	trans->iters		= p; p += iters_bytes;
	trans->updates		= p; p += updates_bytes;
	trans->updates2		= p; p += updates_bytes;
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
	__acquires(&c->btree_trans_barrier)
{
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->ip	= _RET_IP_;

	/*
	 * reallocating iterators currently completely breaks
	 * bch2_trans_iter_put(), we always allocate the max:
	 */
	bch2_trans_alloc_iters(trans, c);

	if (expected_mem_bytes) {
		trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
		trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		}
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}
int bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	if (trans->iters_live) {
		struct btree_iter *iter;

		bch_err(c, "btree iterators leaked!");
		trans_for_each_iter(trans, iter)
			if (btree_iter_live(trans, iter))
				printk(KERN_ERR "  btree %s allocated at %pS\n",
				       bch2_btree_ids[iter->btree_id],
				       (void *) iter->ip_allocated);
		/* Be noisy about this: */
		bch2_fatal_error(c);
	}

	mutex_lock(&trans->c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&trans->c->btree_trans_lock);
#endif

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &trans->c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
#endif
	if (trans->iters)
		mempool_free(trans->iters, &trans->c->btree_iters_pool);

	trans->mem	= (void *) 0x1;
	trans->iters	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}
static void __maybe_unused
bch2_btree_iter_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     enum btree_iter_type type)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, type));
}
#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
		    iter->nodes_locked)
			return true;
	return false;
}
#endif
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_btree_nodes_locked(trans))
			continue;

		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_iter(trans, iter) {
			if (!iter->nodes_locked)
				continue;

			pr_buf(out, "  iter %u %c %s:",
			       iter->idx,
			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
			       bch2_btree_ids[iter->btree_id]);
			bch2_bpos_to_text(out, iter->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(iter, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(iter, l) ? "i" : "r", l);
					bch2_btree_iter_node_to_text(out,
							(void *) iter->l[l].b,
							btree_iter_type(iter));
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			iter = &trans->iters[trans->locking_iter_idx];
			pr_buf(out, "  locking iter %u %c l=%u %s:",
			       trans->locking_iter_idx,
			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_iter_node_to_text(out,
					(void *) b,
					btree_iter_type(iter));
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_iters_pool);
	cleanup_srcu_struct(&c->btree_trans_barrier);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	/*
	 * One pool element covers the iters, updates and updates2 arrays,
	 * matching the layout carved up by bch2_trans_alloc_iters():
	 */
	return  init_srcu_struct(&c->btree_trans_barrier) ?:
		mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
			sizeof(struct btree_iter) * nr +
			sizeof(struct btree_insert_entry) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX);
}