// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "replicas.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *);
static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);

	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}
static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);

	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}
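/*
 * iter->l[].b holds either a real node pointer or one of the small
 * BTREE_ITER_NO_NODE_* sentinel values (integers below 128 cast to a
 * pointer); is_btree_node() below distinguishes the two. For example:
 *
 *	iter->l[l].b = BTREE_ITER_NO_NODE_UP;	// level marked invalid
 *	is_btree_node(iter, l);			// -> false
 */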
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[l].b >= 128;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}

static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
					      struct btree *b)
{
	return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
}

static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
{
	return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->c.btree_id &&
		!btree_iter_pos_before_node(iter, b) &&
		!btree_iter_pos_after_node(iter, b);
}
/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	bch2_btree_node_unlock_write_inlined(b, iter);
}

void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[b->c.level].b == b &&
		    btree_node_read_locked(linked, b->c.level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	atomic64_sub(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
	atomic64_add(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
	     btree_node_lock_increment(iter->trans, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	}

	return false;
}

static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
{
	struct btree *b = iter->l[level].b;

	EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(iter, level))
		return false;

	if (btree_node_intent_locked(iter, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(iter, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(iter, b, level) &&
	    btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(iter, level);
	return true;
}
static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
					unsigned long trace_ip)
{
	unsigned l = iter->level;
	int fail_idx = -1;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(iter, l)
		      : bch2_btree_node_relock(iter, l))) {
			(upgrade
			 ? trace_node_upgrade_fail
			 : trace_node_relock_fail)(iter->trans->ip, trace_ip,
					iter->btree_id, &iter->real_pos,
					l, iter->l[l].lock_seq,
					is_btree_node(iter, l)
					? 0
					: (unsigned long) iter->l[l].b,
					is_btree_node(iter, l)
					? iter->l[l].b->c.lock.state.seq
					: 0);

			fail_idx = l;
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		}

		l++;
	} while (l < iter->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_iter_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	while (fail_idx >= 0) {
		btree_node_unlock(iter, fail_idx);
		iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
		--fail_idx;
	}

	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
		iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);

	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  enum btree_iter_type type)
{
	return  type != BTREE_ITER_CACHED
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level, struct btree_iter *iter,
			    enum six_lock_type type,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter *linked, *deadlock_iter = NULL;
	u64 start_time = local_clock();
	unsigned reason = 9;
	bool ret;

	/* Check if it's safe to block: */
	trans_for_each_iter(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
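		/*
		 * For instance (a hypothetical interleaving): we hold a read
		 * lock on X and want an intent lock on Y, while another
		 * thread holds intent locks on both X and Y:
		 *
		 *	us:   six_lock_read(&X->c.lock)
		 *	them: six_lock_intent(&Y->c.lock)
		 *	them: six_lock_intent(&X->c.lock)
		 *	them: six_lock_write(&X->c.lock)  <- blocks on our read lock
		 *	us:   six_lock_intent(&Y->c.lock) <- blocks on their intent lock
		 */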
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			deadlock_iter = linked;
			reason = 1;
		}

		if (linked->btree_id != iter->btree_id) {
			if (linked->btree_id > iter->btree_id) {
				deadlock_iter = linked;
				reason = 3;
			}
			continue;
		}

		/*
		 * Within the same btree, cached iterators come before non
		 * cached iterators:
		 */
		if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
			if (btree_iter_is_cached(iter)) {
				deadlock_iter = linked;
				reason = 4;
			}
			continue;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			deadlock_iter = linked;
			reason = 5;
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 btree_iter_type(linked))) <= 0) {
			deadlock_iter = linked;
			reason = 7;
			BUG_ON(trans->in_traverse_all);
		}
	}
	if (unlikely(deadlock_iter)) {
		trace_trans_restart_would_deadlock(iter->trans->ip, ip,
				trans->in_traverse_all, reason,
				deadlock_iter->btree_id,
				btree_iter_type(deadlock_iter),
				&deadlock_iter->real_pos,
				iter->btree_id,
				btree_iter_type(iter),
				&pos);
		return false;
	}

	if (six_trylock_type(&b->c.lock, type))
		return true;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking_iter_idx = iter->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= iter->btree_id;
	trans->locking_level	= level;
	trans->locking		= b;
#endif

	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking = NULL;
#endif
	if (ret)
		bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
				       start_time);
	return ret;
}
/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
	unsigned l;

	if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
		BUG_ON(iter->nodes_locked);
		return;
	}

	for (l = 0; btree_iter_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
			continue;

		BUG_ON(btree_lock_want(iter, l) !=
		       btree_node_locked_type(iter, l));
	}
}

void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_verify_locks(iter);
}
#else
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif

bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
	return btree_iter_get_locks(iter, false, trace_ip);
}
bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
			       unsigned new_locks_want)
{
	struct btree_iter *linked;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	if (btree_iter_get_locks(iter, true, _THIS_IP_))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_iter_traverse_all().
	 */
	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    btree_iter_type(linked) == btree_iter_type(iter) &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_iter_get_locks(linked, true, _THIS_IP_);
		}

	return false;
}

void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(iter->locks_want < new_locks_want);

	iter->locks_want = new_locks_want;

	while (iter->nodes_locked &&
	       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
		if (l > iter->level) {
			btree_node_unlock(iter, l);
		} else {
			if (btree_node_intent_locked(iter, l)) {
				six_lock_downgrade(&iter->l[l].b->c.lock);
				iter->nodes_intent_locked ^= 1 << l;
			}
			break;
		}
	}

	bch2_btree_trans_verify_locks(iter->trans);
}

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_downgrade(iter);
}
/* Btree transaction locking: */

static inline bool btree_iter_should_be_locked(struct btree_trans *trans,
					       struct btree_iter *iter)
{
	return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
		iter->should_be_locked;
}

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (!bch2_btree_iter_relock(iter, _RET_IP_) &&
		    btree_iter_should_be_locked(trans, iter)) {
			trace_trans_restart_relock(trans->ip, _RET_IP_,
					iter->btree_id, &iter->real_pos);
			return false;
		}
	return true;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		__bch2_btree_iter_unlock(iter);

	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(iter, 0);

	if (!bch2_btree_node_relock(iter, 0))
		return;

	ck = (void *) iter->l[0].b;
	BUG_ON(ck->key.btree_id != iter->btree_id ||
	       bkey_cmp(ck->key.pos, iter->pos));

	if (!locked)
		btree_node_unlock(iter, 0);
}
static void bch2_btree_iter_verify_level(struct btree_iter *iter,
					 unsigned level)
{
	struct btree_iter_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100], buf3[100];
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &iter->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(iter, level);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		if (!level)
			bch2_btree_iter_verify_cached(iter);
		return;
	}

	BUG_ON(iter->level < iter->min_depth);

	if (!btree_iter_node(iter, level))
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	BUG_ON(!btree_iter_pos_in_node(iter, l->b));

	/*
	 * node iterators don't use leaf node iterator:
	 */
	if (btree_iter_type(iter) == BTREE_ITER_NODES &&
	    level <= iter->min_depth)
		goto unlock;

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 *
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
	p = level || btree_node_type_is_extents(iter->btree_id)
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
		msg = "after";
		goto err;
	}
unlock:
	if (!locked)
		btree_node_unlock(iter, level);
	return;
err:
	strcpy(buf2, "(none)");
	strcpy(buf3, "(none)");

	bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&PBUF(buf3), &uk);
	}

	panic("iterator should be %s key at level %u:\n"
	      "iter pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1, buf2, buf3);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	enum btree_iter_type type = btree_iter_type(iter);
	unsigned i;

	EBUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(type == BTREE_ITER_NODES &&
	       !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(type != BTREE_ITER_NODES &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) {
		if (!iter->l[i].b) {
			BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
			break;
		}

		bch2_btree_iter_verify_level(iter, i);
	}

	bch2_btree_iter_verify_locks(iter);
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	enum btree_iter_type type = btree_iter_type(iter);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((type == BTREE_ITER_KEYS ||
		type == BTREE_ITER_CACHED) &&
	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
		bkey_cmp(iter->pos, iter->k.p) > 0));
}

void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
{
	struct btree_iter *iter;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_iter_with_node(trans, b, iter)
		bch2_btree_iter_verify_level(iter, b->c.level);
}

#else

static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}

#endif
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_iter_level *l = &iter->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_iter_fix_key_modified(linked, b, where);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to re-add it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    (b->c.level ||
	     btree_node_type_is_extents(iter->btree_id))) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}

	if (!b->c.level &&
	    node_iter == &iter->l[0].iter &&
	    iter_current_key_modified)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_iter *linked;

	if (node_iter != &iter->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
						  struct btree_iter_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	/*
	 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
	 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
	 * being overwritten but doesn't change k->size. But this is ok, because
	 * those keys are never written out, we just have to avoid a spurious
	 * assertion here:
	 */
	if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
							struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
						    struct btree_iter_level *l)
{
	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
	return k;
}

static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
						    struct btree_iter_level *l)
{
	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
	return k;
}

static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
					     struct btree_iter_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->c.level + 1;
	if (!btree_iter_node(iter, plevel))
		return;

	parent_locked = btree_node_locked(iter, plevel);

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	l = &iter->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (!k ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf1[100];
		char buf2[100];
		char buf3[100];
		char buf4[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_dump_btree_node(iter->trans->c, l->b);
		bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
		bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
		bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
		panic("parent iter doesn't point to new node:\n"
		      "iter pos %s %s\n"
		      "iter key %s\n"
		      "new node %s-%s\n",
		      bch2_btree_ids[iter->btree_id], buf1,
		      buf2, buf3, buf4);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
				     unsigned level)
{
	struct btree_iter_level *l = &iter->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);

	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
	iter->l[b->c.level].b = b;
	__btree_iter_init(iter, b->c.level);
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	enum btree_node_locked_type t;
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
		    btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->c.level));

			t = btree_lock_want(linked, b->c.level);
			if (t != BTREE_NODE_UNLOCKED) {
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(linked, b->c.level, t);
			}

			btree_iter_node_set(linked, b);
		}
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->c.level;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
			btree_node_unlock(linked, level);
			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
		}
}
/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__btree_iter_init(linked, b->c.level);
}
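/*
 * should_sleep_fn callback for locking the root: the root can be freed or
 * replaced while we wait for the lock, so check that the node we're blocking
 * on is still the root before sleeping:
 */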
static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	return b == *rootp ? 0 : -1;
}
static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = iter->trans->c;
	struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		iter->level = READ_ONCE(b->c.level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
					      iter, lock_type,
					      lock_root_check_fn, rootp,
					      trace_ip)))
			return -EINTR;

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			iter->l[iter->level].b = b;
			for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
static void btree_iter_prefetch(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);

	bch2_bkey_buf_init(&tmp);

	while (nr--) {
		if (!bch2_btree_node_relock(iter, iter->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
					 iter->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level);

	bch2_bkey_buf_exit(&tmp, c);
}
static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
					    unsigned plevel, struct btree *b)
{
	struct btree_iter_level *l = &iter->l[plevel];
	bool locked = btree_node_locked(iter, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(iter, plevel);
}

static __always_inline int btree_iter_down(struct btree_iter *iter,
					   unsigned long trace_ip)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	bch2_bkey_buf_init(&tmp);
	bch2_bkey_buf_unpack(&tmp, c, l->b,
			     bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(c, iter, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(iter, level + 1, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);

	if (btree_node_read_locked(iter, level + 1))
		btree_node_unlock(iter, level + 1);
	iter->level = level;

	bch2_btree_iter_verify_locks(iter);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static int btree_iter_traverse_one(struct btree_iter *, unsigned long);

static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
				     unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	u8 sorted[BTREE_ITER_MAX];
	int i, nr_sorted = 0;
	bool relock_fail;

	if (trans->in_traverse_all)
		return -EINTR;

	trans->in_traverse_all = true;
retry_all:
	nr_sorted = 0;
	relock_fail = false;

	trans_for_each_iter(trans, iter) {
		if (!bch2_btree_iter_relock(iter, _THIS_IP_))
			relock_fail = true;
		sorted[nr_sorted++] = iter->idx;
	}

	if (!relock_fail) {
		trans->in_traverse_all = false;
		return 0;
	}

#define btree_iter_cmp_by_idx(_l, _r)				\
		btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])

	bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx

	for (i = nr_sorted - 2; i >= 0; --i) {
		struct btree_iter *iter1 = trans->iters + sorted[i];
		struct btree_iter *iter2 = trans->iters + sorted[i + 1];

		if (iter1->btree_id == iter2->btree_id &&
		    iter1->locks_want < iter2->locks_want)
			__bch2_btree_iter_upgrade(iter1, iter2->locks_want);
		else if (!iter1->locks_want && iter2->locks_want)
			__bch2_btree_iter_upgrade(iter1, 1);
	}

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	for (i = 0; i < nr_sorted; i++) {
		unsigned idx = sorted[i];

		/*
		 * successfully traversing one iterator can cause another to be
		 * unlinked, in btree_key_cache_fill()
		 */
		if (!(trans->iters_linked & (1ULL << idx)))
			continue;

		ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
		if (ret)
			goto retry_all;
	}

	if (hweight64(trans->iters_live) > 1)
		ret = -EINTR;
	else
		trans_for_each_iter(trans, iter)
			if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
				ret = -EINTR;
				break;
			}
out:
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_trans_traverse_all(trans->ip, trace_ip);
	return ret;
}

int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
	return __btree_iter_traverse_all(trans, 0, _RET_IP_);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
					unsigned l, int check_pos)
{
	if (!is_btree_node(iter, l) ||
	    !bch2_btree_node_relock(iter, l))
		return false;

	if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
		return false;
	if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
		return false;
	return true;
}

static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
						     int check_pos)
{
	unsigned l = iter->level;

	while (btree_iter_node(iter, l) &&
	       !btree_iter_good_node(iter, l, check_pos)) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
	}

	return l;
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_iter_traverse_one(struct btree_iter *iter,
				   unsigned long trace_ip)
{
	unsigned l, depth_want = iter->level;
	int ret = 0;

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		ret = bch2_btree_iter_traverse_cached(iter);
		goto out;
	}

	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		goto out;

	iter->level = btree_iter_up_until_good_node(iter, 0);

	/* If we need intent locks, take them too: */
	for (l = iter->level + 1;
	     l < iter->locks_want && btree_iter_node(iter, l);
	     l++)
		if (!bch2_btree_node_relock(iter, l))
			while (iter->level <= l) {
				btree_node_unlock(iter, iter->level);
				iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
				iter->level++;
			}

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		ret = btree_iter_node(iter, iter->level)
			? btree_iter_down(iter, trace_ip)
			: btree_iter_lock_root(iter, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * Got to the end of the btree (in
				 * BTREE_ITER_NODES mode)
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_iter_unlock(iter);
			iter->level = depth_want;

			if (ret == -EIO) {
				iter->flags |= BTREE_ITER_ERROR;
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_ERROR;
			} else {
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_DOWN;
			}
			goto out;
		}
	}

	iter->uptodate = BTREE_ITER_NEED_PEEK;
out:
	trace_iter_traverse(iter->trans->ip, trace_ip,
			    iter->btree_id, &iter->real_pos, ret);
	bch2_btree_iter_verify(iter);
	return ret;
}
static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	ret =   bch2_trans_cond_resched(trans) ?:
		btree_iter_traverse_one(iter, _RET_IP_);
	if (unlikely(ret))
		ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);

	return ret;
}
/*
 * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
 * for internal btree iterator users
 *
 * bch2_btree_iter_traverse sets iter->real_pos to iter->pos,
 * btree_iter_traverse() does not:
 */
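/*
 * A sketch of typical external usage, assuming the bch2_trans_get_iter()
 * wrapper from btree_iter.h:
 *
 *	iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, pos, 0);
 *	ret = bch2_btree_iter_traverse(iter);
 *	if (!ret)
 *		k = bch2_btree_iter_peek_slot(iter);
 *	bch2_trans_iter_put(trans, iter);
 */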
static inline int __must_check
btree_iter_traverse(struct btree_iter *iter)
{
	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
		? __bch2_btree_iter_traverse(iter)
		: 0;
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));

	ret = btree_iter_traverse(iter);
	if (ret)
		return ret;

	iter->should_be_locked = true;
	return 0;
}
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);

	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);

	iter->pos = iter->real_pos = b->key.k.p;

	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return b;
}
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);

	/* already got to end? */
	if (!btree_iter_node(iter, iter->level))
		return NULL;

	bch2_trans_cond_resched(iter->trans);

	btree_node_unlock(iter, iter->level);
	iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
	iter->level++;

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;

	/* got to end? */
	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		btree_iter_set_search_pos(iter, bpos_successor(iter->pos));

		/* Unlock to avoid screwing up our lock invariants: */
		btree_node_unlock(iter, iter->level);

		iter->level = iter->min_depth;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		bch2_btree_iter_verify(iter);

		ret = btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->l[iter->level].b;
	}

	iter->pos = iter->real_pos = b->key.k.p;

	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return b;
}
/* Iterate across keys (in leaf nodes only) */

static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bpos old_pos = iter->real_pos;
#endif
	int cmp = bpos_cmp(new_pos, iter->real_pos);
	unsigned l = iter->level;

	if (!cmp)
		goto out;

	iter->real_pos = new_pos;
	iter->should_be_locked = false;

	if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
		btree_node_unlock(iter, 0);
		iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		return;
	}

	l = btree_iter_up_until_good_node(iter, cmp);

	if (btree_iter_node(iter, l)) {
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
			__btree_iter_init(iter, l);

		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(iter, l);
	}
out:
	if (l != iter->level)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	else
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	bch2_btree_iter_verify(iter);
#ifdef CONFIG_BCACHEFS_DEBUG
	trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
				  iter->btree_id,
				  &old_pos, &new_pos, l);
#endif
}
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = bpos_cmp(pos, SPOS_MAX) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		    ? bpos_cmp(pos, POS_MIN)
		    : bkey_cmp(pos, POS_MIN)) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
	struct bpos next_pos = iter->l[0].b->key.k.p;
	bool ret = bpos_cmp(next_pos, SPOS_MAX) != 0;

	/*
	 * Typically, we don't want to modify iter->pos here, since that
	 * indicates where we searched from - unless we got to the end of the
	 * btree, in that case we want iter->pos to reflect that:
	 */
	if (ret)
		btree_iter_set_search_pos(iter, bpos_successor(next_pos));
	else
		bch2_btree_iter_set_pos(iter, SPOS_MAX);

	return ret;
}

static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
	struct bpos next_pos = iter->l[0].b->data->min_key;
	bool ret = bpos_cmp(next_pos, POS_MIN) != 0;

	if (ret)
		btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
	else
		bch2_btree_iter_set_pos(iter, POS_MIN);

	return ret;
}
static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter,
						      struct bpos pos)
{
	struct btree_insert_entry *i;

	if (!(iter->flags & BTREE_ITER_WITH_UPDATES))
		return NULL;

	trans_for_each_update(iter->trans, i)
		if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
		     bkey_cmp(pos, i->k->k.p)) <= 0) {
			if (iter->btree_id == i->iter->btree_id)
				return i->k;
			break;
		}

	return NULL;
}
/**
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
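/*
 * Typical usage is a peek/advance loop; the for_each_btree_key() macros in
 * btree_iter.h wrap this pattern (a sketch; process() is a placeholder):
 *
 *	for (k = bch2_btree_iter_peek(iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(iter))
 *		process(k);
 */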
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct bpos search_key = btree_iter_search_key(iter);
	struct bkey_i *next_update;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);
start:
	next_update = btree_trans_peek_updates(iter, search_key);
	btree_iter_set_search_pos(iter, search_key);

	while (1) {
		ret = btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = btree_iter_level_peek(iter, &iter->l[0]);

		if (next_update &&
		    bpos_cmp(next_update->k.p, iter->real_pos) <= 0) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
		}

		if (likely(k.k)) {
			if (bkey_deleted(k.k)) {
				search_key = bkey_successor(iter, k.k->p);
				goto start;
			}

			break;
		}

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should be monotonically increasing, and always be equal to
	 * the key we just returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
		iter->pos = k.k->p;
	else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;
	return k;
}
/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);
}
/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	btree_iter_set_search_pos(iter, iter->pos);

	while (1) {
		ret = btree_iter_traverse(iter);
		if (unlikely(ret)) {
			k = bkey_s_c_err(ret);
			goto no_key;
		}

		k = btree_iter_level_peek(iter, l);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
		     : bkey_cmp(k.k->p, iter->pos) > 0))
			k = btree_iter_level_prev(iter, l);

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_prev_leaf(iter)) {
			k = bkey_s_c_null;
			goto no_key;
		}
	}

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);

	/* Extents can straddle iter->pos: */
	if (bkey_cmp(k.k->p, iter->pos) < 0)
		iter->pos = k.k->p;
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;
	return k;
no_key:
	/*
	 * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
	 * then we errored going to the previous leaf - make sure it's
	 * consistent with iter->pos:
	 */
	bkey_init(&iter->k);
	iter->k.p = iter->pos;
	goto out;
}
/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
}
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct bpos search_key;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS &&
		btree_iter_type(iter) != BTREE_ITER_CACHED);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	/* extents can't span inode numbers: */
	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	}

	search_key = btree_iter_search_key(iter);
	btree_iter_set_search_pos(iter, search_key);

	ret = btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED ||
	    !(iter->flags & BTREE_ITER_IS_EXTENTS)) {
		struct bkey_i *next_update;
		struct bkey_cached *ck;

		switch (btree_iter_type(iter)) {
		case BTREE_ITER_KEYS:
			k = btree_iter_level_peek_all(iter, &iter->l[0]);
			EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, iter->pos) == 0);
			break;
		case BTREE_ITER_CACHED:
			ck = (void *) iter->l[0].b;
			EBUG_ON(iter->btree_id != ck->key.btree_id ||
				bkey_cmp(iter->pos, ck->key.pos));
			BUG_ON(!ck->valid);

			k = bkey_i_to_s_c(ck->k);
			break;
		case BTREE_ITER_NODES:
			BUG();
		}

		next_update = btree_trans_peek_updates(iter, search_key);
		if (next_update &&
		    (!k.k || bpos_cmp(next_update->k.p, k.k->p) <= 0)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
		}
	} else {
		if ((iter->flags & BTREE_ITER_INTENT)) {
			struct btree_iter *child =
				btree_iter_child_alloc(iter, _THIS_IP_);

			btree_iter_copy(child, iter);
			k = bch2_btree_iter_peek(child);

			if (k.k && !bkey_err(k))
				iter->k = child->k;
		} else {
			struct bpos pos = iter->pos;

			k = bch2_btree_iter_peek(iter);
			iter->pos = pos;
		}

		if (unlikely(bkey_err(k)))
			return k;
	}

	if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) {
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_ALL_SNAPSHOTS)
		     ? bpos_cmp(iter->pos, k.k->p)
		     : bkey_cmp(iter->pos, k.k->p))) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;
			k = (struct bkey_s_c) { &iter->k, NULL };
		}
	} else {
		struct bpos next = k.k ? bkey_start_pos(k.k) : POS_MAX;

		if (bkey_cmp(iter->pos, next) < 0) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;
			bch2_key_resize(&iter->k,
					min_t(u64, KEY_SIZE_MAX,
					      (next.inode == iter->pos.inode
					       ? next.offset
					       : KEY_OFFSET_MAX) -
					      iter->pos.offset));

			k = (struct bkey_s_c) { &iter->k, NULL };
			EBUG_ON(!k.k->size);
		}
	}

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return k;
}
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
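/*
 * Note the peek_slot() vs. peek() distinction: peek_slot() returns a key for
 * iter->pos itself, synthesizing a KEY_TYPE_deleted "hole" (sized to the gap,
 * for extents) when nothing is stored there, instead of scanning forward for
 * the next real key. A sketch of typical use:
 *
 *	bch2_btree_iter_set_pos(iter, pos);
 *	k = bch2_btree_iter_peek_slot(iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		// a real key exists at pos
 */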
static inline void bch2_btree_iter_init(struct btree_trans *trans,
			struct btree_iter *iter, enum btree_id btree_id)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	iter->trans			= trans;
	iter->uptodate			= BTREE_ITER_NEED_TRAVERSE;
	iter->btree_id			= btree_id;
	iter->real_pos			= POS_MIN;
	iter->level			= 0;
	iter->min_depth			= 0;
	iter->locks_want		= 0;
	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= BTREE_ITER_NO_NODE_INIT;

	prefetch(c->btree_roots[btree_id].b);
}
/* new transactional stuff: */

static void btree_iter_child_free(struct btree_iter *iter)
{
	struct btree_iter *child = btree_iter_child(iter);

	if (child) {
		bch2_trans_iter_free(iter->trans, child);
		iter->child_idx = U8_MAX;
	}
}

static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
						 unsigned long ip)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter *child = btree_iter_child(iter);

	if (!child) {
		child = btree_trans_iter_alloc(trans);
		child->ip_allocated	= ip;
		iter->child_idx		= child->idx;

		trans->iters_live	|= 1ULL << child->idx;
		trans->iters_touched	|= 1ULL << child->idx;
	}

	return child;
}
static inline void __bch2_trans_iter_free(struct btree_trans *trans,
					  unsigned idx)
{
	btree_iter_child_free(&trans->iters[idx]);

	__bch2_btree_iter_unlock(&trans->iters[idx]);
	trans->iters_linked		&= ~(1ULL << idx);
	trans->iters_live		&= ~(1ULL << idx);
	trans->iters_touched		&= ~(1ULL << idx);
}

int bch2_trans_iter_put(struct btree_trans *trans,
			struct btree_iter *iter)
{
	int ret;

	if (IS_ERR_OR_NULL(iter))
		return 0;

	BUG_ON(trans->iters + iter->idx != iter);
	BUG_ON(!btree_iter_live(trans, iter));

	ret = btree_iter_err(iter);

	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
	    !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
		__bch2_trans_iter_free(trans, iter->idx);

	trans->iters_live	&= ~(1ULL << iter->idx);
	return ret;
}

int bch2_trans_iter_free(struct btree_trans *trans,
			 struct btree_iter *iter)
{
	if (IS_ERR_OR_NULL(iter))
		return 0;

	set_btree_iter_dontneed(trans, iter);

	return bch2_trans_iter_put(trans, iter);
}
static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
{
	struct btree_iter *iter;
	struct btree_insert_entry *i;
	char buf[100];

	trans_for_each_iter(trans, iter)
		printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
		       bch2_btree_ids[iter->btree_id],
		       (bch2_bpos_to_text(&PBUF(buf), iter->pos), buf),
		       btree_iter_live(trans, iter) ? " live" : "",
		       (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
		       iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
		       (void *) iter->ip_allocated);

	trans_for_each_update(trans, i) {
		char buf[300];

		bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
		printk(KERN_ERR "update: btree %s %s\n",
		       bch2_btree_ids[i->iter->btree_id], buf);
	}
	panic("trans iter overflow\n");
}
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
{
	struct btree_iter *iter;
	unsigned idx;

	if (unlikely(trans->iters_linked ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
		btree_trans_iter_alloc_fail(trans);

	idx = __ffs64(~trans->iters_linked);
	iter = &trans->iters[idx];

	iter->trans			= trans;
	iter->idx			= idx;
	iter->child_idx			= U8_MAX;

	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	trans->iters_linked		|= 1ULL << idx;
	return iter;
}
static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
	unsigned i;

	__bch2_btree_iter_unlock(dst);
	btree_iter_child_free(dst);

	memcpy(&dst->flags, &src->flags,
	       sizeof(struct btree_iter) - offsetof(struct btree_iter, flags));

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(dst, i))
			six_lock_increment(&dst->l[i].b->c.lock,
					   __btree_lock_want(dst, i));

	dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
	dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
}
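/*
 * Get an iterator for @btree_id at @pos, preferring to reuse or copy the
 * best-positioned live iterator already in the transaction. Most callers go
 * through the bch2_trans_get_iter() wrapper in btree_iter.h rather than
 * calling this directly, e.g. (a sketch):
 *
 *	iter = bch2_trans_get_iter(trans, BTREE_ID_extents, pos,
 *				   BTREE_ITER_INTENT);
 */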
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 unsigned btree_id, struct bpos pos,
					 unsigned locks_want,
					 unsigned depth,
					 unsigned flags)
{
	struct btree_iter *iter, *best = NULL;
	struct bpos real_pos, pos_min = POS_MIN;

	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
	    btree_node_type_is_extents(btree_id) &&
	    !(flags & BTREE_ITER_NOT_EXTENTS) &&
	    !(flags & BTREE_ITER_ALL_SNAPSHOTS))
		flags |= BTREE_ITER_IS_EXTENTS;

	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
		pos.snapshot = btree_type_has_snapshots(btree_id)
			? U32_MAX : 0;

	real_pos = pos;

	if ((flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		real_pos = bpos_nosnap_successor(pos);

	trans_for_each_iter(trans, iter) {
		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
			continue;

		if (iter->btree_id != btree_id)
			continue;

		if (best) {
			int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
					   bpos_diff(iter->real_pos, real_pos));

			if (cmp < 0 ||
			    ((cmp == 0 && btree_iter_keep(trans, iter))))
				continue;
		}

		best = iter;
	}

	if (!best) {
		iter = btree_trans_iter_alloc(trans);
		bch2_btree_iter_init(trans, iter, btree_id);
	} else if (btree_iter_keep(trans, best)) {
		iter = btree_trans_iter_alloc(trans);
		btree_iter_copy(iter, best);
	} else {
		iter = best;
	}

	trans->iters_live	|= 1ULL << iter->idx;
	trans->iters_touched	|= 1ULL << iter->idx;

	iter->flags = flags;

	iter->snapshot = pos.snapshot;

	/*
	 * If the iterator has locks_want greater than requested, we explicitly
	 * do not downgrade it here - on transaction restart because btree node
	 * split needs to upgrade locks, we might be putting/getting the
	 * iterator again. Downgrading iterators only happens via an explicit
	 * bch2_trans_downgrade().
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > iter->locks_want) {
		iter->locks_want = locks_want;
		btree_iter_get_locks(iter, true, _THIS_IP_);
	}

	while (iter->level != depth) {
		btree_node_unlock(iter, iter->level);
		iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
		iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
		if (iter->level < depth)
			iter->level++;
		else
			iter->level--;
	}

	iter->min_depth	= depth;

	bch2_btree_iter_set_pos(iter, pos);
	btree_iter_set_search_pos(iter, real_pos);

	trace_trans_get_iter(_RET_IP_, trans->ip,
			     btree_id,
			     &real_pos, locks_want, iter->uptodate,
			     best ? &best->real_pos	: &pos_min,
			     best ? best->locks_want	: U8_MAX,
			     best ? best->uptodate	: U8_MAX);

	return iter;
}
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos pos,
					    unsigned locks_want,
					    unsigned depth,
					    unsigned flags)
{
	struct btree_iter *iter =
		__bch2_trans_get_iter(trans, btree_id, pos,
				      locks_want, depth,
				      BTREE_ITER_NODES|
				      BTREE_ITER_NOT_EXTENTS|
				      BTREE_ITER_ALL_SNAPSHOTS|
				      flags);

	BUG_ON(bkey_cmp(iter->pos, pos));
	BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->level	!= depth);
	BUG_ON(iter->min_depth	!= depth);
	iter->ip_allocated = _RET_IP_;

	return iter;
}
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
					  struct btree_iter *src)
{
	struct btree_iter *iter;

	iter = btree_trans_iter_alloc(trans);
	btree_iter_copy(iter, src);

	trans->iters_live |= 1ULL << iter->idx;

	/*
	 * We don't need to preserve this iter since it's cheap to copy it
	 * again - this will cause trans_iter_put() to free it right away:
	 */
	set_btree_iter_dontneed(trans, iter);

	return iter;
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size_t new_top = trans->mem_top + size;
	void *p;

	if (new_top > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;

		WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

		new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
			return ERR_PTR(-EINTR);
		}
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
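/*
 * Example use: scratch memory allocated this way lives until the transaction
 * is reset, so it can carry an update's key to commit time (a sketch):
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	if (IS_ERR(k))
 *		return PTR_ERR(k);	// may be -EINTR: restart the transaction
 */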
inline void bch2_trans_unlink_iters(struct btree_trans *trans)
{
	u64 iters = trans->iters_linked &
		~trans->iters_touched &
		~trans->iters_live;

	while (iters) {
		unsigned idx = __ffs64(iters);

		iters &= ~(1ULL << idx);
		__bch2_trans_iter_free(trans, idx);
	}
}
/**
 * bch2_trans_reset() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 * @flags: transaction reset flags.
 *
 * While iterating over nodes or updating nodes an attempt to lock a btree
 * node may return EINTR when the trylock fails. When this occurs
 * bch2_trans_reset() or bch2_trans_begin() should be called and the
 * transaction retried.
 *
 * Transaction reset flags include:
 *
 *  - TRANS_RESET_NOUNLOCK   - Do not attempt to unlock and reschedule the
 *    transaction.
 *  - TRANS_RESET_NOTRAVERSE - Do not traverse all linked iters.
 */
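/*
 * The usual retry loop, assuming the bch2_trans_begin() wrapper around
 * bch2_trans_reset() (a sketch; do_btree_work() is a placeholder):
 *
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_btree_work(&trans);
 *	} while (ret == -EINTR);
 */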
void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter) {
		iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
				 BTREE_ITER_SET_POS_AFTER_COMMIT);
		iter->should_be_locked = false;
	}

	bch2_trans_unlink_iters(trans);

	trans->iters_touched &= trans->iters_live;

	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->mem_top			= 0;

	trans->hooks			= NULL;
	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	if (!(flags & TRANS_RESET_NOUNLOCK))
		bch2_trans_cond_resched(trans);

	if (!(flags & TRANS_RESET_NOTRAVERSE) &&
	    trans->iters_linked)
		bch2_btree_iter_traverse_all(trans);
}
static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
{
	size_t iters_bytes	= sizeof(struct btree_iter) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);

	trans->iters		= p; p += iters_bytes;
	trans->updates		= p; p += updates_bytes;
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
	__acquires(&c->btree_trans_barrier)
{
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->ip	= _RET_IP_;

	/*
	 * reallocating iterators currently completely breaks
	 * bch2_trans_iter_put(), we always allocate the max:
	 */
	bch2_trans_alloc_iters(trans, c);

	if (expected_mem_bytes) {
		trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
		trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		}
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}
int bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	if (trans->iters_live) {
		struct btree_iter *iter;

		trans_for_each_iter(trans, iter)
			btree_iter_child_free(iter);
	}

	if (trans->iters_live) {
		struct btree_iter *iter;

		bch_err(c, "btree iterators leaked!");
		trans_for_each_iter(trans, iter)
			if (btree_iter_live(trans, iter))
				printk(KERN_ERR "  btree %s allocated at %pS\n",
				       bch2_btree_ids[iter->btree_id],
				       (void *) iter->ip_allocated);
		/* Be noisy about this: */
		bch2_fatal_error(c);
	}

	mutex_lock(&trans->c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&trans->c->btree_trans_lock);
#endif

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &trans->c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
#endif

	if (trans->iters)
		mempool_free(trans->iters, &trans->c->btree_iters_pool);
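	/*
	 * Poison the pointers: any use of this transaction after
	 * bch2_trans_exit() should now fault instead of silently reusing
	 * freed memory:
	 */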
	trans->mem	= (void *) 0x1;
	trans->iters	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}
static void __maybe_unused
bch2_btree_iter_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     enum btree_iter_type type)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, type));
}
#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
		    iter->nodes_locked)
			return true;
	return false;
}
#endif
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_btree_nodes_locked(trans))
			continue;

		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_iter(trans, iter) {
			if (!iter->nodes_locked)
				continue;

			pr_buf(out, "  iter %u %c %s:",
			       iter->idx,
			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
			       bch2_btree_ids[iter->btree_id]);
			bch2_bpos_to_text(out, iter->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(iter, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(iter, l) ? "i" : "r", l);
					bch2_btree_iter_node_to_text(out,
							(void *) iter->l[l].b,
							btree_iter_type(iter));
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			iter = &trans->iters[trans->locking_iter_idx];
			pr_buf(out, "  locking iter %u %c l=%u %s:",
			       trans->locking_iter_idx,
			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_iter_node_to_text(out,
					(void *) b,
					btree_iter_type(iter));
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_iters_pool);
	cleanup_srcu_struct(&c->btree_trans_barrier);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return  init_srcu_struct(&c->btree_trans_barrier) ?:
		mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
			sizeof(struct btree_iter) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX);
}