// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
static void btree_trans_sort_iters(struct btree_trans *);
static void btree_iter_check_sort(struct btree_trans *, struct btree_iter *);
static struct btree_iter *btree_iter_child_alloc(struct btree_iter *, unsigned long);
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *,
						 struct btree_iter *);
static void btree_iter_copy(struct btree_iter *, struct btree_iter *);
static inline int btree_iter_cmp(const struct btree_iter *l,
				 const struct btree_iter *r)
{
	return   cmp_int(l->btree_id, r->btree_id) ?:
		-cmp_int(btree_iter_is_cached(l), btree_iter_is_cached(r)) ?:
		 bkey_cmp(l->real_pos, r->real_pos);
}
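/*
 * Note on the ordering above: within a transaction, iterators sort first by
 * btree id, then (because of the negated cmp_int()) cached iterators sort
 * before non-cached ones, with ties broken by real_pos. This is the same
 * order the deadlock avoidance checks in __bch2_btree_node_lock() below
 * expect locks to be taken in.
 */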
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);

	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	EBUG_ON(btree_iter_type(iter) == BTREE_ITER_NODES);

	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[l].b >= 128;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}
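/*
 * Why btree_iter_search_key() searches from the successor for extents:
 * extent keys are indexed by their end position, so an extent overlapping
 * iter->pos is the first key whose position is >= the successor of
 * iter->pos - searching from iter->pos itself would skip it.
 */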
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
					      struct btree *b)
{
	return bpos_cmp(iter->real_pos, b->data->min_key) < 0;
}

static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
{
	return bpos_cmp(b->key.k.p, iter->real_pos) < 0;
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->c.btree_id &&
		!btree_iter_pos_before_node(iter, b) &&
		!btree_iter_pos_after_node(iter, b);
}
/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	bch2_btree_node_unlock_write_inlined(b, iter);
}

void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[b->c.level].b == b &&
		    btree_node_read_locked(linked, b->c.level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	atomic64_sub(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
	atomic64_add(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
	     btree_node_lock_increment(iter->trans, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	}

	return false;
}

static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
{
	struct btree *b = iter->l[level].b;

	EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(iter, level))
		return false;

	if (btree_node_intent_locked(iter, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(iter, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(iter, b, level) &&
	    btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(iter, level);
	return true;
}
static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
					unsigned long trace_ip)
{
	unsigned l = iter->level;
	int fail_idx = -1;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(iter, l)
		      : bch2_btree_node_relock(iter, l))) {
			(upgrade
			 ? trace_node_upgrade_fail
			 : trace_node_relock_fail)(iter->trans->ip, trace_ip,
					btree_iter_type(iter) == BTREE_ITER_CACHED,
					iter->btree_id, &iter->real_pos,
					l, iter->l[l].lock_seq,
					is_btree_node(iter, l)
					? 0
					: (unsigned long) iter->l[l].b,
					is_btree_node(iter, l)
					? iter->l[l].b->c.lock.state.seq
					: 0);

			fail_idx = l;
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		}

		l++;
	} while (l < iter->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_iter_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	while (fail_idx >= 0) {
		btree_node_unlock(iter, fail_idx);
		iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
		--fail_idx;
	}

	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
		iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);

	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}

static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  enum btree_iter_type type)
{
	return  type != BTREE_ITER_CACHED
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level, struct btree_iter *iter,
			    enum six_lock_type type,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter *linked, *deadlock_iter = NULL;
	u64 start_time = local_clock();
	unsigned reason = 9;
	bool ret;

	/* Check if it's safe to block: */
	trans_for_each_iter(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			deadlock_iter = linked;
			reason = 1;
		}

		if (linked->btree_id != iter->btree_id) {
			if (linked->btree_id > iter->btree_id) {
				deadlock_iter = linked;
				reason = 3;
			}
			continue;
		}

		/*
		 * Within the same btree, cached iterators come before non
		 * cached iterators:
		 */
		if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
			if (btree_iter_is_cached(iter)) {
				deadlock_iter = linked;
				reason = 4;
			}
			continue;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			deadlock_iter = linked;
			reason = 5;
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 btree_iter_type(linked))) <= 0) {
			deadlock_iter = linked;
			reason = 7;
			BUG_ON(trans->in_traverse_all);
		}
	}

	if (unlikely(deadlock_iter)) {
		trace_trans_restart_would_deadlock(trans->ip, ip,
				trans->in_traverse_all, reason,
				deadlock_iter->btree_id,
				btree_iter_type(deadlock_iter),
				&deadlock_iter->real_pos,
				iter->btree_id,
				btree_iter_type(iter),
				&pos);
		btree_trans_restart(trans);
		return false;
	}

	if (six_trylock_type(&b->c.lock, type))
		return true;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking_iter_idx = iter->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= iter->btree_id;
	trans->locking_level	= level;
	trans->locking		= b;
#endif

	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking = NULL;
#endif
	if (ret)
		bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
				       start_time);
	return ret;
}
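/*
 * Summary of the ordering enforced above: btree node locks must be taken in
 * ascending (btree id, position) order, cached before non-cached within a
 * btree, parents before children - and we may never block taking an intent
 * lock while holding read locks. Any violation marks a deadlock_iter and
 * restarts the transaction rather than blocking.
 */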
/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
	unsigned l;

	if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
		BUG_ON(iter->nodes_locked);
		return;
	}

	for (l = 0; btree_iter_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
			continue;

		BUG_ON(btree_lock_want(iter, l) !=
		       btree_node_locked_type(iter, l));
	}
}

void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_verify_locks(iter);
}
#else
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
/*
 * Only for btree_cache.c - only relocks intent locks
 */
bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
{
	unsigned l;

	for (l = iter->level;
	     l < iter->locks_want && btree_iter_node(iter, l);
	     l++) {
		if (!bch2_btree_node_relock(iter, l)) {
			trace_node_relock_fail(iter->trans->ip, _RET_IP_,
					btree_iter_type(iter) == BTREE_ITER_CACHED,
					iter->btree_id, &iter->real_pos,
					l, iter->l[l].lock_seq,
					is_btree_node(iter, l)
					? 0
					: (unsigned long) iter->l[l].b,
					is_btree_node(iter, l)
					? iter->l[l].b->c.lock.state.seq
					: 0);
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
			btree_trans_restart(iter->trans);
			return false;
		}
	}

	return true;
}

bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
{
	bool ret = btree_iter_get_locks(iter, false, trace_ip);

	if (!ret)
		btree_trans_restart(iter->trans);
	return ret;
}
bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
			       unsigned new_locks_want)
{
	struct btree_iter *linked;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	if (btree_iter_get_locks(iter, true, _THIS_IP_))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_iter_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_iter_traverse_all().
	 */
	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    btree_iter_type(linked) == btree_iter_type(iter) &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_iter_get_locks(linked, true, _THIS_IP_);
		}

	if (iter->should_be_locked)
		btree_trans_restart(iter->trans);

	return false;
}
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(iter->locks_want < new_locks_want);

	iter->locks_want = new_locks_want;

	while (iter->nodes_locked &&
	       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
		if (l > iter->level) {
			btree_node_unlock(iter, l);
		} else {
			if (btree_node_intent_locked(iter, l)) {
				six_lock_downgrade(&iter->l[l].b->c.lock);
				iter->nodes_intent_locked ^= 1 << l;
			}
			break;
		}
	}

	bch2_btree_trans_verify_locks(iter->trans);
}

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_downgrade(iter);
}
/* Btree transaction locking: */

static inline bool btree_iter_should_be_locked(struct btree_iter *iter)
{
	return (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) ||
		iter->should_be_locked;
}

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	if (unlikely(trans->restarted))
		return false;

	trans_for_each_iter(trans, iter)
		if (btree_iter_should_be_locked(iter) &&
		    !bch2_btree_iter_relock(iter, _RET_IP_)) {
			trace_trans_restart_relock(trans->ip, _RET_IP_,
					iter->btree_id, &iter->real_pos);
			BUG_ON(!trans->restarted);
			return false;
		}
	return true;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		__bch2_btree_iter_unlock(iter);

	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
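/*
 * Example (illustrative sketch, not called anywhere): the intended pairing of
 * bch2_trans_unlock()/bch2_trans_relock() around an operation that may block,
 * e.g. memory allocation or IO. If relocking fails, the transaction has been
 * restarted and -EINTR should be returned up to the caller's retry loop:
 */
static int __maybe_unused trans_unlock_blocking_op_example(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);

	/* ... do something that may block, with no btree node locks held ... */

	if (!bch2_trans_relock(trans))
		return -EINTR;	/* trans->restarted is set; caller retries */
	return 0;
}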
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(iter, 0);

	if (!bch2_btree_node_relock(iter, 0))
		return;

	ck = (void *) iter->l[0].b;
	BUG_ON(ck->key.btree_id != iter->btree_id ||
	       bkey_cmp(ck->key.pos, iter->pos));

	if (!locked)
		btree_node_unlock(iter, 0);
}

static void bch2_btree_iter_verify_level(struct btree_iter *iter,
					 unsigned level)
{
	struct btree_iter_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100], buf3[100];
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &iter->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(iter, level);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		if (!level)
			bch2_btree_iter_verify_cached(iter);
		return;
	}

	BUG_ON(iter->level < iter->min_depth);

	if (!btree_iter_node(iter, level))
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	BUG_ON(!btree_iter_pos_in_node(iter, l->b));

	/*
	 * node iterators don't use leaf node iterator:
	 */
	if (btree_iter_type(iter) == BTREE_ITER_NODES &&
	    level <= iter->min_depth)
		goto unlock;

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 *
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
	p = level || btree_node_type_is_extents(iter->btree_id)
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &iter->real_pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
		msg = "after";
		goto err;
	}
unlock:
	if (!locked)
		btree_node_unlock(iter, level);
	return;
err:
	strcpy(buf2, "(none)");
	strcpy(buf3, "(none)");

	bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&PBUF(buf3), &uk);
	}

	panic("iterator should be %s key at level %u:\n"
	      "iter pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1, buf2, buf3);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	enum btree_iter_type type = btree_iter_type(iter);
	unsigned i;

	EBUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(type == BTREE_ITER_NODES &&
	       !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(type != BTREE_ITER_NODES &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) {
		if (!iter->l[i].b) {
			BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
			break;
		}

		bch2_btree_iter_verify_level(iter, i);
	}

	bch2_btree_iter_verify_locks(iter);
}
static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	enum btree_iter_type type = btree_iter_type(iter);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((type == BTREE_ITER_KEYS ||
		type == BTREE_ITER_CACHED) &&
	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
		bkey_cmp(iter->pos, iter->k.p) > 0));
}

void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
{
	struct btree_iter *iter;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_iter_with_node(trans, b, iter)
		bch2_btree_iter_verify_level(iter, b->c.level);
}

#else

static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}

#endif
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_iter_level *l = &iter->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &iter->real_pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_iter_fix_key_modified(linked, b, where);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter->real_pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    (b->c.level ||
	     btree_node_type_is_extents(iter->btree_id))) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}

	if (!b->c.level &&
	    node_iter == &iter->l[0].iter &&
	    iter_current_key_modified)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_iter *linked;

	if (node_iter != &iter->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
						  struct btree_iter_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	/*
	 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
	 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
	 * being overwritten but doesn't change k->size. But this is ok, because
	 * those keys are never written out, we just have to avoid a spurious
	 * assertion here:
	 */
	if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}
/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
							struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
						    struct btree_iter_level *l)
{
	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
	return k;
}

static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
						    struct btree_iter_level *l)
{
	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
	return k;
}
static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
					     struct btree_iter_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &iter->real_pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->c.level + 1;
	if (!btree_iter_node(iter, plevel))
		return;

	parent_locked = btree_node_locked(iter, plevel);

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	l = &iter->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf1[100];
		char buf2[100];
		char buf3[100];
		char buf4[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_dump_btree_node(iter->trans->c, l->b);
		bch2_bpos_to_text(&PBUF(buf1), iter->real_pos);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
		bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
		bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
		panic("parent iter doesn't point to new node:\n"
		      "iter pos %s %s\n"
		      "iter key %s\n"
		      "new node %s-%s\n",
		      bch2_btree_ids[iter->btree_id], buf1,
		      buf2, buf3, buf4);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
				     unsigned level)
{
	struct btree_iter_level *l = &iter->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &iter->real_pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);

	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
	iter->l[b->c.level].b = b;
	__btree_iter_init(iter, b->c.level);
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	enum btree_node_locked_type t;
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
		    btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->c.level));

			t = btree_lock_want(linked, b->c.level);
			if (t != BTREE_NODE_UNLOCKED) {
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(linked, b->c.level, t);
			}

			btree_iter_node_set(linked, b);
		}
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->c.level;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
			btree_node_unlock(linked, level);
			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__btree_iter_init(linked, b->c.level);
}
static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	return b == *rootp ? 0 : -1;
}

static inline int btree_iter_lock_root(struct btree_trans *trans,
				       struct btree_iter *iter,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		iter->level = READ_ONCE(b->c.level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
					      iter, lock_type,
					      lock_root_check_fn, rootp,
					      trace_ip))) {
			if (trans->restarted)
				return -EINTR;
			continue;
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			iter->l[iter->level].b = b;
			for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
static int btree_iter_prefetch(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr && !ret) {
		if (!bch2_btree_node_relock(iter, iter->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, iter, tmp.k, iter->btree_id,
					       iter->level - 1);
		nr--;
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
					    unsigned plevel, struct btree *b)
{
	struct btree_iter_level *l = &iter->l[plevel];
	bool locked = btree_node_locked(iter, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(iter, plevel);
}
static __always_inline int btree_iter_down(struct btree_trans *trans,
					   struct btree_iter *iter,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	bch2_bkey_buf_init(&tmp);
	bch2_bkey_buf_unpack(&tmp, c, l->b,
			     bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(trans, iter, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(iter, level + 1, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		ret = btree_iter_prefetch(iter);

	if (btree_node_read_locked(iter, level + 1))
		btree_node_unlock(iter, level + 1);
	iter->level = level;

	bch2_btree_iter_verify_locks(iter);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static int btree_iter_traverse_one(struct btree_iter *, unsigned long);

static int __btree_iter_traverse_all(struct btree_trans *trans, int ret,
				     unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	int i;

	if (trans->in_traverse_all)
		return -EINTR;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = false;

	trans_for_each_iter(trans, iter)
		iter->should_be_locked = false;

	btree_trans_sort_iters(trans);

	for (i = trans->nr_sorted - 2; i >= 0; --i) {
		struct btree_iter *iter1 = trans->iters + trans->sorted[i];
		struct btree_iter *iter2 = trans->iters + trans->sorted[i + 1];

		if (iter1->btree_id == iter2->btree_id &&
		    iter1->locks_want < iter2->locks_want)
			__bch2_btree_iter_upgrade(iter1, iter2->locks_want);
		else if (!iter1->locks_want && iter2->locks_want)
			__bch2_btree_iter_upgrade(iter1, 1);
	}

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	trans_for_each_iter_inorder(trans, iter) {
		EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));

		ret = btree_iter_traverse_one(iter, _THIS_IP_);
		if (ret)
			goto retry_all;

		EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
	}

	trans_for_each_iter(trans, iter)
		BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
out:
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_trans_traverse_all(trans->ip, trace_ip);
	return ret;
}

static int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
	return __btree_iter_traverse_all(trans, 0, _RET_IP_);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
					unsigned l, int check_pos)
{
	if (!is_btree_node(iter, l) ||
	    !bch2_btree_node_relock(iter, l))
		return false;

	if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
		return false;
	if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
		return false;
	return true;
}

static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
						     int check_pos)
{
	unsigned l = iter->level;

	while (btree_iter_node(iter, l) &&
	       !btree_iter_good_node(iter, l, check_pos)) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
	}

	return l;
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_iter_traverse_one(struct btree_iter *iter,
				   unsigned long trace_ip)
{
	struct btree_trans *trans = iter->trans;
	unsigned l, depth_want = iter->level;
	int ret = 0;

	/*
	 * Ensure we obey iter->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the iterator without a transaction restart:
	 */
	if (iter->should_be_locked) {
		ret = bch2_btree_iter_relock(iter, trace_ip) ? 0 : -EINTR;
		goto out;
	}

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		ret = bch2_btree_iter_traverse_cached(iter);
		goto out;
	}

	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		goto out;

	iter->level = btree_iter_up_until_good_node(iter, 0);

	/* If we need intent locks, take them too: */
	for (l = iter->level + 1;
	     l < iter->locks_want && btree_iter_node(iter, l);
	     l++)
		if (!bch2_btree_node_relock(iter, l))
			while (iter->level <= l) {
				btree_node_unlock(iter, iter->level);
				iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
				iter->level++;
			}

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		ret = btree_iter_node(iter, iter->level)
			? btree_iter_down(trans, iter, trace_ip)
			: btree_iter_lock_root(trans, iter, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * Got to the end of the btree (in
				 * BTREE_ITER_NODES mode)
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_iter_unlock(iter);
			iter->level = depth_want;

			if (ret == -EIO) {
				iter->flags |= BTREE_ITER_ERROR;
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_ERROR;
			} else {
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_DOWN;
			}
			goto out;
		}
	}

	iter->uptodate = BTREE_ITER_NEED_PEEK;
out:
	BUG_ON((ret == -EINTR) != !!trans->restarted);
	trace_iter_traverse(trans->ip, trace_ip,
			    btree_iter_type(iter) == BTREE_ITER_CACHED,
			    iter->btree_id, &iter->real_pos, ret);
	bch2_btree_iter_verify(iter);
	return ret;
}
static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	ret =   bch2_trans_cond_resched(trans) ?:
		btree_iter_traverse_one(iter, _RET_IP_);
	if (unlikely(ret) && hweight64(trans->iters_linked) == 1) {
		ret = __btree_iter_traverse_all(trans, ret, _RET_IP_);
		BUG_ON(ret == -EINTR);
	}

	return ret;
}

/*
 * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
 * for internal btree iterator users
 *
 * bch2_btree_iter_traverse sets iter->real_pos to iter->pos,
 * btree_iter_traverse() does not:
 */
static inline int __must_check
btree_iter_traverse(struct btree_iter *iter)
{
	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
		? __bch2_btree_iter_traverse(iter)
		: 0;
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));

	ret = btree_iter_traverse(iter);
	if (ret)
		return ret;

	iter->should_be_locked = true;
	return 0;
}
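/*
 * Example (illustrative sketch, not called anywhere): external users call
 * bch2_btree_iter_traverse() when they want the iterator positioned and
 * locked without yet looking at a key - e.g. to get at the locked leaf node
 * covering iter->pos:
 */
static struct btree *__maybe_unused iter_leaf_example(struct btree_iter *iter)
{
	int ret = bch2_btree_iter_traverse(iter);

	if (ret)
		return ERR_PTR(ret);

	/* iter->l[0].b is now the locked leaf covering iter->pos: */
	return iter->l[0].b;
}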
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);

	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);

	iter->pos = iter->real_pos = b->key.k.p;

	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);

	/* already got to end? */
	if (!btree_iter_node(iter, iter->level))
		return NULL;

	bch2_trans_cond_resched(iter->trans);

	btree_node_unlock(iter, iter->level);
	iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
	iter->level++;

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;

	/* got to end? */
	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		btree_iter_set_search_pos(iter, bpos_successor(iter->pos));

		/* Unlock to avoid screwing up our lock invariants: */
		btree_node_unlock(iter, iter->level);

		iter->level = iter->min_depth;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		bch2_btree_iter_verify(iter);

		ret = btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->l[iter->level].b;
	}

	iter->pos = iter->real_pos = b->key.k.p;

	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return b;
}
/* Iterate across keys (in leaf nodes only) */

static void btree_iter_set_search_pos(struct btree_iter *iter, struct bpos new_pos)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bpos old_pos = iter->real_pos;
#endif
	int cmp = bpos_cmp(new_pos, iter->real_pos);
	unsigned l = iter->level;

	EBUG_ON(iter->trans->restarted);

	if (!cmp)
		goto out;

	iter->real_pos = new_pos;
	iter->should_be_locked = false;

	btree_iter_check_sort(iter->trans, iter);

	if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
		btree_node_unlock(iter, 0);
		iter->l[0].b = BTREE_ITER_NO_NODE_CACHED;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		return;
	}

	l = btree_iter_up_until_good_node(iter, cmp);

	if (btree_iter_node(iter, l)) {
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
			__btree_iter_init(iter, l);

		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(iter, l);
	}
out:
	if (l != iter->level)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	else
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	bch2_btree_iter_verify(iter);
#ifdef CONFIG_BCACHEFS_DEBUG
	trace_iter_set_search_pos(iter->trans->ip, _RET_IP_,
				  iter->btree_id,
				  &old_pos, &new_pos, l);
#endif
}
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = bpos_cmp(pos, SPOS_MAX) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		    ? bpos_cmp(pos, POS_MIN)
		    : bkey_cmp(pos, POS_MIN)) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
	struct bpos next_pos = iter->l[0].b->key.k.p;
	bool ret = bpos_cmp(next_pos, SPOS_MAX) != 0;

	/*
	 * Typically, we don't want to modify iter->pos here, since that
	 * indicates where we searched from - unless we got to the end of the
	 * btree, in that case we want iter->pos to reflect that:
	 */
	if (ret)
		btree_iter_set_search_pos(iter, bpos_successor(next_pos));
	else
		bch2_btree_iter_set_pos(iter, SPOS_MAX);

	return ret;
}

static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
	struct bpos next_pos = iter->l[0].b->data->min_key;
	bool ret = bpos_cmp(next_pos, POS_MIN) != 0;

	if (ret)
		btree_iter_set_search_pos(iter, bpos_predecessor(next_pos));
	else
		bch2_btree_iter_set_pos(iter, POS_MIN);

	return ret;
}
static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter,
						      struct bpos pos)
{
	struct btree_insert_entry *i;

	if (!(iter->flags & BTREE_ITER_WITH_UPDATES))
		return NULL;

	trans_for_each_update(iter->trans, i)
		if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
		     bkey_cmp(pos, i->k->k.p)) <= 0) {
			if (iter->btree_id == i->iter->btree_id)
				return i->k;
			break;
		}

	return NULL;
}
/**
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct bpos search_key = btree_iter_search_key(iter);
	struct bkey_i *next_update;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);
start:
	next_update = btree_trans_peek_updates(iter, search_key);
	btree_iter_set_search_pos(iter, search_key);

	while (1) {
		ret = btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = btree_iter_level_peek(iter, &iter->l[0]);

		if (next_update &&
		    bpos_cmp(next_update->k.p, iter->real_pos) <= 0) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
		}

		if (likely(k.k)) {
			if (bkey_deleted(k.k)) {
				search_key = bkey_successor(iter, k.k->p);
				goto start;
			}

			break;
		}

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should be monotonically increasing, and always be equal to
	 * the key we just returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
		iter->pos = k.k->p;
	else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;
	return k;
}

/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);
}
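/*
 * Example (illustrative sketch, not called anywhere) of the peek()/next()
 * loop these functions are built for, assuming the usual
 * bch2_trans_get_iter()/bch2_trans_iter_put() wrappers from btree_iter.h;
 * on -EINTR the whole transaction would be retried by the caller:
 */
static int __maybe_unused iterate_keys_example(struct btree_trans *trans,
					       enum btree_id id)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	iter = bch2_trans_get_iter(trans, id, POS_MIN, BTREE_ITER_PREFETCH);

	for (k = bch2_btree_iter_peek(iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_next(iter))
		pr_debug("key at %llu:%llu", k.k->p.inode, k.k->p.offset);

	bch2_trans_iter_put(trans, iter);
	return ret;
}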
/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	btree_iter_set_search_pos(iter, iter->pos);

	while (1) {
		ret = btree_iter_traverse(iter);
		if (unlikely(ret)) {
			k = bkey_s_c_err(ret);
			goto no_key;
		}

		k = btree_iter_level_peek(iter, l);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
		     : bkey_cmp(k.k->p, iter->pos) > 0))
			k = btree_iter_level_prev(iter, l);

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_prev_leaf(iter)) {
			k = bkey_s_c_null;
			goto no_key;
		}
	}

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);

	/* Extents can straddle iter->pos: */
	if (bkey_cmp(k.k->p, iter->pos) < 0)
		iter->pos = k.k->p;
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;
	return k;
no_key:
	/*
	 * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
	 * then we errored going to the previous leaf - make sure it's
	 * consistent with iter->pos:
	 */
	bkey_init(&iter->k);
	iter->k.p = iter->pos;
	goto out;
}

/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
}
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct bpos search_key;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS &&
		btree_iter_type(iter) != BTREE_ITER_CACHED);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	/* extents can't span inode numbers: */
	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	}

	search_key = btree_iter_search_key(iter);
	btree_iter_set_search_pos(iter, search_key);

	ret = btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED ||
	    !(iter->flags & BTREE_ITER_IS_EXTENTS)) {
		struct bkey_i *next_update;
		struct bkey_cached *ck;

		switch (btree_iter_type(iter)) {
		case BTREE_ITER_KEYS:
			k = btree_iter_level_peek_all(iter, &iter->l[0]);
			EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, iter->pos) == 0);
			break;
		case BTREE_ITER_CACHED:
			ck = (void *) iter->l[0].b;
			EBUG_ON(iter->btree_id != ck->key.btree_id ||
				bkey_cmp(iter->pos, ck->key.pos));
			BUG_ON(!ck->valid);

			k = bkey_i_to_s_c(ck->k);
			break;
		case BTREE_ITER_NODES:
			BUG();
		}

		next_update = btree_trans_peek_updates(iter, search_key);
		if (next_update &&
		    (!k.k || bpos_cmp(next_update->k.p, k.k->p) <= 0)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
		}
	} else {
		if ((iter->flags & BTREE_ITER_INTENT)) {
			struct btree_iter *child =
				btree_iter_child_alloc(iter, _THIS_IP_);

			btree_iter_copy(child, iter);
			k = bch2_btree_iter_peek(child);

			if (k.k && !bkey_err(k))
				iter->k = child->k;
		} else {
			struct bpos pos = iter->pos;

			k = bch2_btree_iter_peek(iter);
			iter->pos = pos;
		}

		if (unlikely(bkey_err(k)))
			return k;
	}

	if (!(iter->flags & BTREE_ITER_IS_EXTENTS)) {
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_ALL_SNAPSHOTS)
		     ? bpos_cmp(iter->pos, k.k->p)
		     : bkey_cmp(iter->pos, k.k->p))) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;
			k = (struct bkey_s_c) { &iter->k, NULL };
		}
	} else {
		struct bpos next = k.k ? bkey_start_pos(k.k) : POS_MAX;

		if (bkey_cmp(iter->pos, next) < 0) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;
			bch2_key_resize(&iter->k,
					min_t(u64, KEY_SIZE_MAX,
					      (next.inode == iter->pos.inode
					       ? next.offset
					       : KEY_OFFSET_MAX) -
					      iter->pos.offset));

			k = (struct bkey_s_c) { &iter->k, NULL };
			EBUG_ON(!k.k->size);
		}
	}

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	iter->should_be_locked = true;

	return k;
}
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
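/*
 * Note on the slot interface above: unlike peek(), peek_slot() always returns
 * a key for iter->pos - if nothing is stored there, iter->k is initialized as
 * a deleted key (sized to cover the hole, for extents) and returned with a
 * NULL value pointer, so callers can distinguish an empty slot from an error.
 */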
static inline void bch2_btree_iter_init(struct btree_trans *trans,
			struct btree_iter *iter, enum btree_id btree_id)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	iter->trans			= trans;
	iter->uptodate			= BTREE_ITER_NEED_TRAVERSE;
	iter->btree_id			= btree_id;
	iter->real_pos			= POS_MIN;
	iter->level			= 0;
	iter->min_depth			= 0;
	iter->locks_want		= 0;
	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= BTREE_ITER_NO_NODE_INIT;

	prefetch(c->btree_roots[btree_id].b);
}
/* new transactional stuff: */

static inline void btree_iter_verify_sorted_ref(struct btree_trans *trans,
						struct btree_iter *iter)
{
	EBUG_ON(iter->sorted_idx >= trans->nr_sorted);
	EBUG_ON(trans->sorted[iter->sorted_idx] != iter->idx);
	EBUG_ON(!(trans->iters_linked & (1ULL << iter->idx)));
}

static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	for (i = 0; i < trans->nr_sorted; i++)
		btree_iter_verify_sorted_ref(trans, trans->iters + trans->sorted[i]);
#endif
}

static inline void btree_trans_verify_sorted(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_iter *iter, *prev = NULL;

	trans_for_each_iter_inorder(trans, iter) {
		BUG_ON(prev && btree_iter_cmp(prev, iter) > 0);
		prev = iter;
	}
#endif
}

static inline void btree_iter_swap(struct btree_trans *trans,
				   struct btree_iter *l, struct btree_iter *r)
{
	swap(l->sorted_idx, r->sorted_idx);
	swap(trans->sorted[l->sorted_idx],
	     trans->sorted[r->sorted_idx]);

	btree_iter_verify_sorted_ref(trans, l);
	btree_iter_verify_sorted_ref(trans, r);
}
static void btree_trans_sort_iters(struct btree_trans *trans)
{
	bool swapped = false;
	int i, l = 0, r = trans->nr_sorted;

	while (1) {
		for (i = l; i + 1 < r; i++) {
			if (btree_iter_cmp(trans->iters + trans->sorted[i],
					   trans->iters + trans->sorted[i + 1]) > 0) {
				swap(trans->sorted[i], trans->sorted[i + 1]);
				trans->iters[trans->sorted[i]].sorted_idx = i;
				trans->iters[trans->sorted[i + 1]].sorted_idx = i + 1;
				swapped = true;
			}
		}

		if (!swapped)
			break;

		r--;
		swapped = false;

		for (i = r - 2; i >= l; --i) {
			if (btree_iter_cmp(trans->iters + trans->sorted[i],
					   trans->iters + trans->sorted[i + 1]) > 0) {
				swap(trans->sorted[i],
				     trans->sorted[i + 1]);
				trans->iters[trans->sorted[i]].sorted_idx = i;
				trans->iters[trans->sorted[i + 1]].sorted_idx = i + 1;
				swapped = true;
			}
		}

		if (!swapped)
			break;

		l++;
		swapped = false;
	}

	btree_trans_verify_sorted_refs(trans);
	btree_trans_verify_sorted(trans);
}
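/*
 * The above is a bidirectional bubble ("cocktail shaker") sort: there are at
 * most BTREE_ITER_MAX iterators and the list is usually nearly sorted, so one
 * pass in each direction over the shrinking unsorted range [l, r) is cheap,
 * and it keeps the sorted_idx backpointers in sync as it swaps.
 */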
static void btree_iter_check_sort(struct btree_trans *trans, struct btree_iter *iter)
{
	struct btree_iter *n;

	EBUG_ON(iter->sorted_idx == U8_MAX);

	n = next_btree_iter(trans, iter);
	if (n && btree_iter_cmp(iter, n) > 0) {
		do {
			btree_iter_swap(trans, iter, n);
			n = next_btree_iter(trans, iter);
		} while (n && btree_iter_cmp(iter, n) > 0);

		return;
	}

	n = prev_btree_iter(trans, iter);
	if (n && btree_iter_cmp(n, iter) > 0) {
		do {
			btree_iter_swap(trans, n, iter);
			n = prev_btree_iter(trans, iter);
		} while (n && btree_iter_cmp(n, iter) > 0);
	}

	btree_trans_verify_sorted(trans);
}
static inline void btree_iter_list_remove(struct btree_trans *trans,
					  struct btree_iter *iter)
{
	unsigned i;

	EBUG_ON(iter->sorted_idx >= trans->nr_sorted);

	array_remove_item(trans->sorted, trans->nr_sorted, iter->sorted_idx);

	for (i = iter->sorted_idx; i < trans->nr_sorted; i++)
		trans->iters[trans->sorted[i]].sorted_idx = i;

	iter->sorted_idx = U8_MAX;

	btree_trans_verify_sorted_refs(trans);
}

static inline void btree_iter_list_add(struct btree_trans *trans,
				       struct btree_iter *pos,
				       struct btree_iter *iter)
{
	unsigned i;

	btree_trans_verify_sorted_refs(trans);

	iter->sorted_idx = pos ? pos->sorted_idx : trans->nr_sorted;

	array_insert_item(trans->sorted, trans->nr_sorted, iter->sorted_idx, iter->idx);

	for (i = iter->sorted_idx; i < trans->nr_sorted; i++)
		trans->iters[trans->sorted[i]].sorted_idx = i;

	btree_trans_verify_sorted_refs(trans);
}

static void btree_iter_child_free(struct btree_iter *iter)
{
	struct btree_iter *child = btree_iter_child(iter);

	if (child) {
		bch2_trans_iter_free(iter->trans, child);
		iter->child_idx = U8_MAX;
	}
}

static struct btree_iter *btree_iter_child_alloc(struct btree_iter *iter,
						 unsigned long ip)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter *child = btree_iter_child(iter);

	if (!child) {
		child = btree_trans_iter_alloc(trans, iter);
		child->ip_allocated = ip;
		iter->child_idx = child->idx;

		trans->iters_live |= 1ULL << child->idx;
		trans->iters_touched |= 1ULL << child->idx;
	}

	return child;
}
static inline void __bch2_trans_iter_free(struct btree_trans *trans,
					  unsigned idx)
{
	btree_iter_child_free(&trans->iters[idx]);

	btree_iter_list_remove(trans, &trans->iters[idx]);

	__bch2_btree_iter_unlock(&trans->iters[idx]);
	trans->iters_linked &= ~(1ULL << idx);
	trans->iters_live &= ~(1ULL << idx);
	trans->iters_touched &= ~(1ULL << idx);
}

int bch2_trans_iter_put(struct btree_trans *trans,
			struct btree_iter *iter)
{
	int ret;

	if (IS_ERR_OR_NULL(iter))
		return 0;

	BUG_ON(trans->iters + iter->idx != iter);
	BUG_ON(!btree_iter_live(trans, iter));

	ret = btree_iter_err(iter);

	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
	    !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
		__bch2_trans_iter_free(trans, iter->idx);

	trans->iters_live &= ~(1ULL << iter->idx);
	return ret;
}

int bch2_trans_iter_free(struct btree_trans *trans,
			 struct btree_iter *iter)
{
	if (IS_ERR_OR_NULL(iter))
		return 0;

	set_btree_iter_dontneed(trans, iter);

	return bch2_trans_iter_put(trans, iter);
}

static void btree_trans_iter_alloc_fail(struct btree_trans *trans)
{
	struct btree_iter *iter;
	struct btree_insert_entry *i;
	char buf[100];

	btree_trans_sort_iters(trans);

	trans_for_each_iter_inorder(trans, iter)
		printk(KERN_ERR "iter: btree %s pos %s%s%s%s %pS\n",
		       bch2_btree_ids[iter->btree_id],
		       (bch2_bpos_to_text(&PBUF(buf), iter->real_pos), buf),
		       btree_iter_live(trans, iter) ? " live" : "",
		       (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
		       iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
		       (void *) iter->ip_allocated);

	trans_for_each_update(trans, i) {
		char buf[300];

		bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k));
		printk(KERN_ERR "update: btree %s %s\n",
		       bch2_btree_ids[i->iter->btree_id], buf);
	}
	panic("trans iter overflow\n");
}
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans,
						 struct btree_iter *pos)
{
	struct btree_iter *iter;
	unsigned idx;

	if (unlikely(trans->iters_linked ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
		btree_trans_iter_alloc_fail(trans);

	idx = __ffs64(~trans->iters_linked);
	iter = &trans->iters[idx];

	iter->trans = trans;
	iter->idx = idx;
	iter->child_idx = U8_MAX;
	iter->sorted_idx = U8_MAX;
	iter->flags = 0;
	iter->nodes_locked = 0;
	iter->nodes_intent_locked = 0;
	trans->iters_linked |= 1ULL << idx;

	btree_iter_list_add(trans, pos, iter);
	return iter;
}

static void btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
	unsigned i;

	__bch2_btree_iter_unlock(dst);
	btree_iter_child_free(dst);

	memcpy(&dst->flags, &src->flags,
	       sizeof(struct btree_iter) - offsetof(struct btree_iter, flags));

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(dst, i))
			six_lock_increment(&dst->l[i].b->c.lock,
					   __btree_lock_want(dst, i));

	dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
	dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;

	btree_iter_check_sort(dst->trans, dst);
}
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 unsigned btree_id, struct bpos pos,
					 unsigned locks_want,
					 unsigned depth,
					 unsigned flags)
{
	struct btree_iter *iter, *best = NULL;
	struct bpos real_pos, pos_min = POS_MIN;

	EBUG_ON(trans->restarted);

	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
	    btree_node_type_is_extents(btree_id) &&
	    !(flags & BTREE_ITER_NOT_EXTENTS) &&
	    !(flags & BTREE_ITER_ALL_SNAPSHOTS))
		flags |= BTREE_ITER_IS_EXTENTS;

	if ((flags & BTREE_ITER_TYPE) != BTREE_ITER_NODES &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS))
		pos.snapshot = btree_type_has_snapshots(btree_id)
			? U32_MAX : 0;

	real_pos = pos;

	if ((flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		real_pos = bpos_nosnap_successor(pos);

	trans_for_each_iter(trans, iter) {
		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
			continue;

		if (iter->btree_id != btree_id)
			continue;

		if (best) {
			int cmp = bkey_cmp(bpos_diff(best->real_pos, real_pos),
					   bpos_diff(iter->real_pos, real_pos));

			if (cmp < 0 ||
			    ((cmp == 0 && btree_iter_keep(trans, iter))))
				continue;
		}

		best = iter;
	}

	if (!best) {
		iter = btree_trans_iter_alloc(trans, NULL);
		bch2_btree_iter_init(trans, iter, btree_id);
	} else if (btree_iter_keep(trans, best)) {
		iter = btree_trans_iter_alloc(trans, best);
		btree_iter_copy(iter, best);
	} else {
		iter = best;
	}

	trans->iters_live |= 1ULL << iter->idx;
	trans->iters_touched |= 1ULL << iter->idx;

	iter->flags = flags;

	iter->snapshot = pos.snapshot;

	/*
	 * If the iterator has locks_want greater than requested, we explicitly
	 * do not downgrade it here - on transaction restart because btree node
	 * split needs to upgrade locks, we might be putting/getting the
	 * iterator again. Downgrading iterators only happens via an explicit
	 * bch2_trans_downgrade().
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > iter->locks_want) {
		iter->locks_want = locks_want;
		btree_iter_get_locks(iter, true, _THIS_IP_);
	}

	while (iter->level != depth) {
		btree_node_unlock(iter, iter->level);
		iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;
		iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
		if (iter->level < depth)
			iter->level++;
		else
			iter->level--;
	}

	iter->min_depth = depth;

	bch2_btree_iter_set_pos(iter, pos);
	btree_iter_set_search_pos(iter, real_pos);

	trace_trans_get_iter(_RET_IP_, trans->ip,
			     btree_id,
			     &real_pos, locks_want, iter->uptodate,
			     best ? &best->real_pos : &pos_min,
			     best ? best->locks_want : U8_MAX,
			     best ? best->uptodate : U8_MAX);

	return iter;
}
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos pos,
					    unsigned locks_want,
					    unsigned depth,
					    unsigned flags)
{
	struct btree_iter *iter =
		__bch2_trans_get_iter(trans, btree_id, pos,
				      locks_want, depth,
				      BTREE_ITER_NODES|
				      BTREE_ITER_NOT_EXTENTS|
				      BTREE_ITER_ALL_SNAPSHOTS|
				      flags);

	BUG_ON(bkey_cmp(iter->pos, pos));
	BUG_ON(iter->locks_want != min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->level != depth);
	BUG_ON(iter->min_depth != depth);
	iter->ip_allocated = _RET_IP_;

	return iter;
}

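/*
 * Illustrative sketch: node iterators are what the for_each_btree_node()
 * macro is built on; a hypothetical caller walking every node at @depth
 * might look like:
 *
 *	struct btree_iter *iter =
 *		bch2_trans_get_node_iter(trans, btree_id, POS_MIN, 0, depth, 0);
 *	struct btree *b;
 *
 *	for (b = bch2_btree_iter_peek_node(iter);
 *	     b;
 *	     b = bch2_btree_iter_next_node(iter))
 *		... inspect b ...
 *	bch2_trans_iter_put(trans, iter);
 */
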
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
					  struct btree_iter *src)
{
	struct btree_iter *iter;

	iter = btree_trans_iter_alloc(trans, src);
	btree_iter_copy(iter, src);

	trans->iters_live |= 1ULL << iter->idx;

	/*
	 * We don't need to preserve this iter since it's cheap to copy it
	 * again - this will cause trans_iter_put() to free it right away:
	 */
	set_btree_iter_dontneed(trans, iter);

	return iter;
}

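/*
 * Illustrative sketch: the bch2_trans_copy_iter() wrapper is handy for
 * peeking ahead without moving the original iterator (hypothetical usage):
 *
 *	struct btree_iter *copy = bch2_trans_copy_iter(trans, iter);
 *	struct bkey_s_c next = bch2_btree_iter_next(copy);
 *
 *	... look at next; iter's position is unchanged ...
 *	bch2_trans_iter_put(trans, copy);
 */
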
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size_t new_top = trans->mem_top + size;
	void *p;

	if (new_top > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;

		WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

		new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
			btree_trans_restart(trans);
			return ERR_PTR(-EINTR);
		}
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}

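/*
 * Illustrative sketch: memory that must live until the transaction commits
 * comes from bch2_trans_kmalloc(). A reallocation restarts the transaction,
 * so callers must propagate the error - -EINTR here means "retry the whole
 * transaction" (hypothetical usage):
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	int ret = PTR_ERR_OR_ZERO(k);
 *
 *	if (ret)
 *		return ret;
 *	bkey_init(&k->k);
 */
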
inline void bch2_trans_unlink_iters(struct btree_trans *trans)
{
	u64 iters = trans->iters_linked &
		~trans->iters_touched &
		~trans->iters_live;

	while (iters) {
		unsigned idx = __ffs64(iters);

		iters &= ~(1ULL << idx);
		__bch2_trans_iter_free(trans, idx);
	}
}

/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return EINTR when the trylock fails. When this occurs,
 * bch2_trans_begin() should be called and the transaction retried.
 */
void bch2_trans_begin(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
				 BTREE_ITER_SET_POS_AFTER_COMMIT);

	/*
	 * XXX: we shouldn't be doing this if the transaction was restarted, but
	 * currently we still overflow transaction iterators if we do that:
	 */
	bch2_trans_unlink_iters(trans);
	trans->iters_touched &= trans->iters_live;

	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->mem_top			= 0;

	trans->hooks			= NULL;
	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	bch2_trans_cond_resched(trans);

	if (trans->restarted)
		bch2_btree_iter_traverse_all(trans);

	trans->restarted = false;
}

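/*
 * Illustrative sketch of the retry loop bch2_trans_begin() exists for; the
 * bch2_trans_do()/__bch2_trans_do() helpers wrap this same pattern
 * (do_something() is a hypothetical stand-in):
 *
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_something(&trans) ?:
 *		      bch2_trans_commit(&trans, NULL, NULL, 0);
 *	} while (ret == -EINTR);
 */
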
static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
{
	size_t iters_bytes	= sizeof(struct btree_iter) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	size_t sorted_bytes	= sizeof(u8) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);

	trans->iters	= p; p += iters_bytes;
	trans->updates	= p; p += updates_bytes;
	trans->sorted	= p; p += sorted_bytes;
}

void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
	__acquires(&c->btree_trans_barrier)
{
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->ip	= _RET_IP_;

	/*
	 * Reallocating iterators currently completely breaks
	 * bch2_trans_iter_put(), so we always allocate the max:
	 */
	bch2_trans_alloc_iters(trans, c);

	if (expected_mem_bytes) {
		trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
		trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		}
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}

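/*
 * Illustrative sketch: every transaction brackets its btree work between
 * bch2_trans_init() and bch2_trans_exit() (hypothetical caller; the 0, 0
 * arguments decline the preallocation hints):
 *
 *	struct btree_trans trans;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	... allocate iterators, do btree operations, retry on -EINTR ...
 *	bch2_trans_exit(&trans);
 */
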
int bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	if (trans->iters_live) {
		struct btree_iter *iter;

		trans_for_each_iter(trans, iter)
			btree_iter_child_free(iter);
	}

	if (trans->iters_live) {
		struct btree_iter *iter;

		bch_err(c, "btree iterators leaked!");
		trans_for_each_iter(trans, iter)
			if (btree_iter_live(trans, iter))
				printk(KERN_ERR "  btree %s allocated at %pS\n",
				       bch2_btree_ids[iter->btree_id],
				       (void *) iter->ip_allocated);
		/* Be noisy about this: */
		bch2_fatal_error(c);
	}

	mutex_lock(&trans->c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&trans->c->btree_trans_lock);
#endif

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &trans->c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &trans->c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
#endif

	if (trans->iters)
		mempool_free(trans->iters, &trans->c->btree_iters_pool);

	/* Poison the pointers so use-after-exit faults immediately: */
	trans->mem = (void *) 0x1;
	trans->iters = (void *) 0x1;

	return trans->error ? -EIO : 0;
}

static void __maybe_unused
bch2_btree_iter_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     enum btree_iter_type type)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, type));
}

#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_btree_nodes_locked(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		if (btree_iter_type(iter) != BTREE_ITER_CACHED &&
		    iter->nodes_locked)
			return true;
	return false;
}
#endif

/* For debugging: print the transactions currently holding btree node locks: */
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_btree_nodes_locked(trans))
			continue;

		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_iter(trans, iter) {
			if (!iter->nodes_locked)
				continue;

			pr_buf(out, "  iter %u %c %s:",
			       iter->idx,
			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
			       bch2_btree_ids[iter->btree_id]);
			bch2_bpos_to_text(out, iter->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(iter, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(iter, l) ? "i" : "r", l);
					bch2_btree_iter_node_to_text(out,
							(void *) iter->l[l].b,
							btree_iter_type(iter));
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			iter = &trans->iters[trans->locking_iter_idx];
			pr_buf(out, "  locking iter %u %c l=%u %s:",
			       trans->locking_iter_idx,
			       btree_iter_type(iter) == BTREE_ITER_CACHED ? 'c' : 'b',
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_iter_node_to_text(out,
					(void *) &b->c,
					btree_iter_type(iter));
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_iters_pool);
	cleanup_srcu_struct(&c->btree_trans_barrier);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return init_srcu_struct(&c->btree_trans_barrier) ?:
		mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
			sizeof(u8) * nr +
			sizeof(struct btree_iter) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX);
}