1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
17 #include "subvolume.h"
19 #include <linux/prefetch.h>
20 #include <trace/events/bcachefs.h>
22 static void btree_trans_verify_sorted(struct btree_trans *);
23 inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
25 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
26 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
29 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
31 #ifdef CONFIG_BCACHEFS_DEBUG
32 return iter->ip_allocated;
38 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
41 * Unlocks before scheduling
42 * Note: does not revalidate iterator
44 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
46 if (need_resched() || race_fault()) {
47 bch2_trans_unlock(trans);
49 return bch2_trans_relock(trans) ? 0 : -EINTR;
55 static inline int __btree_path_cmp(const struct btree_path *l,
56 enum btree_id r_btree_id,
62 * Must match lock ordering as defined by __bch2_btree_node_lock:
64 return cmp_int(l->btree_id, r_btree_id) ?:
65 cmp_int((int) l->cached, (int) r_cached) ?:
66 bpos_cmp(l->pos, r_pos) ?:
67 -cmp_int(l->level, r_level);
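/*
 * Illustrative consequence of the comparison above: at the same position in
 * the same btree, a non-cached path sorts before a cached one, and a level 1
 * (interior node) path sorts before a level 0 (leaf) path - i.e. the order
 * in which __bch2_btree_node_lock() is willing to take the corresponding
 * locks.
 */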
70 static inline int btree_path_cmp(const struct btree_path *l,
71 const struct btree_path *r)
73 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
76 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
78 /* Are we iterating over keys in all snapshots? */
79 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
80 p = bpos_successor(p);
82 p = bpos_nosnap_successor(p);
83 p.snapshot = iter->snapshot;
89 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
91 /* Are we iterating over keys in all snapshots? */
92 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
93 p = bpos_predecessor(p);
95 p = bpos_nosnap_predecessor(p);
96 p.snapshot = iter->snapshot;
102 static inline bool is_btree_node(struct btree_path *path, unsigned l)
104 return l < BTREE_MAX_DEPTH &&
105 (unsigned long) path->l[l].b >= 128;
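/*
 * Note: path->l[l].b holds either a pointer to a real btree node or one of
 * the small integer BTREE_ITER_NO_NODE_* sentinels used elsewhere in this
 * file; the >= 128 check above treats any sentinel as "no node here".
 */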
108 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
110 struct bpos pos = iter->pos;
112 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
113 bkey_cmp(pos, POS_MAX))
114 pos = bkey_successor(iter, pos);
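/*
 * Extents are indexed by their end position: an extent that ends exactly at
 * iter->pos does not cover iter->pos, so for extents iterators we search
 * from the successor of iter->pos in order to skip it.
 */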
118 static inline bool btree_path_pos_before_node(struct btree_path *path,
121 return bpos_cmp(path->pos, b->data->min_key) < 0;
124 static inline bool btree_path_pos_after_node(struct btree_path *path,
127 return bpos_cmp(b->key.k.p, path->pos) < 0;
130 static inline bool btree_path_pos_in_node(struct btree_path *path,
133 return path->btree_id == b->c.btree_id &&
134 !btree_path_pos_before_node(path, b) &&
135 !btree_path_pos_after_node(path, b);
138 /* Btree node locking: */
140 void bch2_btree_node_unlock_write(struct btree_trans *trans,
141 struct btree_path *path, struct btree *b)
143 bch2_btree_node_unlock_write_inlined(trans, path, b);
146 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
148 struct btree_path *linked;
149 unsigned readers = 0;
151 trans_for_each_path(trans, linked)
152 if (linked->l[b->c.level].b == b &&
153 btree_node_read_locked(linked, b->c.level))
157 * Must drop our read locks before calling six_lock_write() -
158 * six_unlock() won't do wakeups until the reader count
159 * goes to 0, and it's safe because we have the node intent locked:
162 if (!b->c.lock.readers)
163 atomic64_sub(__SIX_VAL(read_lock, readers),
164 &b->c.lock.state.counter);
166 this_cpu_sub(*b->c.lock.readers, readers);
168 six_lock_write(&b->c.lock, NULL, NULL);
170 if (!b->c.lock.readers)
171 atomic64_add(__SIX_VAL(read_lock, readers),
172 &b->c.lock.state.counter);
174 this_cpu_add(*b->c.lock.readers, readers);
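/*
 * Illustrative walk-through of the above: if this transaction holds, say,
 * two read locks on @b via other paths, we subtract those two from the SIX
 * lock's reader count so six_lock_write() doesn't wait on ourselves, take
 * the write lock, then add the two read references back while holding it.
 * Holding the intent lock is what makes temporarily hiding our own readers
 * safe, as the comment above notes.
 */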
177 bool __bch2_btree_node_relock(struct btree_trans *trans,
178 struct btree_path *path, unsigned level)
180 struct btree *b = btree_path_node(path, level);
181 int want = __btree_lock_want(path, level);
183 if (!is_btree_node(path, level))
189 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
190 (btree_node_lock_seq_matches(path, b, level) &&
191 btree_node_lock_increment(trans, b, level, want))) {
192 mark_btree_node_locked(trans, path, level, want);
196 if (b != BTREE_ITER_NO_NODE_CACHED &&
197 b != BTREE_ITER_NO_NODE_INIT)
198 trace_btree_node_relock_fail(trans->fn, _RET_IP_,
202 path->l[level].lock_seq,
203 is_btree_node(path, level) ? b->c.lock.state.seq : 0);
207 bool bch2_btree_node_upgrade(struct btree_trans *trans,
208 struct btree_path *path, unsigned level)
210 struct btree *b = path->l[level].b;
212 if (!is_btree_node(path, level))
215 switch (btree_lock_want(path, level)) {
216 case BTREE_NODE_UNLOCKED:
217 BUG_ON(btree_node_locked(path, level));
219 case BTREE_NODE_READ_LOCKED:
220 BUG_ON(btree_node_intent_locked(path, level));
221 return bch2_btree_node_relock(trans, path, level);
222 case BTREE_NODE_INTENT_LOCKED:
226 if (btree_node_intent_locked(path, level))
232 if (btree_node_locked(path, level)
233 ? six_lock_tryupgrade(&b->c.lock)
234 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
237 if (btree_node_lock_seq_matches(path, b, level) &&
238 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
239 btree_node_unlock(path, level);
245 mark_btree_node_intent_locked(trans, path, level);
249 static inline bool btree_path_get_locks(struct btree_trans *trans,
250 struct btree_path *path,
253 unsigned l = path->level;
257 if (!btree_path_node(path, l))
261 ? bch2_btree_node_upgrade(trans, path, l)
262 : bch2_btree_node_relock(trans, path, l)))
266 } while (l < path->locks_want);
269 * When we fail to get a lock, we have to ensure that any child nodes
270 * can't be relocked so bch2_btree_path_traverse has to walk back up to
271 * the node that we failed to relock:
274 __bch2_btree_path_unlock(path);
275 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
278 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
280 } while (fail_idx >= 0);
283 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
284 path->uptodate = BTREE_ITER_UPTODATE;
286 bch2_trans_verify_locks(trans);
288 return path->uptodate < BTREE_ITER_NEED_RELOCK;
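/*
 * Example of the failure path above: with locks_want == 3, if level 2
 * relocks but level 1 fails, the path is fully unlocked and levels 0 and 1
 * are set to BTREE_ITER_NO_NODE_GET_LOCKS, so the next traverse has to walk
 * back down from level 2 rather than relocking the stale child nodes.
 */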
291 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
295 ? container_of(_b, struct btree, c)->key.k.p
296 : container_of(_b, struct bkey_cached, c)->key.pos;
300 bool __bch2_btree_node_lock(struct btree_trans *trans,
301 struct btree_path *path,
303 struct bpos pos, unsigned level,
304 enum six_lock_type type,
305 six_lock_should_sleep_fn should_sleep_fn, void *p,
308 struct btree_path *linked;
311 /* Check if it's safe to block: */
312 trans_for_each_path(trans, linked) {
313 if (!linked->nodes_locked)
317 * Can't block taking an intent lock if we have _any_ nodes read
320 * - Our read lock blocks another thread with an intent lock on
321 * the same node from getting a write lock, and thus from
322 * dropping its intent lock
324 * - And the other thread may have multiple nodes intent locked:
325 * both the node we want to intent lock, and the node we
326 * already have read locked - deadlock:
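 *
 * Concrete example: thread A holds a read lock on node X and blocks
 * waiting for an intent lock on node Y; thread B already holds intent
 * locks on both X and Y and is waiting to upgrade its intent lock on X to
 * a write lock, which A's read lock prevents - neither can make progress,
 * so we restart the transaction instead of blocking.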
328 if (type == SIX_LOCK_intent &&
329 linked->nodes_locked != linked->nodes_intent_locked) {
334 if (linked->btree_id != path->btree_id) {
335 if (linked->btree_id < path->btree_id)
343 * Within the same btree, non-cached paths come before cached
346 if (linked->cached != path->cached) {
355 * Interior nodes must be locked before their descendants: if
356 * another path has possible descendants locked of the node
357 * we're about to lock, it must have the ancestors locked too:
359 if (level > __fls(linked->nodes_locked)) {
364 /* Must lock btree nodes in key order: */
365 if (btree_node_locked(linked, level) &&
366 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
367 linked->cached)) <= 0) {
373 return btree_node_lock_type(trans, path, b, pos, level,
374 type, should_sleep_fn, p);
376 trace_trans_restart_would_deadlock(trans->fn, ip,
377 trans->in_traverse_all, reason,
384 btree_trans_restart(trans);
388 /* Btree iterator locking: */
390 #ifdef CONFIG_BCACHEFS_DEBUG
392 static void bch2_btree_path_verify_locks(struct btree_path *path)
396 if (!path->nodes_locked) {
397 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
398 btree_path_node(path, path->level));
402 for (l = 0; btree_path_node(path, l); l++)
403 BUG_ON(btree_lock_want(path, l) !=
404 btree_node_locked_type(path, l));
407 void bch2_trans_verify_locks(struct btree_trans *trans)
409 struct btree_path *path;
411 trans_for_each_path(trans, path)
412 bch2_btree_path_verify_locks(path);
415 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
418 /* Btree path locking: */
421 * Only for btree_cache.c - only relocks intent locks
423 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
424 struct btree_path *path)
428 for (l = path->level;
429 l < path->locks_want && btree_path_node(path, l);
431 if (!bch2_btree_node_relock(trans, path, l)) {
432 __bch2_btree_path_unlock(path);
433 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
434 trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
435 path->btree_id, &path->pos);
436 btree_trans_restart(trans);
445 static bool bch2_btree_path_relock(struct btree_trans *trans,
446 struct btree_path *path, unsigned long trace_ip)
448 bool ret = btree_path_get_locks(trans, path, false);
451 trace_trans_restart_relock_path(trans->fn, trace_ip,
452 path->btree_id, &path->pos);
453 btree_trans_restart(trans);
458 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
459 struct btree_path *path,
460 unsigned new_locks_want)
462 struct btree_path *linked;
464 EBUG_ON(path->locks_want >= new_locks_want);
466 path->locks_want = new_locks_want;
468 if (btree_path_get_locks(trans, path, true))
472 * XXX: this is ugly - we'd prefer to not be mucking with other
473 * iterators in the btree_trans here.
475 * On failure to upgrade the iterator, setting iter->locks_want and
476 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
477 * get the locks we want on transaction restart.
479 * But if this iterator was a clone, on transaction restart what we did
480 * to this iterator isn't going to be preserved.
482 * Possibly we could add an iterator field for the parent iterator when
483 * an iterator is a copy - for now, we'll just upgrade any other
484 * iterators with the same btree id.
486 * The code below used to be needed to ensure ancestor nodes get locked
487 * before interior nodes - now that's handled by
488 * bch2_btree_path_traverse_all().
490 if (!path->cached && !trans->in_traverse_all)
491 trans_for_each_path(trans, linked)
492 if (linked != path &&
493 linked->cached == path->cached &&
494 linked->btree_id == path->btree_id &&
495 linked->locks_want < new_locks_want) {
496 linked->locks_want = new_locks_want;
497 btree_path_get_locks(trans, linked, true);
503 void __bch2_btree_path_downgrade(struct btree_path *path,
504 unsigned new_locks_want)
508 EBUG_ON(path->locks_want < new_locks_want);
510 path->locks_want = new_locks_want;
512 while (path->nodes_locked &&
513 (l = __fls(path->nodes_locked)) >= path->locks_want) {
514 if (l > path->level) {
515 btree_node_unlock(path, l);
517 if (btree_node_intent_locked(path, l)) {
518 six_lock_downgrade(&path->l[l].b->c.lock);
519 path->nodes_intent_locked ^= 1 << l;
525 bch2_btree_path_verify_locks(path);
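/*
 * Example of the above: a path at level 0 holding intent locks on levels
 * 0-2, downgraded to locks_want == 1: levels 2 and 1 are unlocked outright
 * (they're above path->level), while the intent lock at level 0 is kept.
 */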
528 void bch2_trans_downgrade(struct btree_trans *trans)
530 struct btree_path *path;
532 trans_for_each_path(trans, path)
533 bch2_btree_path_downgrade(path);
536 /* Btree transaction locking: */
538 bool bch2_trans_relock(struct btree_trans *trans)
540 struct btree_path *path;
542 if (unlikely(trans->restarted))
545 trans_for_each_path(trans, path)
546 if (path->should_be_locked &&
547 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
548 trace_trans_restart_relock(trans->fn, _RET_IP_,
549 path->btree_id, &path->pos);
550 BUG_ON(!trans->restarted);
556 void bch2_trans_unlock(struct btree_trans *trans)
558 struct btree_path *path;
560 trans_for_each_path(trans, path)
561 __bch2_btree_path_unlock(path);
564 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
565 * btree nodes, it implements its own walking:
567 BUG_ON(!trans->is_initial_gc &&
568 lock_class_is_held(&bch2_btree_node_lock_key));
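/*
 * Typical unlock/relock usage (illustrative sketch, not from this file): a
 * caller that needs to block outside the btree drops its locks first and
 * revalidates afterwards:
 *
 *	bch2_trans_unlock(trans);
 *	do_blocking_work();		// placeholder for any sleeping operation
 *	if (!bch2_trans_relock(trans))
 *		return -EINTR;		// transaction was restarted
 *
 * which is exactly the pattern bch2_trans_cond_resched() above follows.
 */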
571 /* Btree iterator: */
573 #ifdef CONFIG_BCACHEFS_DEBUG
575 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
576 struct btree_path *path)
578 struct bkey_cached *ck;
579 bool locked = btree_node_locked(path, 0);
581 if (!bch2_btree_node_relock(trans, path, 0))
584 ck = (void *) path->l[0].b;
585 BUG_ON(ck->key.btree_id != path->btree_id ||
586 bkey_cmp(ck->key.pos, path->pos));
589 btree_node_unlock(path, 0);
592 static void bch2_btree_path_verify_level(struct btree_trans *trans,
593 struct btree_path *path, unsigned level)
595 struct btree_path_level *l;
596 struct btree_node_iter tmp;
598 struct bkey_packed *p, *k;
599 struct printbuf buf1 = PRINTBUF;
600 struct printbuf buf2 = PRINTBUF;
601 struct printbuf buf3 = PRINTBUF;
604 if (!bch2_debug_check_iterators)
609 locked = btree_node_locked(path, level);
613 bch2_btree_path_verify_cached(trans, path);
617 if (!btree_path_node(path, level))
620 if (!bch2_btree_node_relock(trans, path, level))
623 BUG_ON(!btree_path_pos_in_node(path, l->b));
625 bch2_btree_node_iter_verify(&l->iter, l->b);
628 * For interior nodes, the iterator will have skipped past deleted keys:
631 ? bch2_btree_node_iter_prev(&tmp, l->b)
632 : bch2_btree_node_iter_prev_all(&tmp, l->b);
633 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
635 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
640 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
646 btree_node_unlock(path, level);
649 bch2_bpos_to_text(&buf1, path->pos);
652 struct bkey uk = bkey_unpack_key(l->b, p);
653 bch2_bkey_to_text(&buf2, &uk);
655 prt_printf(&buf2, "(none)");
659 struct bkey uk = bkey_unpack_key(l->b, k);
660 bch2_bkey_to_text(&buf3, &uk);
662 prt_printf(&buf3, "(none)");
665 panic("path should be %s key at level %u:\n"
669 msg, level, buf1.buf, buf2.buf, buf3.buf);
672 static void bch2_btree_path_verify(struct btree_trans *trans,
673 struct btree_path *path)
675 struct bch_fs *c = trans->c;
678 EBUG_ON(path->btree_id >= BTREE_ID_NR);
680 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
682 BUG_ON(!path->cached &&
683 c->btree_roots[path->btree_id].b->c.level > i);
687 bch2_btree_path_verify_level(trans, path, i);
690 bch2_btree_path_verify_locks(path);
693 void bch2_trans_verify_paths(struct btree_trans *trans)
695 struct btree_path *path;
697 trans_for_each_path(trans, path)
698 bch2_btree_path_verify(trans, path);
701 static void bch2_btree_iter_verify(struct btree_iter *iter)
703 struct btree_trans *trans = iter->trans;
705 BUG_ON(iter->btree_id >= BTREE_ID_NR);
707 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
709 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
710 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
712 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
713 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
714 !btree_type_has_snapshots(iter->btree_id));
716 if (iter->update_path)
717 bch2_btree_path_verify(trans, iter->update_path);
718 bch2_btree_path_verify(trans, iter->path);
721 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
723 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
724 !iter->pos.snapshot);
726 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
727 iter->pos.snapshot != iter->snapshot);
729 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
730 bkey_cmp(iter->pos, iter->k.p) > 0);
733 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
735 struct btree_trans *trans = iter->trans;
736 struct btree_iter copy;
737 struct bkey_s_c prev;
740 if (!bch2_debug_check_iterators)
743 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
746 if (bkey_err(k) || !k.k)
749 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
753 bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos,
754 BTREE_ITER_NOPRESERVE|
755 BTREE_ITER_ALL_SNAPSHOTS);
756 prev = bch2_btree_iter_prev(©);
760 ret = bkey_err(prev);
764 if (!bkey_cmp(prev.k->p, k.k->p) &&
765 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
766 prev.k->p.snapshot) > 0) {
767 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
769 bch2_bkey_to_text(&buf1, k.k);
770 bch2_bkey_to_text(&buf2, prev.k);
772 panic("iter snap %u\n"
779 bch2_trans_iter_exit(trans, ©);
783 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
784 struct bpos pos, bool key_cache)
786 struct btree_path *path;
788 struct printbuf buf = PRINTBUF;
790 trans_for_each_path_inorder(trans, path, idx) {
791 int cmp = cmp_int(path->btree_id, id) ?:
792 cmp_int(path->cached, key_cache);
799 if (!(path->nodes_locked & 1) ||
800 !path->should_be_locked)
804 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
805 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
808 if (!bkey_cmp(pos, path->pos))
813 bch2_dump_trans_paths_updates(trans);
814 bch2_bpos_to_text(&buf, pos);
816 panic("not locked: %s %s%s\n",
817 bch2_btree_ids[id], buf.buf,
818 key_cache ? " cached" : "");
823 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
824 struct btree_path *path, unsigned l) {}
825 static inline void bch2_btree_path_verify(struct btree_trans *trans,
826 struct btree_path *path) {}
827 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
828 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
829 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
833 /* Btree path: fixups after btree updates */
835 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
838 struct bkey_packed *k)
840 struct btree_node_iter_set *set;
842 btree_node_iter_for_each(iter, set)
843 if (set->end == t->end_offset) {
844 set->k = __btree_node_key_to_offset(b, k);
845 bch2_btree_node_iter_sort(iter, b);
849 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
852 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
854 struct bkey_packed *where)
856 struct btree_path_level *l = &path->l[b->c.level];
858 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
861 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
862 bch2_btree_node_iter_advance(&l->iter, l->b);
865 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
867 struct bkey_packed *where)
869 struct btree_path *path;
871 trans_for_each_path_with_node(trans, b, path) {
872 __bch2_btree_path_fix_key_modified(path, b, where);
873 bch2_btree_path_verify_level(trans, path, b->c.level);
877 static void __bch2_btree_node_iter_fix(struct btree_path *path,
879 struct btree_node_iter *node_iter,
881 struct bkey_packed *where,
882 unsigned clobber_u64s,
885 const struct bkey_packed *end = btree_bkey_last(b, t);
886 struct btree_node_iter_set *set;
887 unsigned offset = __btree_node_key_to_offset(b, where);
888 int shift = new_u64s - clobber_u64s;
889 unsigned old_end = t->end_offset - shift;
890 unsigned orig_iter_pos = node_iter->data[0].k;
891 bool iter_current_key_modified =
892 orig_iter_pos >= offset &&
893 orig_iter_pos <= offset + clobber_u64s;
895 btree_node_iter_for_each(node_iter, set)
896 if (set->end == old_end)
899 /* didn't find the bset in the iterator - might have to re-add it: */
901 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
902 bch2_btree_node_iter_push(node_iter, b, where, end);
905 /* Iterator is after key that changed */
909 set->end = t->end_offset;
911 /* Iterator hasn't gotten to the key that changed yet: */
916 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
918 } else if (set->k < offset + clobber_u64s) {
919 set->k = offset + new_u64s;
920 if (set->k == set->end)
921 bch2_btree_node_iter_set_drop(node_iter, set);
923 /* Iterator is after key that changed */
924 set->k = (int) set->k + shift;
928 bch2_btree_node_iter_sort(node_iter, b);
930 if (node_iter->data[0].k != orig_iter_pos)
931 iter_current_key_modified = true;
934 * When a new key is added, and the node iterator now points to that
935 * key, the iterator might have skipped past deleted keys that should
936 * come after the key the iterator now points to. We have to rewind to
937 * before those deleted keys - otherwise
938 * bch2_btree_node_iter_prev_all() breaks:
940 if (!bch2_btree_node_iter_end(node_iter) &&
941 iter_current_key_modified &&
944 struct bkey_packed *k, *k2, *p;
946 k = bch2_btree_node_iter_peek_all(node_iter, b);
948 for_each_bset(b, t) {
949 bool set_pos = false;
951 if (node_iter->data[0].end == t->end_offset)
954 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
956 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
957 bkey_iter_cmp(b, k, p) < 0) {
963 btree_node_iter_set_set_pos(node_iter,
969 void bch2_btree_node_iter_fix(struct btree_trans *trans,
970 struct btree_path *path,
972 struct btree_node_iter *node_iter,
973 struct bkey_packed *where,
974 unsigned clobber_u64s,
977 struct bset_tree *t = bch2_bkey_to_bset(b, where);
978 struct btree_path *linked;
980 if (node_iter != &path->l[b->c.level].iter) {
981 __bch2_btree_node_iter_fix(path, b, node_iter, t,
982 where, clobber_u64s, new_u64s);
984 if (bch2_debug_check_iterators)
985 bch2_btree_node_iter_verify(node_iter, b);
988 trans_for_each_path_with_node(trans, b, linked) {
989 __bch2_btree_node_iter_fix(linked, b,
990 &linked->l[b->c.level].iter, t,
991 where, clobber_u64s, new_u64s);
992 bch2_btree_path_verify_level(trans, linked, b->c.level);
996 /* Btree path level: pointer to a particular btree node and node iter */
998 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
999 struct btree_path_level *l,
1001 struct bkey_packed *k)
1005 * signal to bch2_btree_iter_peek_slot() that we're currently at a hole
1008 u->type = KEY_TYPE_deleted;
1009 return bkey_s_c_null;
1012 return bkey_disassemble(l->b, k, u);
1015 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1016 struct btree_path_level *l,
1019 return __btree_iter_unpack(c, l, u,
1020 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1023 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
1024 struct btree_path *path,
1025 struct btree_path_level *l,
1028 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1029 bch2_btree_node_iter_peek(&l->iter, l->b));
1031 path->pos = k.k ? k.k->p : l->b->key.k.p;
1035 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
1036 struct btree_path *path,
1037 struct btree_path_level *l,
1040 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1041 bch2_btree_node_iter_prev(&l->iter, l->b));
1043 path->pos = k.k ? k.k->p : l->b->data->min_key;
1047 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1048 struct btree_path_level *l,
1051 struct bkey_packed *k;
1052 int nr_advanced = 0;
1054 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1055 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1056 if (max_advance > 0 && nr_advanced >= max_advance)
1059 bch2_btree_node_iter_advance(&l->iter, l->b);
1067 * Verify that iterator for parent node points to child node:
1069 static void btree_path_verify_new_node(struct btree_trans *trans,
1070 struct btree_path *path, struct btree *b)
1072 struct bch_fs *c = trans->c;
1073 struct btree_path_level *l;
1076 struct bkey_packed *k;
1078 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1081 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1084 plevel = b->c.level + 1;
1085 if (!btree_path_node(path, plevel))
1088 parent_locked = btree_node_locked(path, plevel);
1090 if (!bch2_btree_node_relock(trans, path, plevel))
1093 l = &path->l[plevel];
1094 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1097 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1098 struct printbuf buf1 = PRINTBUF;
1099 struct printbuf buf2 = PRINTBUF;
1100 struct printbuf buf3 = PRINTBUF;
1101 struct printbuf buf4 = PRINTBUF;
1102 struct bkey uk = bkey_unpack_key(b, k);
1104 bch2_dump_btree_node(c, l->b);
1105 bch2_bpos_to_text(&buf1, path->pos);
1106 bch2_bkey_to_text(&buf2, &uk);
1107 bch2_bpos_to_text(&buf3, b->data->min_key);
1108 bch2_bpos_to_text(&buf4, b->data->max_key);
1109 panic("parent iter doesn't point to new node:\n"
1113 bch2_btree_ids[path->btree_id],
1114 buf1.buf, buf2.buf, buf3.buf, buf4.buf);
1118 btree_node_unlock(path, plevel);
1121 static inline void __btree_path_level_init(struct btree_path *path,
1124 struct btree_path_level *l = &path->l[level];
1126 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1129 * Iterators to interior nodes should always be pointed at the first non whiteout:
1133 bch2_btree_node_iter_peek(&l->iter, l->b);
1136 static inline void btree_path_level_init(struct btree_trans *trans,
1137 struct btree_path *path,
1140 BUG_ON(path->cached);
1142 btree_path_verify_new_node(trans, path, b);
1144 EBUG_ON(!btree_path_pos_in_node(path, b));
1145 EBUG_ON(b->c.lock.state.seq & 1);
1147 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1148 path->l[b->c.level].b = b;
1149 __btree_path_level_init(path, b->c.level);
1152 /* Btree path: fixups after btree node updates: */
1155 * A btree node is being replaced - update the iterator to point to the new node:
1158 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1160 struct btree_path *path;
1162 trans_for_each_path(trans, path)
1163 if (!path->cached &&
1164 btree_path_pos_in_node(path, b)) {
1165 enum btree_node_locked_type t =
1166 btree_lock_want(path, b->c.level);
1168 if (path->nodes_locked &&
1169 t != BTREE_NODE_UNLOCKED) {
1170 btree_node_unlock(path, b->c.level);
1171 six_lock_increment(&b->c.lock, t);
1172 mark_btree_node_locked(trans, path, b->c.level, t);
1175 btree_path_level_init(trans, path, b);
1180 * A btree node has been modified in such a way as to invalidate iterators - fix them:
1183 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1185 struct btree_path *path;
1187 trans_for_each_path_with_node(trans, b, path)
1188 __btree_path_level_init(path, b->c.level);
1191 /* Btree path: traverse, set_pos: */
1193 static int lock_root_check_fn(struct six_lock *lock, void *p)
1195 struct btree *b = container_of(lock, struct btree, c.lock);
1196 struct btree **rootp = p;
1198 return b == *rootp ? 0 : -1;
1201 static inline int btree_path_lock_root(struct btree_trans *trans,
1202 struct btree_path *path,
1203 unsigned depth_want,
1204 unsigned long trace_ip)
1206 struct bch_fs *c = trans->c;
1207 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1208 enum six_lock_type lock_type;
1211 EBUG_ON(path->nodes_locked);
1214 b = READ_ONCE(*rootp);
1215 path->level = READ_ONCE(b->c.level);
1217 if (unlikely(path->level < depth_want)) {
1219 * the root is at a lower depth than the depth we want:
1220 * got to the end of the btree, or we're walking nodes
1221 * greater than some depth and there are no nodes >= that depth
1224 path->level = depth_want;
1225 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1226 path->l[i].b = NULL;
1230 lock_type = __btree_lock_want(path, path->level);
1231 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1232 path->level, lock_type,
1233 lock_root_check_fn, rootp,
1235 if (trans->restarted)
1240 if (likely(b == READ_ONCE(*rootp) &&
1241 b->c.level == path->level &&
1243 for (i = 0; i < path->level; i++)
1244 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1245 path->l[path->level].b = b;
1246 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1247 path->l[i].b = NULL;
1249 mark_btree_node_locked(trans, path, path->level, lock_type);
1250 btree_path_level_init(trans, path, b);
1254 six_unlock_type(&b->c.lock, lock_type);
1259 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1261 struct bch_fs *c = trans->c;
1262 struct btree_path_level *l = path_l(path);
1263 struct btree_node_iter node_iter = l->iter;
1264 struct bkey_packed *k;
1265 struct bkey_buf tmp;
1266 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1267 ? (path->level > 1 ? 0 : 2)
1268 : (path->level > 1 ? 1 : 16);
1269 bool was_locked = btree_node_locked(path, path->level);
1272 bch2_bkey_buf_init(&tmp);
1274 while (nr && !ret) {
1275 if (!bch2_btree_node_relock(trans, path, path->level))
1278 bch2_btree_node_iter_advance(&node_iter, l->b);
1279 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1283 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1284 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1289 btree_node_unlock(path, path->level);
1291 bch2_bkey_buf_exit(&tmp, c);
1295 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
1296 struct btree_and_journal_iter *jiter)
1298 struct bch_fs *c = trans->c;
1300 struct bkey_buf tmp;
1301 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1302 ? (path->level > 1 ? 0 : 2)
1303 : (path->level > 1 ? 1 : 16);
1304 bool was_locked = btree_node_locked(path, path->level);
1307 bch2_bkey_buf_init(&tmp);
1309 while (nr && !ret) {
1310 if (!bch2_btree_node_relock(trans, path, path->level))
1313 bch2_btree_and_journal_iter_advance(jiter);
1314 k = bch2_btree_and_journal_iter_peek(jiter);
1318 bch2_bkey_buf_reassemble(&tmp, c, k);
1319 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1324 btree_node_unlock(path, path->level);
1326 bch2_bkey_buf_exit(&tmp, c);
1330 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1331 struct btree_path *path,
1332 unsigned plevel, struct btree *b)
1334 struct btree_path_level *l = &path->l[plevel];
1335 bool locked = btree_node_locked(path, plevel);
1336 struct bkey_packed *k;
1337 struct bch_btree_ptr_v2 *bp;
1339 if (!bch2_btree_node_relock(trans, path, plevel))
1342 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1343 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1345 bp = (void *) bkeyp_val(&l->b->format, k);
1346 bp->mem_ptr = (unsigned long)b;
1349 btree_node_unlock(path, plevel);
1352 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
1353 struct btree_path *path,
1355 struct bkey_buf *out)
1357 struct bch_fs *c = trans->c;
1358 struct btree_path_level *l = path_l(path);
1359 struct btree_and_journal_iter jiter;
1363 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
1365 k = bch2_btree_and_journal_iter_peek(&jiter);
1367 bch2_bkey_buf_reassemble(out, c, k);
1369 if (flags & BTREE_ITER_PREFETCH)
1370 ret = btree_path_prefetch_j(trans, path, &jiter);
1372 bch2_btree_and_journal_iter_exit(&jiter);
1376 static __always_inline int btree_path_down(struct btree_trans *trans,
1377 struct btree_path *path,
1379 unsigned long trace_ip)
1381 struct bch_fs *c = trans->c;
1382 struct btree_path_level *l = path_l(path);
1384 unsigned level = path->level - 1;
1385 enum six_lock_type lock_type = __btree_lock_want(path, level);
1386 bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
1387 struct bkey_buf tmp;
1390 EBUG_ON(!btree_node_locked(path, path->level));
1392 bch2_bkey_buf_init(&tmp);
1394 if (unlikely(!replay_done)) {
1395 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
1399 bch2_bkey_buf_unpack(&tmp, c, l->b,
1400 bch2_btree_node_iter_peek(&l->iter, l->b));
1402 if (flags & BTREE_ITER_PREFETCH) {
1403 ret = btree_path_prefetch(trans, path);
1409 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1410 ret = PTR_ERR_OR_ZERO(b);
1414 mark_btree_node_locked(trans, path, level, lock_type);
1415 btree_path_level_init(trans, path, b);
1417 if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
1418 unlikely(b != btree_node_mem_ptr(tmp.k)))
1419 btree_node_mem_ptr_set(trans, path, level + 1, b);
1421 if (btree_node_read_locked(path, level + 1))
1422 btree_node_unlock(path, level + 1);
1423 path->level = level;
1425 bch2_btree_path_verify_locks(path);
1427 bch2_bkey_buf_exit(&tmp, c);
1431 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1432 unsigned, unsigned long);
1434 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1436 struct bch_fs *c = trans->c;
1437 struct btree_path *path;
1438 unsigned long trace_ip = _RET_IP_;
1441 if (trans->in_traverse_all)
1444 trans->in_traverse_all = true;
1446 trans->restarted = false;
1447 trans->traverse_all_idx = U8_MAX;
1449 trans_for_each_path(trans, path)
1450 path->should_be_locked = false;
1452 btree_trans_verify_sorted(trans);
1454 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1455 struct btree_path *path1 = trans->paths + trans->sorted[i];
1456 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1458 if (path1->btree_id == path2->btree_id &&
1459 path1->locks_want < path2->locks_want)
1460 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1461 else if (!path1->locks_want && path2->locks_want)
1462 __bch2_btree_path_upgrade(trans, path1, 1);
1465 bch2_trans_unlock(trans);
1468 if (unlikely(trans->memory_allocation_failure)) {
1471 closure_init_stack(&cl);
1474 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1479 /* Now, redo traversals in correct order: */
1480 trans->traverse_all_idx = 0;
1481 while (trans->traverse_all_idx < trans->nr_sorted) {
1482 path = trans->paths + trans->sorted[trans->traverse_all_idx];
1485 * Traversing a path can cause another path to be added at about
1486 * the same position:
1488 if (path->uptodate) {
1489 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1490 if (ret == -EINTR || ret == -ENOMEM)
1494 BUG_ON(path->uptodate);
1496 trans->traverse_all_idx++;
1501 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1502 * and relock(), relock() won't relock since path->should_be_locked
1503 * isn't set yet, which is all fine
1505 trans_for_each_path(trans, path)
1506 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1508 bch2_btree_cache_cannibalize_unlock(c);
1510 trans->in_traverse_all = false;
1512 trace_trans_traverse_all(trans->fn, trace_ip);
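/*
 * Note on the above: this is the slow path - every lock the transaction
 * holds is dropped and every path is re-traversed in sorted order, so locks
 * end up being reacquired in the ordering defined by __btree_path_cmp() and
 * the would-deadlock checks in __bch2_btree_node_lock() can't trip over our
 * own paths.
 */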
1516 static inline bool btree_path_good_node(struct btree_trans *trans,
1517 struct btree_path *path,
1518 unsigned l, int check_pos)
1520 if (!is_btree_node(path, l) ||
1521 !bch2_btree_node_relock(trans, path, l))
1524 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1526 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1531 static void btree_path_set_level_up(struct btree_path *path)
1533 btree_node_unlock(path, path->level);
1534 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
1536 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1539 static void btree_path_set_level_down(struct btree_trans *trans,
1540 struct btree_path *path,
1545 path->level = new_level;
1547 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1548 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1549 btree_node_unlock(path, l);
1551 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1552 bch2_btree_path_verify(trans, path);
1555 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1556 struct btree_path *path,
1559 unsigned i, l = path->level;
1561 while (btree_path_node(path, l) &&
1562 !btree_path_good_node(trans, path, l, check_pos)) {
1563 btree_node_unlock(path, l);
1564 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1568 /* If we need intent locks, take them too: */
1570 i < path->locks_want && btree_path_node(path, i);
1572 if (!bch2_btree_node_relock(trans, path, i))
1574 btree_node_unlock(path, l);
1575 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1583 * This is the main state machine for walking down the btree - walks down to a specified depth
1586 * Returns 0 on success, -EIO on error (error reading in a btree node).
1588 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1589 * stashed in the iterator and returned from bch2_trans_exit().
1591 static int btree_path_traverse_one(struct btree_trans *trans,
1592 struct btree_path *path,
1594 unsigned long trace_ip)
1596 unsigned depth_want = path->level;
1599 if (unlikely(trans->restarted)) {
1605 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1606 * and re-traverse the path without a transaction restart:
1608 if (path->should_be_locked) {
1609 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1614 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1618 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1621 path->level = btree_path_up_until_good_node(trans, path, 0);
1624 * Note: path->nodes[path->level] may be temporarily NULL here - that
1625 * would indicate to other code that we got to the end of the btree,
1626 * here it indicates that relocking the root failed - it's critical that
1627 * btree_path_lock_root() comes next and that it can't fail
1629 while (path->level > depth_want) {
1630 ret = btree_path_node(path, path->level)
1631 ? btree_path_down(trans, path, flags, trace_ip)
1632 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1633 if (unlikely(ret)) {
1636 * No nodes at this level - got to the end of the btree:
1643 __bch2_btree_path_unlock(path);
1644 path->level = depth_want;
1647 path->l[path->level].b =
1648 BTREE_ITER_NO_NODE_ERROR;
1650 path->l[path->level].b =
1651 BTREE_ITER_NO_NODE_DOWN;
1656 path->uptodate = BTREE_ITER_UPTODATE;
1658 BUG_ON((ret == -EINTR) != !!trans->restarted);
1659 bch2_btree_path_verify(trans, path);
1663 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1664 struct btree_path *path, unsigned flags)
1666 if (!(local_clock() % 128))
1667 return btree_trans_restart(trans);
1669 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1672 return bch2_trans_cond_resched(trans) ?:
1673 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1676 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1677 struct btree_path *src)
1679 unsigned i, offset = offsetof(struct btree_path, pos);
1681 memcpy((void *) dst + offset,
1682 (void *) src + offset,
1683 sizeof(struct btree_path) - offset);
1685 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1686 if (btree_node_locked(dst, i))
1687 six_lock_increment(&dst->l[i].b->c.lock,
1688 __btree_lock_want(dst, i));
1690 bch2_btree_path_check_sort(trans, dst, 0);
1693 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1696 struct btree_path *new = btree_path_alloc(trans, src);
1698 btree_path_copy(trans, new, src);
1699 __btree_path_get(new, intent);
1703 inline struct btree_path * __must_check
1704 bch2_btree_path_make_mut(struct btree_trans *trans,
1705 struct btree_path *path, bool intent,
1708 if (path->ref > 1 || path->preserve) {
1709 __btree_path_put(path, intent);
1710 path = btree_path_clone(trans, path, intent);
1711 path->preserve = false;
1712 #ifdef CONFIG_BCACHEFS_DEBUG
1713 path->ip_allocated = ip;
1715 btree_trans_verify_sorted(trans);
1718 path->should_be_locked = false;
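/*
 * In other words, paths are copy-on-write: if anyone else holds a reference
 * to this path (or it's marked preserve), the caller gets a private clone to
 * modify and drops its reference on the original.
 */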
1722 struct btree_path * __must_check
1723 bch2_btree_path_set_pos(struct btree_trans *trans,
1724 struct btree_path *path, struct bpos new_pos,
1725 bool intent, unsigned long ip)
1727 int cmp = bpos_cmp(new_pos, path->pos);
1728 unsigned l = path->level;
1730 EBUG_ON(trans->restarted);
1731 EBUG_ON(!path->ref);
1736 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1738 path->pos = new_pos;
1740 bch2_btree_path_check_sort(trans, path, cmp);
1742 if (unlikely(path->cached)) {
1743 btree_node_unlock(path, 0);
1744 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1745 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1749 l = btree_path_up_until_good_node(trans, path, cmp);
1751 if (btree_path_node(path, l)) {
1752 BUG_ON(!btree_node_locked(path, l));
1754 * We might have to skip over many keys, or just a few: try
1755 * advancing the node iterator, and if we have to skip over too
1756 * many keys just reinit it (or if we're rewinding, since that is expensive).
1760 !btree_path_advance_to_pos(path, &path->l[l], 8))
1761 __btree_path_level_init(path, l);
1764 if (l != path->level) {
1765 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1766 __bch2_btree_path_unlock(path);
1769 bch2_btree_path_verify(trans, path);
1773 /* Btree path: main interface: */
1775 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1777 struct btree_path *next;
1779 next = prev_btree_path(trans, path);
1780 if (next && !btree_path_cmp(next, path))
1783 next = next_btree_path(trans, path);
1784 if (next && !btree_path_cmp(next, path))
1790 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1792 struct btree_path *next;
1794 next = prev_btree_path(trans, path);
1795 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1798 next = next_btree_path(trans, path);
1799 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1805 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1807 __bch2_btree_path_unlock(path);
1808 btree_path_list_remove(trans, path);
1809 trans->paths_allocated &= ~(1ULL << path->idx);
1812 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1814 struct btree_path *dup;
1816 EBUG_ON(trans->paths + path->idx != path);
1817 EBUG_ON(!path->ref);
1819 if (!__btree_path_put(path, intent))
1823 * Perhaps instead we should check for duplicate paths in traverse_all:
1825 if (path->preserve &&
1826 (dup = have_path_at_pos(trans, path))) {
1827 dup->preserve = true;
1828 path->preserve = false;
1832 if (!path->preserve &&
1833 (dup = have_node_at_pos(trans, path)))
1837 if (path->should_be_locked &&
1838 !btree_node_locked(dup, path->level))
1841 dup->should_be_locked |= path->should_be_locked;
1842 __bch2_path_free(trans, path);
1845 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1847 struct btree_insert_entry *i;
1849 prt_printf(buf, "transaction updates for %s journal seq %llu",
1850 trans->fn, trans->journal_res.seq);
1852 printbuf_indent_add(buf, 2);
1854 trans_for_each_update(trans, i) {
1855 struct bkey_s_c old = { &i->old_k, i->old_v };
1857 prt_printf(buf, "update: btree=%s cached=%u %pS",
1858 bch2_btree_ids[i->btree_id],
1860 (void *) i->ip_allocated);
1863 prt_printf(buf, " old ");
1864 bch2_bkey_val_to_text(buf, trans->c, old);
1867 prt_printf(buf, " new ");
1868 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1872 printbuf_indent_sub(buf, 2);
1876 void bch2_dump_trans_updates(struct btree_trans *trans)
1878 struct printbuf buf = PRINTBUF;
1880 bch2_trans_updates_to_text(&buf, trans);
1881 bch_err(trans->c, "%s", buf.buf);
1882 printbuf_exit(&buf);
1886 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1888 struct btree_path *path;
1889 struct printbuf buf = PRINTBUF;
1892 trans_for_each_path_inorder(trans, path, idx) {
1893 printbuf_reset(&buf);
1895 bch2_bpos_to_text(&buf, path->pos);
1897 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
1898 path->idx, path->ref, path->intent_ref,
1899 path->should_be_locked ? " S" : "",
1900 path->preserve ? " P" : "",
1901 bch2_btree_ids[path->btree_id],
1905 #ifdef CONFIG_BCACHEFS_DEBUG
1906 (void *) path->ip_allocated
1913 printbuf_exit(&buf);
1915 bch2_dump_trans_updates(trans);
1918 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1919 struct btree_path *pos)
1921 struct btree_path *path;
1924 if (unlikely(trans->paths_allocated ==
1925 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1926 bch2_dump_trans_paths_updates(trans);
1927 panic("trans path overflow\n");
1930 idx = __ffs64(~trans->paths_allocated);
1931 trans->paths_allocated |= 1ULL << idx;
1933 path = &trans->paths[idx];
1937 path->intent_ref = 0;
1938 path->nodes_locked = 0;
1939 path->nodes_intent_locked = 0;
1941 btree_path_list_add(trans, pos, path);
1945 struct btree_path *bch2_path_get(struct btree_trans *trans,
1946 enum btree_id btree_id, struct bpos pos,
1947 unsigned locks_want, unsigned level,
1948 unsigned flags, unsigned long ip)
1950 struct btree_path *path, *path_pos = NULL;
1951 bool cached = flags & BTREE_ITER_CACHED;
1952 bool intent = flags & BTREE_ITER_INTENT;
1955 BUG_ON(trans->restarted);
1956 btree_trans_verify_sorted(trans);
1957 bch2_trans_verify_locks(trans);
1959 trans_for_each_path_inorder(trans, path, i) {
1960 if (__btree_path_cmp(path,
1971 path_pos->cached == cached &&
1972 path_pos->btree_id == btree_id &&
1973 path_pos->level == level) {
1974 __btree_path_get(path_pos, intent);
1975 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1977 path = btree_path_alloc(trans, path_pos);
1980 __btree_path_get(path, intent);
1982 path->btree_id = btree_id;
1983 path->cached = cached;
1984 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1985 path->should_be_locked = false;
1986 path->level = level;
1987 path->locks_want = locks_want;
1988 path->nodes_locked = 0;
1989 path->nodes_intent_locked = 0;
1990 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1991 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1992 #ifdef CONFIG_BCACHEFS_DEBUG
1993 path->ip_allocated = ip;
1995 btree_trans_verify_sorted(trans);
1998 if (!(flags & BTREE_ITER_NOPRESERVE))
1999 path->preserve = true;
2001 if (path->intent_ref)
2002 locks_want = max(locks_want, level + 1);
2005 * If the path has locks_want greater than requested, we don't downgrade
2006 * it here - on transaction restart because btree node split needs to
2007 * upgrade locks, we might be putting/getting the iterator again.
2008 * Downgrading iterators only happens via bch2_trans_downgrade(), after
2009 * a successful transaction commit.
2012 locks_want = min(locks_want, BTREE_MAX_DEPTH);
2013 if (locks_want > path->locks_want) {
2014 path->locks_want = locks_want;
2015 btree_path_get_locks(trans, path, true);
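/*
 * Illustrative usage (a sketch, not from this file): paths returned by
 * bch2_path_get() are refcounted, so a direct user pairs it with
 * bch2_path_put():
 *
 *	path = bch2_path_get(trans, BTREE_ID_inodes, pos, 1, 0,
 *			     BTREE_ITER_INTENT, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, path, 0);
 *	if (!ret)
 *		k = bch2_btree_path_peek_slot(path, &u);
 *	bch2_path_put(trans, path, true);
 *
 * Most code goes through struct btree_iter instead, which manages its paths
 * via these same calls.
 */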
2021 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
2026 if (!path->cached) {
2027 struct btree_path_level *l = path_l(path);
2028 struct bkey_packed *_k;
2030 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2032 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
2033 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
2035 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
2037 if (!k.k || bpos_cmp(path->pos, k.k->p))
2040 struct bkey_cached *ck = (void *) path->l[0].b;
2043 (path->btree_id != ck->key.btree_id ||
2044 bkey_cmp(path->pos, ck->key.pos)));
2046 /* BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE? */
2047 if (unlikely(!ck || !ck->valid))
2048 return bkey_s_c_null;
2050 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2053 k = bkey_i_to_s_c(ck->k);
2060 return (struct bkey_s_c) { u, NULL };
2063 /* Btree iterators: */
2066 __bch2_btree_iter_traverse(struct btree_iter *iter)
2068 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2072 bch2_btree_iter_traverse(struct btree_iter *iter)
2076 iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
2077 btree_iter_search_key(iter),
2078 iter->flags & BTREE_ITER_INTENT,
2079 btree_iter_ip_allocated(iter));
2081 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2085 iter->path->should_be_locked = true;
2089 /* Iterate across nodes (leaf and interior nodes) */
2091 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2093 struct btree_trans *trans = iter->trans;
2094 struct btree *b = NULL;
2097 EBUG_ON(iter->path->cached);
2098 bch2_btree_iter_verify(iter);
2100 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2104 b = btree_path_node(iter->path, iter->path->level);
2108 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2110 bkey_init(&iter->k);
2111 iter->k.p = iter->pos = b->key.k.p;
2113 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2114 iter->flags & BTREE_ITER_INTENT,
2115 btree_iter_ip_allocated(iter));
2116 iter->path->should_be_locked = true;
2117 BUG_ON(iter->path->uptodate);
2119 bch2_btree_iter_verify_entry_exit(iter);
2120 bch2_btree_iter_verify(iter);
2128 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2130 struct btree_trans *trans = iter->trans;
2131 struct btree_path *path = iter->path;
2132 struct btree *b = NULL;
2135 BUG_ON(trans->restarted);
2136 EBUG_ON(iter->path->cached);
2137 bch2_btree_iter_verify(iter);
2139 /* already at end? */
2140 if (!btree_path_node(path, path->level))
2144 if (!btree_path_node(path, path->level + 1)) {
2145 btree_path_set_level_up(path);
2149 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2150 __bch2_btree_path_unlock(path);
2151 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2152 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2153 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2154 trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
2155 path->btree_id, &path->pos);
2156 btree_trans_restart(trans);
2161 b = btree_path_node(path, path->level + 1);
2163 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2164 btree_node_unlock(path, path->level);
2165 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2169 * Haven't gotten to the end of the parent node: go back down to
2170 * the next child node
2173 bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2174 iter->flags & BTREE_ITER_INTENT,
2175 btree_iter_ip_allocated(iter));
2177 btree_path_set_level_down(trans, path, iter->min_depth);
2179 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2183 b = path->l[path->level].b;
2186 bkey_init(&iter->k);
2187 iter->k.p = iter->pos = b->key.k.p;
2189 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2190 iter->flags & BTREE_ITER_INTENT,
2191 btree_iter_ip_allocated(iter));
2192 iter->path->should_be_locked = true;
2193 BUG_ON(iter->path->uptodate);
2195 bch2_btree_iter_verify_entry_exit(iter);
2196 bch2_btree_iter_verify(iter);
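/*
 * Illustrative sketch of walking btree nodes with the two functions above
 * (error handling omitted; the for_each_btree_node() macro in btree_iter.h
 * wraps roughly this pattern):
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b;
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		// ... use b ...
 *	}
 */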
2204 /* Iterate across keys (in leaf nodes only) */
2206 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2208 if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
2209 struct bpos pos = iter->k.p;
2210 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2211 ? bpos_cmp(pos, SPOS_MAX)
2212 : bkey_cmp(pos, SPOS_MAX)) != 0;
2214 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2215 pos = bkey_successor(iter, pos);
2216 bch2_btree_iter_set_pos(iter, pos);
2219 if (!btree_path_node(iter->path, iter->path->level))
2222 iter->advanced = true;
2227 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2229 struct bpos pos = bkey_start_pos(&iter->k);
2230 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2231 ? bpos_cmp(pos, POS_MIN)
2232 : bkey_cmp(pos, POS_MIN)) != 0;
2234 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2235 pos = bkey_predecessor(iter, pos);
2236 bch2_btree_iter_set_pos(iter, pos);
2240 static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
2241 enum btree_id btree_id,
2244 struct btree_insert_entry *i;
2245 struct bkey_i *ret = NULL;
2247 trans_for_each_update(trans, i) {
2248 if (i->btree_id < btree_id)
2250 if (i->btree_id > btree_id)
2252 if (bpos_cmp(i->k->k.p, pos) < 0)
2254 if (i->key_cache_already_flushed)
2256 if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
2263 struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
2264 struct btree_iter *iter,
2265 struct bpos start_pos,
2266 struct bpos end_pos)
2270 if (bpos_cmp(start_pos, iter->journal_pos) < 0)
2271 iter->journal_idx = 0;
2273 k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
2275 &iter->journal_idx);
2277 iter->journal_pos = k ? k->k.p : end_pos;
2281 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
2282 struct btree_iter *iter,
2285 return bch2_btree_journal_peek(trans, iter, pos, pos);
2289 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2290 struct btree_iter *iter,
2293 struct bkey_i *next_journal =
2294 bch2_btree_journal_peek(trans, iter, iter->path->pos,
2295 k.k ? k.k->p : iter->path->l[0].b->key.k.p);
2298 iter->k = next_journal->k;
2299 k = bkey_i_to_s_c(next_journal);
2306 * Checks btree key cache for key at iter->pos and returns it if present, or bkey_s_c_null:
2310 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2312 struct btree_trans *trans = iter->trans;
2313 struct bch_fs *c = trans->c;
2317 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2318 return bkey_s_c_null;
2320 if (!iter->key_cache_path)
2321 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2322 iter->flags & BTREE_ITER_INTENT, 0,
2323 iter->flags|BTREE_ITER_CACHED,
2326 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2327 iter->flags & BTREE_ITER_INTENT,
2328 btree_iter_ip_allocated(iter));
2330 ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
2332 return bkey_s_c_err(ret);
2334 iter->key_cache_path->should_be_locked = true;
2336 return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
2339 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2341 struct btree_trans *trans = iter->trans;
2342 struct bkey_i *next_update;
2343 struct bkey_s_c k, k2;
2346 EBUG_ON(iter->path->cached || iter->path->level);
2347 bch2_btree_iter_verify(iter);
2350 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2351 iter->flags & BTREE_ITER_INTENT,
2352 btree_iter_ip_allocated(iter));
2354 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2355 if (unlikely(ret)) {
2356 /* ensure that iter->k is consistent with iter->pos: */
2357 bch2_btree_iter_set_pos(iter, iter->pos);
2358 k = bkey_s_c_err(ret);
2362 iter->path->should_be_locked = true;
2364 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2366 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2368 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2372 bch2_btree_iter_set_pos(iter, iter->pos);
2380 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2381 k = btree_trans_peek_journal(trans, iter, k);
2383 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2384 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2387 bpos_cmp(next_update->k.p,
2388 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2389 iter->k = next_update->k;
2390 k = bkey_i_to_s_c(next_update);
2393 if (k.k && bkey_deleted(k.k)) {
2395 * If we've got a whiteout, and it's after the search
2396 * key, advance the search key to the whiteout instead
2397 * of just after the whiteout - it might be a btree
2398 * whiteout, with a real key at the same position, since
2399 * in the btree deleted keys sort before non deleted.
2401 search_key = bpos_cmp(search_key, k.k->p)
2403 : bpos_successor(k.k->p);
2409 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2410 /* Advance to next leaf node: */
2411 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2414 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2420 bch2_btree_iter_verify(iter);
2426 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
2429 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2431 struct btree_trans *trans = iter->trans;
2432 struct bpos search_key = btree_iter_search_key(iter);
2434 struct bpos iter_pos;
2437 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2439 if (iter->update_path) {
2440 bch2_path_put(trans, iter->update_path,
2441 iter->flags & BTREE_ITER_INTENT);
2442 iter->update_path = NULL;
2445 bch2_btree_iter_verify_entry_exit(iter);
2448 k = __bch2_btree_iter_peek(iter, search_key);
2449 if (!k.k || bkey_err(k))
2453 * iter->pos should be monotonically increasing, and always be
2454 * equal to the key we just returned - except extents can
2455 * straddle iter->pos:
2457 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2459 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2460 iter_pos = bkey_start_pos(k.k);
2462 iter_pos = iter->pos;
2464 if (bkey_cmp(iter_pos, end) > 0) {
2465 bch2_btree_iter_set_pos(iter, end);
2470 if (iter->update_path &&
2471 bkey_cmp(iter->update_path->pos, k.k->p)) {
2472 bch2_path_put(trans, iter->update_path,
2473 iter->flags & BTREE_ITER_INTENT);
2474 iter->update_path = NULL;
2477 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2478 (iter->flags & BTREE_ITER_INTENT) &&
2479 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2480 !iter->update_path) {
2481 struct bpos pos = k.k->p;
2483 if (pos.snapshot < iter->snapshot) {
2484 search_key = bpos_successor(k.k->p);
2488 pos.snapshot = iter->snapshot;
2491 * advance, same as on exit for iter->path, but only up to snapshot
2494 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2495 iter->update_path = iter->path;
2497 iter->update_path = bch2_btree_path_set_pos(trans,
2498 iter->update_path, pos,
2499 iter->flags & BTREE_ITER_INTENT,
2504 * We can never have a key in a leaf node at POS_MAX, so
2505 * we don't have to check these successor() calls:
2507 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2508 !bch2_snapshot_is_ancestor(trans->c,
2511 search_key = bpos_successor(k.k->p);
2515 if (bkey_whiteout(k.k) &&
2516 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2517 search_key = bkey_successor(iter, k.k->p);
2524 iter->pos = iter_pos;
2526 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2527 iter->flags & BTREE_ITER_INTENT,
2528 btree_iter_ip_allocated(iter));
2529 BUG_ON(!iter->path->nodes_locked);
2531 if (iter->update_path) {
2532 if (iter->update_path->uptodate &&
2533 !bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)) {
2534 k = bkey_s_c_err(-EINTR);
2536 BUG_ON(!(iter->update_path->nodes_locked & 1));
2537 iter->update_path->should_be_locked = true;
2540 iter->path->should_be_locked = true;
2542 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2543 iter->pos.snapshot = iter->snapshot;
2545 ret = bch2_btree_iter_verify_ret(iter, k);
2546 if (unlikely(ret)) {
2547 bch2_btree_iter_set_pos(iter, iter->pos);
2548 k = bkey_s_c_err(ret);
2551 bch2_btree_iter_verify_entry_exit(iter);
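/*
 * Typical use of bch2_btree_iter_peek_upto() looks roughly like the following
 * (illustrative sketch only; transaction restart handling elided):
 *
 *	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k &&
 *	       !(ret = bkey_err(k))) {
 *		... process k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 */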
2557 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2558 * to iterator's current position, returning keys from every level of the btree.
2559 * For keys at different levels of the btree that compare equal, the key from
2560 * the lower level (leaf) is returned first.
2562 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
2564 struct btree_trans *trans = iter->trans;
2568 EBUG_ON(iter->path->cached);
2569 bch2_btree_iter_verify(iter);
2570 BUG_ON(iter->path->level < iter->min_depth);
2571 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2572 EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
2575 iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
2576 iter->flags & BTREE_ITER_INTENT,
2577 btree_iter_ip_allocated(iter));
2579 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2580 if (unlikely(ret)) {
2581 /* ensure that iter->k is consistent with iter->pos: */
2582 bch2_btree_iter_set_pos(iter, iter->pos);
2583 k = bkey_s_c_err(ret);
2587 /* Already at end? */
2588 if (!btree_path_node(iter->path, iter->path->level)) {
2593 k = btree_path_level_peek_all(trans->c,
2594 &iter->path->l[iter->path->level], &iter->k);
2596 /* Check if we should go up to the parent node: */
2599 !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
2600 iter->pos = path_l(iter->path)->b->key.k.p;
2601 btree_path_set_level_up(iter->path);
2602 iter->advanced = false;
2607 * Check if we should go back down to a leaf:
2608 * If we're not in a leaf node, we only return the current key
2609 * if it exactly matches iter->pos - otherwise we first have to
2610 * go back to the leaf:
2612 if (iter->path->level != iter->min_depth &&
2615 bpos_cmp(iter->pos, k.k->p))) {
2616 btree_path_set_level_down(trans, iter->path, iter->min_depth);
2617 iter->pos = bpos_successor(iter->pos);
2618 iter->advanced = false;
2622 /* Check if we should go to the next key: */
2623 if (iter->path->level == iter->min_depth &&
2626 !bpos_cmp(iter->pos, k.k->p)) {
2627 iter->pos = bpos_successor(iter->pos);
2628 iter->advanced = false;
2632 if (iter->advanced &&
2633 iter->path->level == iter->min_depth &&
2634 bpos_cmp(k.k->p, iter->pos))
2635 iter->advanced = false;
2637 BUG_ON(iter->advanced);
2644 iter->path->should_be_locked = true;
2645 bch2_btree_iter_verify(iter);
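/*
 * bch2_btree_iter_peek_all_levels() uses iter->advanced to decide whether to
 * step the position forwards on the next pass of its loop; it walks up a
 * level when the current node is exhausted, and back down to iter->min_depth
 * when a key at a higher level no longer matches iter->pos exactly.
 */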
2651 * bch2_btree_iter_next: returns first key greater than iterator's current position
2654 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2656 if (!bch2_btree_iter_advance(iter))
2657 return bkey_s_c_null;
2659 return bch2_btree_iter_peek(iter);
2663 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2664 * iterator's current position
2666 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2668 struct btree_trans *trans = iter->trans;
2669 struct bpos search_key = iter->pos;
2670 struct btree_path *saved_path = NULL;
2672 struct bkey saved_k;
2673 const struct bch_val *saved_v;
2676 EBUG_ON(iter->path->cached || iter->path->level);
2677 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2679 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2680 return bkey_s_c_err(-EIO);
2682 bch2_btree_iter_verify(iter);
2683 bch2_btree_iter_verify_entry_exit(iter);
2685 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2686 search_key.snapshot = U32_MAX;
2689 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2690 iter->flags & BTREE_ITER_INTENT,
2691 btree_iter_ip_allocated(iter));
2693 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2694 if (unlikely(ret)) {
2695 /* ensure that iter->k is consistent with iter->pos: */
2696 bch2_btree_iter_set_pos(iter, iter->pos);
2697 k = bkey_s_c_err(ret);
2701 k = btree_path_level_peek(trans->c, iter->path,
2702 &iter->path->l[0], &iter->k);
2704 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2705 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2706 : bpos_cmp(k.k->p, search_key) > 0))
2707 k = btree_path_level_prev(trans->c, iter->path,
2708 &iter->path->l[0], &iter->k);
2710 bch2_btree_path_check_sort(trans, iter->path, 0);
2713 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2714 if (k.k->p.snapshot == iter->snapshot)
2718 * If we have a saved candidate, and we're no
2719 * longer at the same _key_ (not pos), return it:
2722 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2723 bch2_path_put(trans, iter->path,
2724 iter->flags & BTREE_ITER_INTENT);
2725 iter->path = saved_path;
2732 if (bch2_snapshot_is_ancestor(iter->trans->c,
2736 bch2_path_put(trans, saved_path,
2737 iter->flags & BTREE_ITER_INTENT);
2738 saved_path = btree_path_clone(trans, iter->path,
2739 iter->flags & BTREE_ITER_INTENT);
2744 search_key = bpos_predecessor(k.k->p);
2748 if (bkey_whiteout(k.k) &&
2749 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2750 search_key = bkey_predecessor(iter, k.k->p);
2751 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2752 search_key.snapshot = U32_MAX;
2757 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2758 /* Advance to previous leaf node: */
2759 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2761 /* Start of btree: */
2762 bch2_btree_iter_set_pos(iter, POS_MIN);
2768 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2770 /* Extents can straddle iter->pos: */
2771 if (bkey_cmp(k.k->p, iter->pos) < 0)
2774 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2775 iter->pos.snapshot = iter->snapshot;
2778 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2779 iter->path->should_be_locked = true;
2781 bch2_btree_iter_verify_entry_exit(iter);
2782 bch2_btree_iter_verify(iter);
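/*
 * Going backwards with BTREE_ITER_FILTER_SNAPSHOTS is trickier than going
 * forwards: bch2_btree_iter_peek_prev() saves a cloned path plus candidate
 * key whenever it sees a version of a key in an ancestor snapshot, and only
 * returns that candidate once the scan has moved on to a different key, so
 * that snapshot filtering still yields the right version of each key.
 */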
2788 * bch2_btree_iter_prev: returns first key less than iterator's current position
2791 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2793 if (!bch2_btree_iter_rewind(iter))
2794 return bkey_s_c_null;
2796 return bch2_btree_iter_peek_prev(iter);
2799 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2801 struct btree_trans *trans = iter->trans;
2802 struct bpos search_key;
2806 bch2_btree_iter_verify(iter);
2807 bch2_btree_iter_verify_entry_exit(iter);
2808 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2809 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2811 /* extents can't span inode numbers: */
2812 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2813 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2814 if (iter->pos.inode == KEY_INODE_MAX)
2815 return bkey_s_c_null;
2817 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2820 search_key = btree_iter_search_key(iter);
2821 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2822 iter->flags & BTREE_ITER_INTENT,
2823 btree_iter_ip_allocated(iter));
2825 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2827 return bkey_s_c_err(ret);
2829 if ((iter->flags & BTREE_ITER_CACHED) ||
2830 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2831 struct bkey_i *next_update;
2833 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2834 (next_update = btree_trans_peek_updates(trans,
2835 iter->btree_id, search_key)) &&
2836 !bpos_cmp(next_update->k.p, iter->pos)) {
2837 iter->k = next_update->k;
2838 k = bkey_i_to_s_c(next_update);
2842 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2843 (next_update = bch2_btree_journal_peek_slot(trans,
2844 iter, iter->pos))) {
2845 iter->k = next_update->k;
2846 k = bkey_i_to_s_c(next_update);
2850 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2851 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2857 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2861 EBUG_ON(iter->path->level);
2863 if (iter->flags & BTREE_ITER_INTENT) {
2864 struct btree_iter iter2;
2865 struct bpos end = iter->pos;
2867 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2868 end.offset = U64_MAX;
2870 bch2_trans_copy_iter(&iter2, iter);
2871 k = bch2_btree_iter_peek_upto(&iter2, end);
2873 if (k.k && !bkey_err(k)) {
2877 bch2_trans_iter_exit(trans, &iter2);
2879 struct bpos pos = iter->pos;
2881 k = bch2_btree_iter_peek(iter);
2885 if (unlikely(bkey_err(k)))
2888 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2890 if (bkey_cmp(iter->pos, next) < 0) {
2891 bkey_init(&iter->k);
2892 iter->k.p = iter->pos;
2894 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2895 bch2_key_resize(&iter->k,
2896 min_t(u64, KEY_SIZE_MAX,
2897 (next.inode == iter->pos.inode
2901 EBUG_ON(!iter->k.size);
2904 k = (struct bkey_s_c) { &iter->k, NULL };
2908 iter->path->should_be_locked = true;
2910 bch2_btree_iter_verify_entry_exit(iter);
2911 bch2_btree_iter_verify(iter);
2912 ret = bch2_btree_iter_verify_ret(iter, k);
2914 return bkey_s_c_err(ret);
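/*
 * Unlike peek(), bch2_btree_iter_peek_slot() always returns a key for the
 * slot at exactly iter->pos: if nothing is there, iter->k is initialized as a
 * deleted key (and, for extents, sized to cover the hole up to the next key)
 * and that synthesized key is returned. A rough usage sketch, with error
 * handling elided:
 *
 *	bch2_trans_iter_init(trans, &iter, btree_id, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	...
 *	bch2_trans_iter_exit(trans, &iter);
 */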
2919 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2921 if (!bch2_btree_iter_advance(iter))
2922 return bkey_s_c_null;
2924 return bch2_btree_iter_peek_slot(iter);
2927 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2929 if (!bch2_btree_iter_rewind(iter))
2930 return bkey_s_c_null;
2932 return bch2_btree_iter_peek_slot(iter);
2935 /* new transactional stuff: */
2937 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2938 struct btree_path *path)
2940 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2941 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2942 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2945 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2947 #ifdef CONFIG_BCACHEFS_DEBUG
2950 for (i = 0; i < trans->nr_sorted; i++)
2951 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2955 static void btree_trans_verify_sorted(struct btree_trans *trans)
2957 #ifdef CONFIG_BCACHEFS_DEBUG
2958 struct btree_path *path, *prev = NULL;
2961 if (!bch2_debug_check_iterators)
2964 trans_for_each_path_inorder(trans, path, i) {
2965 if (prev && btree_path_cmp(prev, path) > 0) {
2966 bch2_dump_trans_paths_updates(trans);
2967 panic("trans paths out of order!\n");
2974 static inline void btree_path_swap(struct btree_trans *trans,
2975 struct btree_path *l, struct btree_path *r)
2977 swap(l->sorted_idx, r->sorted_idx);
2978 swap(trans->sorted[l->sorted_idx],
2979 trans->sorted[r->sorted_idx]);
2981 btree_path_verify_sorted_ref(trans, l);
2982 btree_path_verify_sorted_ref(trans, r);
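/*
 * After a path's position (or another component of its sort key) changes,
 * re-establish sorted order by bubbling the path backwards or forwards past
 * its neighbours one swap at a time - effectively a single insertion-sort
 * step over trans->sorted.
 */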
2985 inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2988 struct btree_path *n;
2991 n = prev_btree_path(trans, path);
2992 if (n && btree_path_cmp(n, path) > 0) {
2994 btree_path_swap(trans, n, path);
2995 n = prev_btree_path(trans, path);
2996 } while (n && btree_path_cmp(n, path) > 0);
3003 n = next_btree_path(trans, path);
3004 if (n && btree_path_cmp(path, n) > 0) {
3006 btree_path_swap(trans, path, n);
3007 n = next_btree_path(trans, path);
3008 } while (n && btree_path_cmp(path, n) > 0);
3012 btree_trans_verify_sorted(trans);
3015 static inline void btree_path_list_remove(struct btree_trans *trans,
3016 struct btree_path *path)
3020 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
3022 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3024 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3025 trans->paths[trans->sorted[i]].sorted_idx = i;
3027 path->sorted_idx = U8_MAX;
3029 btree_trans_verify_sorted_refs(trans);
3032 static inline void btree_path_list_add(struct btree_trans *trans,
3033 struct btree_path *pos,
3034 struct btree_path *path)
3038 btree_trans_verify_sorted_refs(trans);
3040 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
3042 if (trans->in_traverse_all &&
3043 trans->traverse_all_idx != U8_MAX &&
3044 trans->traverse_all_idx >= path->sorted_idx)
3045 trans->traverse_all_idx++;
3047 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
3049 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3050 trans->paths[trans->sorted[i]].sorted_idx = i;
3052 btree_trans_verify_sorted_refs(trans);
3055 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
3058 bch2_path_put(trans, iter->path,
3059 iter->flags & BTREE_ITER_INTENT);
3060 if (iter->update_path)
3061 bch2_path_put(trans, iter->update_path,
3062 iter->flags & BTREE_ITER_INTENT);
3063 if (iter->key_cache_path)
3064 bch2_path_put(trans, iter->key_cache_path,
3065 iter->flags & BTREE_ITER_INTENT);
3067 iter->update_path = NULL;
3068 iter->key_cache_path = NULL;
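/*
 * __bch2_trans_iter_init() translates the caller's iterator flags into the
 * internal ones: extent btrees get BTREE_ITER_IS_EXTENTS unless overridden,
 * snapshot btrees get BTREE_ITER_FILTER_SNAPSHOTS when we're not iterating
 * over all snapshots, and BTREE_ITER_WITH_JOURNAL is forced on until journal
 * replay has finished.
 */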
3071 static void __bch2_trans_iter_init(struct btree_trans *trans,
3072 struct btree_iter *iter,
3073 unsigned btree_id, struct bpos pos,
3074 unsigned locks_want,
3079 EBUG_ON(trans->restarted);
3081 if (flags & BTREE_ITER_ALL_LEVELS)
3082 flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
3084 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
3085 btree_node_type_is_extents(btree_id))
3086 flags |= BTREE_ITER_IS_EXTENTS;
3088 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
3089 !btree_type_has_snapshots(btree_id))
3090 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
3092 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
3093 btree_type_has_snapshots(btree_id))
3094 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
3096 if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
3097 flags |= BTREE_ITER_WITH_JOURNAL;
3099 iter->trans = trans;
3101 iter->update_path = NULL;
3102 iter->key_cache_path = NULL;
3103 iter->btree_id = btree_id;
3104 iter->min_depth = depth;
3105 iter->flags = flags;
3106 iter->snapshot = pos.snapshot;
3108 iter->k.type = KEY_TYPE_deleted;
3111 iter->journal_idx = 0;
3112 iter->journal_pos = POS_MIN;
3113 #ifdef CONFIG_BCACHEFS_DEBUG
3114 iter->ip_allocated = ip;
3117 iter->path = bch2_path_get(trans, btree_id, iter->pos,
3118 locks_want, depth, flags, ip);
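/*
 * bch2_trans_iter_init() additionally decides how the key cache is used: for
 * btrees that aren't key cached the cached flags are stripped; otherwise an
 * uncached iterator is made to also check the key cache via
 * BTREE_ITER_WITH_KEY_CACHE.
 */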
3121 void bch2_trans_iter_init(struct btree_trans *trans,
3122 struct btree_iter *iter,
3123 unsigned btree_id, struct bpos pos,
3126 if (!btree_id_cached(trans->c, btree_id)) {
3127 flags &= ~BTREE_ITER_CACHED;
3128 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
3129 } else if (!(flags & BTREE_ITER_CACHED))
3130 flags |= BTREE_ITER_WITH_KEY_CACHE;
3132 __bch2_trans_iter_init(trans, iter, btree_id, pos,
3133 0, 0, flags, _RET_IP_);
3136 void bch2_trans_node_iter_init(struct btree_trans *trans,
3137 struct btree_iter *iter,
3138 enum btree_id btree_id,
3140 unsigned locks_want,
3144 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
3145 BTREE_ITER_NOT_EXTENTS|
3146 __BTREE_ITER_ALL_SNAPSHOTS|
3147 BTREE_ITER_ALL_SNAPSHOTS|
3149 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
3150 BUG_ON(iter->path->level != depth);
3151 BUG_ON(iter->min_depth != depth);
3154 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
3158 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
3159 if (src->update_path)
3160 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
3161 dst->key_cache_path = NULL;
3164 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
3166 size_t new_top = trans->mem_top + size;
3169 if (new_top > trans->mem_bytes) {
3170 size_t old_bytes = trans->mem_bytes;
3171 size_t new_bytes = roundup_pow_of_two(new_top);
3174 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3176 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
3177 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3178 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
3179 new_bytes = BTREE_TRANS_MEM_MAX;
3184 return ERR_PTR(-ENOMEM);
3186 trans->mem = new_mem;
3187 trans->mem_bytes = new_bytes;
3190 trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
3191 btree_trans_restart(trans);
3192 return ERR_PTR(-EINTR);
3196 p = trans->mem + trans->mem_top;
3197 trans->mem_top += size;
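/*
 * bch2_trans_kmalloc() is a bump allocator for memory that lives as long as
 * the current transaction; growing the buffer mid-transaction forces a
 * restart, so callers must handle an error return. Roughly:
 *
 *	p = bch2_trans_kmalloc(trans, size);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 */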
3203 * bch2_trans_begin() - reset a transaction after an interrupted attempt
3204 * @trans: transaction to reset
3206 * While iterating over nodes or updating nodes, an attempt to lock a btree
3207 * node may return EINTR when the trylock fails. When this occurs,
3208 * bch2_trans_begin() should be called and the transaction retried.
3210 void bch2_trans_begin(struct btree_trans *trans)
3212 struct btree_path *path;
3214 bch2_trans_reset_updates(trans);
3218 if (trans->fs_usage_deltas) {
3219 trans->fs_usage_deltas->used = 0;
3220 memset((void *) trans->fs_usage_deltas +
3221 offsetof(struct replicas_delta_list, memset_start), 0,
3222 (void *) &trans->fs_usage_deltas->memset_end -
3223 (void *) &trans->fs_usage_deltas->memset_start);
3226 trans_for_each_path(trans, path) {
3227 path->should_be_locked = false;
3230 * If the transaction wasn't restarted, we presume we're doing
3231 * something new: drop iterators that aren't in use - except for
3232 * the subvolumes btree:
3234 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3235 path->preserve = false;
3238 * XXX: we probably shouldn't be doing this if the transaction
3239 * was restarted, but currently we still overflow transaction
3240 * iterators if we do that
3242 if (!path->ref && !path->preserve)
3243 __bch2_path_free(trans, path);
3245 path->preserve = false;
3248 if (!trans->restarted &&
3250 ktime_get_ns() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
3251 bch2_trans_unlock(trans);
3253 bch2_trans_relock(trans);
3256 if (trans->restarted)
3257 bch2_btree_path_traverse_all(trans);
3259 trans->restarted = false;
3260 trans->last_begin_time = ktime_get_ns();
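/*
 * The usual caller pattern is a retry loop (illustrative sketch only;
 * do_stuff() stands in for whatever work the caller performs):
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_stuff(trans);
 *	} while (ret == -EINTR);
 */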
3263 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
3265 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
3266 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
3269 BUG_ON(trans->used_mempool);
3272 p = this_cpu_xchg(c->btree_paths_bufs->path , NULL);
3275 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
3277 trans->paths = p; p += paths_bytes;
3278 trans->updates = p; p += updates_bytes;
3281 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
3282 unsigned expected_nr_iters,
3283 size_t expected_mem_bytes,
3285 __acquires(&c->btree_trans_barrier)
3287 struct btree_trans *pos;
3289 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
3291 memset(trans, 0, sizeof(*trans));
3294 trans->last_begin_time = ktime_get_ns();
3295 trans->task = current;
3297 bch2_trans_alloc_paths(trans, c);
3299 if (expected_mem_bytes) {
3300 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
3301 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
3303 if (unlikely(!trans->mem)) {
3304 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3305 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
3309 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3311 mutex_lock(&c->btree_trans_lock);
3312 list_for_each_entry(pos, &c->btree_trans_list, list) {
3313 if (trans->task->pid < pos->task->pid) {
3314 list_add_tail(&trans->list, &pos->list);
3318 list_add_tail(&trans->list, &c->btree_trans_list);
3320 mutex_unlock(&c->btree_trans_lock);
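/*
 * Note that __bch2_trans_init() inserts the transaction into
 * c->btree_trans_list sorted by pid, which keeps listings of in-flight
 * transactions (see bch2_btree_trans_to_text()) in a stable order.
 */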
3323 static void check_btree_paths_leaked(struct btree_trans *trans)
3325 #ifdef CONFIG_BCACHEFS_DEBUG
3326 struct bch_fs *c = trans->c;
3327 struct btree_path *path;
3329 trans_for_each_path(trans, path)
3334 bch_err(c, "btree paths leaked from %s!", trans->fn);
3335 trans_for_each_path(trans, path)
3337 printk(KERN_ERR " btree %s %pS\n",
3338 bch2_btree_ids[path->btree_id],
3339 (void *) path->ip_allocated);
3340 /* Be noisy about this: */
3341 bch2_fatal_error(c);
3345 void bch2_trans_exit(struct btree_trans *trans)
3346 __releases(&c->btree_trans_barrier)
3348 struct btree_insert_entry *i;
3349 struct bch_fs *c = trans->c;
3351 bch2_trans_unlock(trans);
3353 trans_for_each_update(trans, i)
3354 __btree_path_put(i->path, true);
3355 trans->nr_updates = 0;
3357 check_btree_paths_leaked(trans);
3359 mutex_lock(&c->btree_trans_lock);
3360 list_del(&trans->list);
3361 mutex_unlock(&c->btree_trans_lock);
3363 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3365 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3367 kfree(trans->extra_journal_entries.data);
3369 if (trans->fs_usage_deltas) {
3370 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3371 REPLICAS_DELTA_LIST_MAX)
3372 mempool_free(trans->fs_usage_deltas,
3373 &c->replicas_delta_pool);
3375 kfree(trans->fs_usage_deltas);
3378 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3379 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3385 * Userspace doesn't have a real percpu implementation:
3387 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3391 mempool_free(trans->paths, &c->btree_paths_pool);
3393 trans->mem = (void *) 0x1;
3394 trans->paths = (void *) 0x1;
3397 static void __maybe_unused
3398 bch2_btree_path_node_to_text(struct printbuf *out,
3399 struct btree_bkey_cached_common *_b,
3402 prt_printf(out, " l=%u %s:",
3403 _b->level, bch2_btree_ids[_b->btree_id]);
3404 bch2_bpos_to_text(out, btree_node_pos(_b, cached));
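/*
 * bch2_btree_trans_to_text() dumps a transaction's held and wanted locks; in
 * lock_types[], 'r', 'i' and 'w' stand for the six-lock read, intent and
 * write states respectively.
 */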
3407 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3409 struct btree_path *path;
3411 static char lock_types[] = { 'r', 'i', 'w' };
3414 prt_printf(out, "%i %s\n", trans->task->pid, trans->fn);
3416 trans_for_each_path(trans, path) {
3417 if (!path->nodes_locked)
3420 prt_printf(out, " path %u %c l=%u %s:",
3422 path->cached ? 'c' : 'b',
3424 bch2_btree_ids[path->btree_id]);
3425 bch2_bpos_to_text(out, path->pos);
3426 prt_printf(out, "\n");
3428 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3429 if (btree_node_locked(path, l)) {
3430 prt_printf(out, " %s l=%u ",
3431 btree_node_intent_locked(path, l) ? "i" : "r", l);
3432 bch2_btree_path_node_to_text(out,
3433 (void *) path->l[l].b,
3435 prt_printf(out, "\n");
3440 b = READ_ONCE(trans->locking);
3442 path = &trans->paths[trans->locking_path_idx];
3443 prt_printf(out, " locking path %u %c l=%u %c %s:",
3444 trans->locking_path_idx,
3445 path->cached ? 'c' : 'b',
3446 trans->locking_level,
3447 lock_types[trans->locking_lock_type],
3448 bch2_btree_ids[trans->locking_btree_id]);
3449 bch2_bpos_to_text(out, trans->locking_pos);
3451 prt_printf(out, " node ");
3452 bch2_btree_path_node_to_text(out,
3453 (void *) b, path->cached);
3454 prt_printf(out, "\n");
3458 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3460 if (c->btree_trans_barrier_initialized)
3461 cleanup_srcu_struct(&c->btree_trans_barrier);
3462 mempool_exit(&c->btree_trans_mem_pool);
3463 mempool_exit(&c->btree_paths_pool);
3466 int bch2_fs_btree_iter_init(struct bch_fs *c)
3468 unsigned nr = BTREE_ITER_MAX;
3471 INIT_LIST_HEAD(&c->btree_trans_list);
3472 mutex_init(&c->btree_trans_lock);
3474 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3475 sizeof(struct btree_path) * nr +
3476 sizeof(struct btree_insert_entry) * nr) ?:
3477 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3478 BTREE_TRANS_MEM_MAX) ?:
3479 init_srcu_struct(&c->btree_trans_barrier);
3481 c->btree_trans_barrier_initialized = true;