// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "recovery.h"
#include "subvolume.h"

#include <linux/prandom.h>
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_trans_verify_sorted(struct btree_trans *);
inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
				       struct btree_path *);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
{
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		schedule();
		return bch2_trans_relock(trans);
	} else {
		return 0;
	}
}
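/*
 * Illustrative usage (hypothetical caller, added for exposition): code
 * iterating over many keys calls this between iterations and propagates a
 * nonzero return as a transaction restart:
 *
 *	ret = bch2_trans_cond_resched(trans);
 *	if (ret)
 *		return ret;
 */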
static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int((int) l->cached,	(int) r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}
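/*
 * Note: paths therefore sort by btree id, then cached status (non-cached
 * before cached), then position, with interior (higher level) nodes
 * ordering before their descendants at the same position - the same order
 * in which __bch2_btree_node_lock() is willing to take locks.
 */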
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);

	return pos;
}
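/*
 * Note on the extents case above: the search key is the successor of
 * iter->pos because extent bkeys are positioned at their end - an extent
 * covering iter->pos compares strictly greater than iter->pos, so
 * searching at iter->pos itself would miss it.
 */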
static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_cmp(path->pos, b->data->min_key) < 0;
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_cmp(b->key.k.p, path->pos) < 0;
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret = { 0, 0 };

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path)
		if (path != skip && path->l[level].b == b) {
			ret.read += btree_node_read_locked(path, level);
			ret.intent += btree_node_intent_locked(path, level);
		}

	return ret;
}

static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
	if (!lock->readers)
		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
	else
		this_cpu_add(*lock->readers, nr);
}

void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).read;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->c.lock, -readers);
	six_lock_write(&b->c.lock, NULL, NULL);
	six_lock_readers_add(&b->c.lock, readers);
}
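/*
 * Note: the readers being subtracted are this transaction's own read locks
 * on @b, as counted by bch2_btree_node_lock_counts(); removing them from
 * the lock's count lets six_lock_write() succeed without self-deadlocking,
 * while the held intent lock keeps anyone else from taking the node intent
 * or write locked in the meantime.
 */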
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (!is_btree_node(path, level))
		goto fail;

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, b, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (b != ERR_PTR(-BCH_ERR_no_btree_node_cached) &&
	    b != ERR_PTR(-BCH_ERR_no_btree_node_init))
		trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
	return false;
}

bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_intent_locked(trans, path, level);
	return true;
}

static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l)))
			fail_idx = l;

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
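/*
 * Note: poisoning every level at or below the level that couldn't be
 * (re)locked with an ERR_PTR is what later forces
 * bch2_btree_path_traverse() to walk back down from a still-valid ancestor
 * rather than trusting stale child pointers.
 */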
static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  bool cached)
{
	return !cached
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}

int __bch2_btree_node_lock(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree *b,
			   struct bpos pos, unsigned level,
			   enum six_lock_type type,
			   six_lock_should_sleep_fn should_sleep_fn, void *p,
			   unsigned long ip)
{
	struct btree_path *linked;
	unsigned reason;

	/* Check if it's safe to block: */
	trans_for_each_path(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			reason = 1;
			goto deadlock;
		}

		if (linked->btree_id != path->btree_id) {
			if (linked->btree_id < path->btree_id)
				continue;

			reason = 3;
			goto deadlock;
		}

		/*
		 * Within the same btree, non-cached paths come before cached
		 * paths:
		 */
		if (linked->cached != path->cached) {
			if (!linked->cached)
				continue;

			reason = 4;
			goto deadlock;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another path has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			reason = 5;
			goto deadlock;
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 linked->cached)) <= 0) {
			reason = 7;
			goto deadlock;
		}
	}

	return btree_node_lock_type(trans, path, b, pos, level,
				    type, should_sleep_fn, p);
deadlock:
	trace_trans_restart_would_deadlock(trans, ip, reason, linked, path, &pos);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
}
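/*
 * Summary of the checks above: a path may block on a lock only when doing
 * so can't invert the global lock order (btree id, then non-cached before
 * cached, then ancestors before descendants, then key order). A would-be
 * inversion is reported via the tracepoint and resolved by restarting the
 * transaction instead of risking deadlock.
 */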
/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; btree_path_node(path, l); l++)
		BUG_ON(btree_lock_want(path, l) !=
		       btree_node_locked_type(path, l));
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
#endif

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_trans_restart_relock_path_intent(trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
static bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, false);
}

static int bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_trans_restart_relock_path(trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	struct btree_path *linked;

	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	if (btree_path_get_locks(trans, path, true))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all)
		trans_for_each_path(trans, linked)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true);
			}

	return false;
}

void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = __fls(path->nodes_locked)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				path->nodes_intent_locked ^= 1 << l;
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);
}
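/*
 * Note: downgrading never gives up the lock at path->level itself - levels
 * above locks_want are unlocked outright, while the node the path points
 * at merely has its intent lock demoted to a read lock.
 */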
void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(trans, path);
}

/* Btree transaction locking: */

int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -BCH_ERR_transaction_restart_relock;

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    bch2_btree_path_relock(trans, path, _RET_IP_)) {
			trace_trans_restart_relock(trans, _RET_IP_, path);
			BUG_ON(!trans->restarted);
			return -BCH_ERR_transaction_restart_relock;
		}

	return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);

	/*
	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
	 * btree nodes, it implements its own walking:
	 */
	BUG_ON(!trans->is_initial_gc &&
	       lock_class_is_held(&bch2_btree_node_lock_key));
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       bkey_cmp(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}
static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       c->btree_roots[path->btree_id].b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, iter->update_path);
	bch2_btree_path_verify(trans, iter->path);
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
	       bkey_cmp(iter->pos, iter->k.p) > 0);
}

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_NOPRESERVE|
			     BTREE_ITER_ALL_SNAPSHOTS);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (!bkey_cmp(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k    %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}
void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
{
	struct btree_path *path;
	unsigned idx;
	struct printbuf buf = PRINTBUF;

	trans_for_each_path_inorder(trans, path, idx) {
		int cmp = cmp_int(path->btree_id, id) ?:
			cmp_int(path->cached, key_cache);

		if (cmp > 0)
			break;
		if (cmp < 0)
			continue;

		if (!(path->nodes_locked & 1) ||
		    !path->should_be_locked)
			continue;

		if (!key_cache) {
			if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
			    bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
				return;
		} else {
			if (!bkey_cmp(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_ids[id], buf.buf,
	      key_cache ? " cached" : "");
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}
void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_path *linked;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_path_verify_new_node(struct btree_trans *trans,
				       struct btree_path *path, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
		return;

	plevel = b->c.level + 1;
	if (!btree_path_node(path, plevel))
		return;

	parent_locked = btree_node_locked(path, plevel);

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	l = &path->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_deleted(k) ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		struct printbuf buf1 = PRINTBUF;
		struct printbuf buf2 = PRINTBUF;
		struct printbuf buf3 = PRINTBUF;
		struct printbuf buf4 = PRINTBUF;
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_dump_btree_node(c, l->b);
		bch2_bpos_to_text(&buf1, path->pos);
		bch2_bkey_to_text(&buf2, &uk);
		bch2_bpos_to_text(&buf3, b->data->min_key);
		bch2_bpos_to_text(&buf4, b->data->max_key);
		panic("parent iter doesn't point to new node:\n"
		      "iter pos %s %s\n"
		      "iter key %s\n"
		      "new node %s-%s\n",
		      bch2_btree_ids[path->btree_id],
		      buf1.buf, buf2.buf, buf3.buf, buf4.buf);
	}

	if (!parent_locked)
		btree_node_unlock(trans, path, plevel);
}

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

static inline void btree_path_level_init(struct btree_trans *trans,
					 struct btree_path *path,
					 struct btree *b)
{
	BUG_ON(path->cached);

	btree_path_verify_new_node(trans, path, b);

	EBUG_ON(!btree_path_pos_in_node(path, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	path->l[b->c.level].lock_seq = b->c.lock.state.seq;
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}
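/*
 * Note: caching lock_seq here is what makes the cheap relock path work -
 * bch2_btree_node_relock() can revalidate this level with
 * six_relock_type() by checking that the lock's sequence number hasn't
 * changed since it was recorded.
 */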
/* Btree path: fixups after btree node updates: */

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (!path->cached &&
		    btree_path_pos_in_node(path, b)) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (path->nodes_locked &&
			    t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			btree_path_level_init(trans, path, b);
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path)
		__btree_path_level_init(path, b->c.level);
}

/* Btree path: traverse, set_pos: */

static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	if (b != *rootp)
		return BCH_ERR_lock_fail_root_changed;
	return 0;
}

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, b, SPOS_MAX,
				      path->level, lock_type,
				      lock_root_check_fn, rootp,
				      trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level, lock_type);
			btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
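/*
 * Note: the loop above is an optimistic-concurrency retry - the root
 * pointer is read unlocked, the lock is taken, and the root is then
 * re-checked; if it was replaced in the meantime (or the check function
 * fired), the lock is dropped and the whole sequence retried.
 */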
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if (flags & BTREE_ITER_PREFETCH)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}

static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(!replay_done)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		bch2_bkey_buf_unpack(&tmp, c, l->b,
				 bch2_btree_node_iter_peek(&l->iter, l->b));

		if (flags & BTREE_ITER_PREFETCH) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level, lock_type);
	path->level = level;
	btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
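/*
 * Note two side effects of a successful descent: with BTREE_ITER_PREFETCH,
 * upcoming sibling children are read ahead before dropping a level, and
 * for btree_ptr_v2 keys the parent key's cached in-memory pointer is
 * refreshed via btree_node_mem_ptr_set() when it's found to be stale.
 */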
static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
				   unsigned, unsigned long);

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	int i, ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->traverse_all_idx = U8_MAX;

	trans_for_each_path(trans, path)
		path->should_be_locked = false;

	btree_trans_verify_sorted(trans);

	for (i = trans->nr_sorted - 2; i >= 0; --i) {
		struct btree_path *path1 = trans->paths + trans->sorted[i];
		struct btree_path *path2 = trans->paths + trans->sorted[i + 1];

		if (path1->btree_id == path2->btree_id &&
		    path1->locks_want < path2->locks_want)
			__bch2_btree_path_upgrade(trans, path1, path2->locks_want);
		else if (!path1->locks_want && path2->locks_want)
			__bch2_btree_path_upgrade(trans, path1, 1);
	}

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	trans->traverse_all_idx = 0;
	while (trans->traverse_all_idx < trans->nr_sorted) {
		path = trans->paths + trans->sorted[trans->traverse_all_idx];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (path->uptodate) {
			ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))
				goto retry_all;
			if (ret)
				goto err;
			BUG_ON(path->uptodate);
		} else {
			trans->traverse_all_idx++;
		}
	}

	/*
	 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
	 * and relock(), relock() won't relock since path->should_be_locked
	 * isn't set yet, which is all fine
	 */
	trans_for_each_path(trans, path)
		BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
err:
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_trans_traverse_all(trans, trace_ip);
	return ret;
}
static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	if (!is_btree_node(path, l) ||
	    !bch2_btree_node_relock(trans, path, l))
		return false;

	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
{
	unsigned i, l = path->level;

	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i))
			while (l <= i)
				__btree_path_set_level_up(trans, path, l++);

	return l;
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_path_traverse_one(struct btree_trans *trans,
				   struct btree_path *path,
				   unsigned flags,
				   unsigned long trace_ip)
{
	unsigned depth_want = path->level;
	int ret = trans->restarted;

	if (unlikely(ret))
		goto out;

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);
		goto out;
	}

	if (path->cached) {
		ret = bch2_btree_path_traverse_cached(trans, path, flags);
		goto out;
	}

	if (unlikely(path->level >= BTREE_MAX_DEPTH))
		goto out;

	path->level = btree_path_up_until_good_node(trans, path, 0);

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * No nodes at this level - got to the end of
				 * the btree:
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);
			goto out;
		}
	}

	path->uptodate = BTREE_ITER_UPTODATE;
out:
	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
	bch2_btree_path_verify(trans, path);
	return ret;
}
int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  struct btree_path *path, unsigned flags)
{
	if (0 && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		unsigned restart_probability_bits = 4 << min(trans->restart_count, 32U);
		u64 mask = ~(~0ULL << restart_probability_bits);

		if ((prandom_u32() & mask) == mask) {
			trace_transaction_restart_injected(trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
		}
	}

	if (path->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return  bch2_trans_cond_resched(trans) ?:
		btree_path_traverse_one(trans, path, flags, _RET_IP_);
}
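/*
 * Note: the fault injection branch above is compiled but disabled (note
 * the "0 &&"); when enabled it restarts transactions at random, with the
 * probability decaying as trans->restart_count grows, to flush out callers
 * that don't handle restarts correctly.
 */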
static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
			    struct btree_path *src)
{
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(dst, i))
			six_lock_increment(&dst->l[i].b->c.lock,
					   __btree_lock_want(dst, i));

	bch2_btree_path_check_sort(trans, dst, 0);
}

static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
					   bool intent)
{
	struct btree_path *new = btree_path_alloc(trans, src);

	btree_path_copy(trans, new, src);
	__btree_path_get(new, intent);
	return new;
}

inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
			 unsigned long ip)
{
	if (path->ref > 1 || path->preserve) {
		__btree_path_put(path, intent);
		path = btree_path_clone(trans, path, intent);
		path->preserve = false;
#ifdef CONFIG_BCACHEFS_DEBUG
		path->ip_allocated = ip;
#endif
		btree_trans_verify_sorted(trans);
	}

	path->should_be_locked = false;

	return path;
}
struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
		   struct btree_path *path, struct bpos new_pos,
		   bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, path->pos);
	unsigned l = path->level;

	EBUG_ON(trans->restarted);
	EBUG_ON(!path->ref);

	if (!cmp)
		goto out;

	path = bch2_btree_path_make_mut(trans, path, intent, ip);

	path->pos = new_pos;

	bch2_btree_path_check_sort(trans, path, cmp);

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		goto out;
	}

	l = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, l)) {
		BUG_ON(!btree_node_locked(path, l));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_path_advance_to_pos(path, &path->l[l], 8))
			__btree_path_level_init(path, l);
	}

	if (l != path->level) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);
	}
out:
	bch2_btree_path_verify(trans, path);
	return path;
}
/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
{
	__bch2_btree_path_unlock(trans, path);
	btree_path_list_remove(trans, path);
	trans->paths_allocated &= ~(1ULL << path->idx);
}

void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
{
	struct btree_path *dup;

	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

	if (path->should_be_locked &&
	    !trans->restarted &&
	    (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
		return;

	if (dup) {
		dup->preserve |= path->preserve;
		dup->should_be_locked |= path->should_be_locked;
	}

	__bch2_path_free(trans, path);
}
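/*
 * Note: dropping the last reference doesn't necessarily free the path - if
 * a duplicate exists at the same position (or on the same node), the
 * preserve and should_be_locked flags are handed off to it first, so the
 * transaction keeps the state it needs across relock/restart.
 */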
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	struct btree_insert_entry *i;

	prt_printf(buf, "transaction updates for %s journal seq %llu",
		   trans->fn, trans->journal_res.seq);
	prt_newline(buf);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS",
			   bch2_btree_ids[i->btree_id],
			   i->cached,
			   (void *) i->ip_allocated);
		prt_newline(buf);

		prt_printf(buf, "  old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, "  new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	printbuf_indent_sub(buf, 2);
}

void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch_err(trans->c, "%s", buf.buf);
	printbuf_exit(&buf);
}

void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
{
	prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
		   path->idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   bch2_btree_ids[path->btree_id],
		   path->level);
	bch2_bpos_to_text(out, path->pos);

	prt_printf(out, " locks %u", path->nodes_locked);
#ifdef CONFIG_BCACHEFS_DEBUG
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
	prt_newline(out);
}

void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned idx;

	trans_for_each_path_inorder(trans, path, idx)
		bch2_btree_path_to_text(out, path);
}

void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);

	printk(KERN_ERR "%s", buf.buf);
	printbuf_exit(&buf);

	bch2_dump_trans_updates(trans);
}

static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
			s->nr_max_paths = hweight64(trans->paths_allocated);
			swap(s->max_paths_text, buf.buf);
		}
		mutex_unlock(&s->lock);
	}

	printbuf_exit(&buf);
}
static struct btree_path *btree_path_alloc(struct btree_trans *trans,
					   struct btree_path *pos)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct btree_path *path;
	unsigned idx;

	if (unlikely(trans->paths_allocated ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
		bch2_dump_trans_paths_updates(trans);
		panic("trans path overflow\n");
	}

	idx = __ffs64(~trans->paths_allocated);
	trans->paths_allocated |= 1ULL << idx;

	if (s && unlikely(hweight64(trans->paths_allocated) > s->nr_max_paths))
		bch2_trans_update_max_paths(trans);

	path = &trans->paths[idx];

	path->idx		= idx;
	path->ref		= 0;
	path->intent_ref	= 0;
	path->nodes_locked	= 0;
	path->nodes_intent_locked = 0;

	btree_path_list_add(trans, pos, path);
	return path;
}

struct btree_path *bch2_path_get(struct btree_trans *trans,
				 enum btree_id btree_id, struct bpos pos,
				 unsigned locks_want, unsigned level,
				 unsigned flags, unsigned long ip)
{
	struct btree_path *path, *path_pos = NULL;
	bool cached = flags & BTREE_ITER_CACHED;
	bool intent = flags & BTREE_ITER_INTENT;
	int i;

	BUG_ON(trans->restarted);
	btree_trans_verify_sorted(trans);
	bch2_trans_verify_locks(trans);

	trans_for_each_path_inorder(trans, path, i) {
		if (__btree_path_cmp(path,
				     btree_id,
				     cached,
				     pos,
				     level) > 0)
			break;

		path_pos = path;
	}

	if (path_pos &&
	    path_pos->cached	== cached &&
	    path_pos->btree_id	== btree_id &&
	    path_pos->level	== level) {
		__btree_path_get(path_pos, intent);
		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
	} else {
		path = btree_path_alloc(trans, path_pos);

		__btree_path_get(path, intent);
		path->pos			= pos;
		path->btree_id			= btree_id;
		path->cached			= cached;
		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked		= false;
		path->level			= level;
		path->locks_want		= locks_want;
		path->nodes_locked		= 0;
		path->nodes_intent_locked	= 0;
		for (i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef CONFIG_BCACHEFS_DEBUG
		path->ip_allocated		= ip;
#endif
		btree_trans_verify_sorted(trans);
	}

	if (!(flags & BTREE_ITER_NOPRESERVE))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */

	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want) {
		path->locks_want = locks_want;
		btree_path_get_locks(trans, path, true);
	}

	return path;
}
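/*
 * Note: bch2_path_get() prefers reusing an existing path - the sorted path
 * list is scanned for one with matching btree, cached-ness and level, and
 * a new path is allocated only when none qualifies. This keeps the
 * per-transaction path table, and the set of held locks, small.
 */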
inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
	struct bkey_s_c k;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		struct btree_path_level *l = path_l(path);
		struct bkey_packed *_k;

		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);

		if (!k.k || bpos_cmp(path->pos, k.k->p))
			goto hole;
	} else {
		struct bkey_cached *ck = (void *) path->l[0].b;

		EBUG_ON(ck &&
			(path->btree_id != ck->key.btree_id ||
			 bkey_cmp(path->pos, ck->key.pos)));
		EBUG_ON(!ck || !ck->valid);

		*u = ck->k->k;
		k = bkey_i_to_s_c(ck->k);
	}

	return k;
hole:
	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}
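/*
 * Note: a hole is reported by returning a bkey_s_c whose key is the
 * caller-supplied @u, initialized as deleted at path->pos, with a NULL
 * value pointer - letting callers distinguish "no key here" from errors.
 */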
/* Btree iterators: */

int __must_check
__bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
	if (ret)
		return ret;

	btree_path_set_should_be_locked(iter->path);
	return 0;
}

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	b = btree_path_node(iter->path, iter->path->level);
	if (!b)
		goto out;

	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree_path *path = iter->path;
	struct btree *b = NULL;
	int ret;

	BUG_ON(trans->restarted);
	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))
		return NULL;

	/* got to end? */
	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);
		return NULL;
	}

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_trans_restart_relock_next_node(trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		goto err;
	}

	b = btree_path_node(path, path->level + 1);

	if (!bpos_cmp(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
	} else {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		path = iter->path =
			bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
					   iter->flags & BTREE_ITER_INTENT,
					   btree_iter_ip_allocated(iter));

		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, path, iter->flags);
		if (ret)
			goto err;

		b = path->l[path->level].b;
	}

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);
	BUG_ON(iter->path->uptodate);
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}
/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
		struct bpos pos = iter->k.p;
		bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
			    ? bpos_cmp(pos, SPOS_MAX)
			    : bkey_cmp(pos, SPOS_MAX)) != 0;

		if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
			pos = bkey_successor(iter, pos);
		bch2_btree_iter_set_pos(iter, pos);
		return ret;
	} else {
		if (!btree_path_node(iter->path, iter->path->level))
			return true;

		iter->advanced = true;
		return false;
	}
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		    ? bpos_cmp(pos, POS_MIN)
		    : bkey_cmp(pos, POS_MIN)) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
						      enum btree_id btree_id,
						      struct bpos pos)
{
	struct btree_insert_entry *i;
	struct bkey_i *ret = NULL;

	trans_for_each_update(trans, i) {
		if (i->btree_id < btree_id)
			continue;
		if (i->btree_id > btree_id)
			break;
		if (bpos_cmp(i->k->k.p, pos) < 0)
			continue;
		if (i->key_cache_already_flushed)
			continue;
		if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
			ret = i->k;
	}

	return ret;
}
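/*
 * Note: i.e. of this transaction's pending updates to the given btree,
 * return the one with the lowest position still at or after @pos - the
 * candidate that must be merged with whatever is read from the btree
 * itself.
 */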
static noinline
struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bpos start_pos,
				       struct bpos end_pos)
{
	struct bkey_i *k;

	if (bpos_cmp(start_pos, iter->journal_pos) < 0)
		iter->journal_idx = 0;

	k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
					start_pos, end_pos,
					&iter->journal_idx);

	iter->journal_pos = k ? k->k.p : end_pos;
	return k;
}

static noinline
struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
					    struct btree_iter *iter,
					    struct bpos pos)
{
	return bch2_btree_journal_peek(trans, iter, pos, pos);
}

static noinline
struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bkey_s_c k)
{
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter, iter->path->pos,
				k.k ? k.k->p : iter->path->l[0].b->key.k.p);

	if (next_journal) {
		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);
	}

	return k;
}

/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
static noinline
struct bkey_s_c __btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey u;
	int ret;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_INTENT, 0,
						     iter->flags|BTREE_ITER_CACHED,
						     _THIS_IP_);

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(iter->key_cache_path);

	return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
}

static noinline
struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
	struct bkey_s_c ret = __btree_trans_peek_key_cache(iter, pos);
	int err = bkey_err(ret) ?: bch2_btree_path_relock(iter->trans, iter->path, _THIS_IP_);

	return err ? bkey_s_c_err(err) : ret;
}
2359 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2361 struct btree_trans *trans = iter->trans;
2362 struct bkey_i *next_update;
2363 struct bkey_s_c k, k2;
2366 EBUG_ON(iter->path->cached || iter->path->level);
2367 bch2_btree_iter_verify(iter);
2370 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2371 iter->flags & BTREE_ITER_INTENT,
2372 btree_iter_ip_allocated(iter));
2374 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2375 if (unlikely(ret)) {
2376 /* ensure that iter->k is consistent with iter->pos: */
2377 bch2_btree_iter_set_pos(iter, iter->pos);
2378 k = bkey_s_c_err(ret);
2382 btree_path_set_should_be_locked(iter->path);
2384 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2386 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2388 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2392 bch2_btree_iter_set_pos(iter, iter->pos);
2397 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2398 k = btree_trans_peek_journal(trans, iter, k);
2400 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2401 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2404 bpos_cmp(next_update->k.p,
2405 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2406 iter->k = next_update->k;
2407 k = bkey_i_to_s_c(next_update);
2410 if (k.k && bkey_deleted(k.k)) {
2412 * If we've got a whiteout, and it's after the search
2413 * key, advance the search key to the whiteout instead
2414 * of just after the whiteout - it might be a btree
2415 * whiteout, with a real key at the same position, since
2416 * in the btree deleted keys sort before non deleted.
			search_key = bpos_cmp(search_key, k.k->p)
				? k.k->p
				: bpos_successor(k.k->p);
			continue;
		}
		if (likely(k.k)) {
			break;
		} else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2427 /* Advance to next leaf node: */
			search_key = bpos_successor(iter->path->l[0].b->key.k.p);
		} else {
			/* End of btree: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			break;
		}
	}
out:
	bch2_btree_iter_verify(iter);

	return k;
}
/**
 * bch2_btree_iter_peek_upto: returns the first key greater than or equal to
 * the iterator's current position, up to and including @end
 */
2446 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2448 struct btree_trans *trans = iter->trans;
	struct bpos search_key = btree_iter_search_key(iter);
	struct bkey_s_c k;
	struct bpos iter_pos;
	int ret;
2454 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2456 if (iter->update_path) {
2457 bch2_path_put(trans, iter->update_path,
2458 iter->flags & BTREE_ITER_INTENT);
2459 iter->update_path = NULL;
2462 bch2_btree_iter_verify_entry_exit(iter);
	while (1) {
		k = __bch2_btree_iter_peek(iter, search_key);
		if (!k.k || bkey_err(k))
			goto out;
		 * iter->pos should be monotonically increasing, and always be
2471 * equal to the key we just returned - except extents can
2472 * straddle iter->pos:
		if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
			iter_pos = k.k->p;
		else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
			iter_pos = bkey_start_pos(k.k);
		else
			iter_pos = iter->pos;
		if (bkey_cmp(iter_pos, end) > 0) {
			bch2_btree_iter_set_pos(iter, end);
			k = bkey_s_c_null;
			goto out;
		}
2487 if (iter->update_path &&
2488 bkey_cmp(iter->update_path->pos, k.k->p)) {
2489 bch2_path_put(trans, iter->update_path,
2490 iter->flags & BTREE_ITER_INTENT);
2491 iter->update_path = NULL;
2494 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2495 (iter->flags & BTREE_ITER_INTENT) &&
2496 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2497 !iter->update_path) {
2498 struct bpos pos = k.k->p;
2500 if (pos.snapshot < iter->snapshot) {
			if (pos.snapshot < iter->snapshot) {
				search_key = bpos_successor(k.k->p);
				continue;
			}

			pos.snapshot = iter->snapshot;
			/*
			 * advance, same as on exit for iter->path, but only up
			 * to snapshot
			 */
2511 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2512 iter->update_path = iter->path;
2514 iter->update_path = bch2_btree_path_set_pos(trans,
2515 iter->update_path, pos,
						iter->flags & BTREE_ITER_INTENT,
						_THIS_IP_);
		}
2521 * We can never have a key in a leaf node at POS_MAX, so
2522 * we don't have to check these successor() calls:
2524 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    !bch2_snapshot_is_ancestor(trans->c,
					       iter->snapshot,
					       k.k->p.snapshot)) {
			search_key = bpos_successor(k.k->p);
			continue;
		}
2532 if (bkey_whiteout(k.k) &&
2533 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
			search_key = bkey_successor(iter, k.k->p);
			continue;
		}

		break;
	}
2541 iter->pos = iter_pos;
2543 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2544 iter->flags & BTREE_ITER_INTENT,
2545 btree_iter_ip_allocated(iter));
2547 btree_path_set_should_be_locked(iter->path);
out:
	if (iter->update_path) {
2550 if (iter->update_path->uptodate &&
2551 (ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)))
2552 k = bkey_s_c_err(ret);
		else
			btree_path_set_should_be_locked(iter->update_path);
	}
2557 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2558 iter->pos.snapshot = iter->snapshot;
2560 ret = bch2_btree_iter_verify_ret(iter, k);
2561 if (unlikely(ret)) {
2562 bch2_btree_iter_set_pos(iter, iter->pos);
2563 k = bkey_s_c_err(ret);
	}

	bch2_btree_iter_verify_entry_exit(iter);

	return k;
}
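/*
 * Usage sketch (illustrative only - not a caller in this file; the btree ID,
 * range and error handling are placeholders):
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, start_pos, 0);
 *
 *	while ((k = bch2_btree_iter_peek_upto(&iter, end_pos)).k &&
 *	       !bkey_err(k)) {
 *		// ... use k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 *	bch2_trans_exit(&trans);
 */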
2572 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2573 * to iterator's current position, returning keys from every level of the btree.
2574 * For keys at different levels of the btree that compare equal, the key from
2575 * the lower level (leaf) is returned first.
2577 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct bkey_s_c k;
	int ret;
2583 EBUG_ON(iter->path->cached);
2584 bch2_btree_iter_verify(iter);
2585 BUG_ON(iter->path->level < iter->min_depth);
2586 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2587 EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
	while (1) {
		iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
2594 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2595 if (unlikely(ret)) {
2596 /* ensure that iter->k is consistent with iter->pos: */
2597 bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out;
		}
2602 /* Already at end? */
		if (!btree_path_node(iter->path, iter->path->level)) {
			k = bkey_s_c_null;
			goto out;
		}
2608 k = btree_path_level_peek_all(trans->c,
2609 &iter->path->l[iter->path->level], &iter->k);
2611 /* Check if we should go up to the parent node: */
		if (!k.k ||
		    (iter->advanced &&
		     !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
2615 iter->pos = path_l(iter->path)->b->key.k.p;
2616 btree_path_set_level_up(trans, iter->path);
			iter->advanced = false;
			continue;
		}
2622 * Check if we should go back down to a leaf:
2623 * If we're not in a leaf node, we only return the current key
2624 * if it exactly matches iter->pos - otherwise we first have to
2625 * go back to the leaf:
2627 if (iter->path->level != iter->min_depth &&
		    (iter->advanced ||
		     !k.k ||
		     bpos_cmp(iter->pos, k.k->p))) {
2631 btree_path_set_level_down(trans, iter->path, iter->min_depth);
2632 iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;
			continue;
		}
2637 /* Check if we should go to the next key: */
2638 if (iter->path->level == iter->min_depth &&
		    iter->advanced &&
		    k.k &&
		    !bpos_cmp(iter->pos, k.k->p)) {
2642 iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;
			continue;
		}
2647 if (iter->advanced &&
2648 iter->path->level == iter->min_depth &&
2649 bpos_cmp(k.k->p, iter->pos))
2650 iter->advanced = false;
		BUG_ON(iter->advanced);
		BUG_ON(!k.k);
		break;
	}

	iter->pos = k.k->p;
2658 btree_path_set_should_be_locked(iter->path);
out:
	bch2_btree_iter_verify(iter);

	return k;
}
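/*
 * Sketch of a hypothetical caller: BTREE_ITER_ALL_LEVELS implies
 * BTREE_ITER_ALL_SNAPSHOTS (see __bch2_trans_iter_init() below), so the
 * starting position carries an explicit snapshot field:
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 *			     SPOS(inum, 0, snapshot),
 *			     BTREE_ITER_ALL_LEVELS|BTREE_ITER_PREFETCH);
 *	k = bch2_btree_iter_peek_all_levels(&iter);
 */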
/**
 * bch2_btree_iter_next: returns the first key greater than the iterator's
 * current position
 */
2669 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2671 if (!bch2_btree_iter_advance(iter))
2672 return bkey_s_c_null;
2674 return bch2_btree_iter_peek(iter);
2678 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2679 * iterator's current position
2681 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2683 struct btree_trans *trans = iter->trans;
2684 struct bpos search_key = iter->pos;
2685 struct btree_path *saved_path = NULL;
2687 struct bkey saved_k;
	const struct bch_val *saved_v;
	struct bkey_s_c k;
	int ret;
2691 EBUG_ON(iter->path->cached || iter->path->level);
2692 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2694 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2695 return bkey_s_c_err(-EIO);
2697 bch2_btree_iter_verify(iter);
2698 bch2_btree_iter_verify_entry_exit(iter);
2700 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2701 search_key.snapshot = U32_MAX;
	while (1) {
		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
						iter->flags & BTREE_ITER_INTENT,
						btree_iter_ip_allocated(iter));
2708 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2709 if (unlikely(ret)) {
2710 /* ensure that iter->k is consistent with iter->pos: */
2711 bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out;
		}
2716 k = btree_path_level_peek(trans, iter->path,
2717 &iter->path->l[0], &iter->k);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
2720 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2721 : bpos_cmp(k.k->p, search_key) > 0))
2722 k = btree_path_level_prev(trans, iter->path,
2723 &iter->path->l[0], &iter->k);
2725 bch2_btree_path_check_sort(trans, iter->path, 0);
		if (likely(k.k)) {
			if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
				if (k.k->p.snapshot == iter->snapshot)
					goto got_key;
2733 * If we have a saved candidate, and we're no
2734 * longer at the same _key_ (not pos), return
2737 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2738 bch2_path_put(trans, iter->path,
2739 iter->flags & BTREE_ITER_INTENT);
				iter->path = saved_path;
				saved_path = NULL;
				iter->k = saved_k;
				k.v = saved_v;
				goto got_key;
			}
			if (bch2_snapshot_is_ancestor(iter->trans->c,
						      iter->snapshot,
						      k.k->p.snapshot)) {
				if (saved_path)
2751 bch2_path_put(trans, saved_path,
2752 iter->flags & BTREE_ITER_INTENT);
2753 saved_path = btree_path_clone(trans, iter->path,
							iter->flags & BTREE_ITER_INTENT);
				saved_k = *k.k;
				saved_v = k.v;
			}
			search_key = bpos_predecessor(k.k->p);
			continue;
		}
got_key:
2763 if (bkey_whiteout(k.k) &&
2764 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2765 search_key = bkey_predecessor(iter, k.k->p);
2766 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
					search_key.snapshot = U32_MAX;
				continue;
			}

			break;
2772 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2773 /* Advance to previous leaf node: */
2774 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2776 /* Start of btree: */
			bch2_btree_iter_set_pos(iter, POS_MIN);
			k = bkey_s_c_null;
			goto out;
		}
	}
2783 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2785 /* Extents can straddle iter->pos: */
	if (bkey_cmp(k.k->p, iter->pos) < 0)
		iter->pos = k.k->p;
2789 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2790 iter->pos.snapshot = iter->snapshot;
2792 btree_path_set_should_be_locked(iter->path);
out:
	if (saved_path)
		bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2797 bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return k;
}
/**
 * bch2_btree_iter_prev: returns the first key less than the iterator's
 * current position
 */
2807 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2809 if (!bch2_btree_iter_rewind(iter))
2810 return bkey_s_c_null;
2812 return bch2_btree_iter_peek_prev(iter);
2815 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2817 struct btree_trans *trans = iter->trans;
	struct bpos search_key;
	struct bkey_s_c k;
	int ret;
2822 bch2_btree_iter_verify(iter);
2823 bch2_btree_iter_verify_entry_exit(iter);
2824 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2825 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2827 /* extents can't span inode numbers: */
2828 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2829 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2830 if (iter->pos.inode == KEY_INODE_MAX)
2831 return bkey_s_c_null;
2833 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2836 search_key = btree_iter_search_key(iter);
2837 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2838 iter->flags & BTREE_ITER_INTENT,
2839 btree_iter_ip_allocated(iter));
2841 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2842 if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}
2847 if ((iter->flags & BTREE_ITER_CACHED) ||
2848 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2849 struct bkey_i *next_update;
2851 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2852 (next_update = btree_trans_peek_updates(trans,
2853 iter->btree_id, search_key)) &&
2854 !bpos_cmp(next_update->k.p, iter->pos)) {
2855 iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}
2860 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2861 (next_update = bch2_btree_journal_peek_slot(trans,
2862 iter, iter->pos))) {
2863 iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}
2868 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k = __btree_trans_peek_key_cache(iter, iter->pos)).k) {
			if (!bkey_err(k))
				iter->k = *k.k;
			/* We're not returning a key from iter->path: */
			goto out_no_locked;
		}
2876 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
	} else {
		struct bpos next;

		EBUG_ON(iter->path->level);
2882 if (iter->flags & BTREE_ITER_INTENT) {
2883 struct btree_iter iter2;
2884 struct bpos end = iter->pos;
2886 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2887 end.offset = U64_MAX;
2889 bch2_trans_copy_iter(&iter2, iter);
2890 k = bch2_btree_iter_peek_upto(&iter2, end);
			if (k.k && !bkey_err(k)) {
				iter->k = iter2.k;
				k.k = &iter->k;
			}
2896 bch2_trans_iter_exit(trans, &iter2);
2898 struct bpos pos = iter->pos;
2900 k = bch2_btree_iter_peek(iter);
2901 if (unlikely(bkey_err(k)))
				bch2_btree_iter_set_pos(iter, pos);
			else
				iter->pos = pos;
		}
		if (unlikely(bkey_err(k)))
			goto out_no_locked;
2910 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2912 if (bkey_cmp(iter->pos, next) < 0) {
2913 bkey_init(&iter->k);
2914 iter->k.p = iter->pos;
2916 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2917 bch2_key_resize(&iter->k,
2918 min_t(u64, KEY_SIZE_MAX,
					      (next.inode == iter->pos.inode
					       ? next.offset
					       : KEY_OFFSET_MAX) -
					      iter->pos.offset));
			EBUG_ON(!iter->k.size);
		}

		k = (struct bkey_s_c) { &iter->k, NULL };
	}
out:
	btree_path_set_should_be_locked(iter->path);
out_no_locked:
2932 bch2_btree_iter_verify_entry_exit(iter);
2933 bch2_btree_iter_verify(iter);
	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	return k;
}
2941 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2943 if (!bch2_btree_iter_advance(iter))
2944 return bkey_s_c_null;
2946 return bch2_btree_iter_peek_slot(iter);
2949 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2951 if (!bch2_btree_iter_rewind(iter))
2952 return bkey_s_c_null;
	return bch2_btree_iter_peek_slot(iter);
}
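/*
 * Slot iterators return a key for every position: where nothing exists,
 * bch2_btree_iter_peek_slot() synthesizes a deleted key (or, for extents, a
 * hole extending to the next real key). Sketch of a hypothetical caller:
 *
 *	k = bch2_btree_iter_peek_slot(&iter);	// key or hole at iter->pos
 *	if (!bkey_err(k))
 *		k = bch2_btree_iter_next_slot(&iter);	// key or hole at the next position
 */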
2957 /* new transactional stuff: */
2959 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2960 struct btree_path *path)
2962 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2963 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2964 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2967 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2969 #ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	for (i = 0; i < trans->nr_sorted; i++)
		btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
#endif
}
2977 static void btree_trans_verify_sorted(struct btree_trans *trans)
2979 #ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_path *path, *prev = NULL;
	unsigned i;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_path_inorder(trans, path, i) {
2987 if (prev && btree_path_cmp(prev, path) > 0) {
2988 bch2_dump_trans_paths_updates(trans);
2989 panic("trans paths out of order!\n");
2996 static inline void btree_path_swap(struct btree_trans *trans,
2997 struct btree_path *l, struct btree_path *r)
2999 swap(l->sorted_idx, r->sorted_idx);
3000 swap(trans->sorted[l->sorted_idx],
3001 trans->sorted[r->sorted_idx]);
3003 btree_path_verify_sorted_ref(trans, l);
3004 btree_path_verify_sorted_ref(trans, r);
inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
				       int cmp)
{
	struct btree_path *n;

	if (cmp <= 0) {
3013 n = prev_btree_path(trans, path);
3014 if (n && btree_path_cmp(n, path) > 0) {
			do {
				btree_path_swap(trans, n, path);
3017 n = prev_btree_path(trans, path);
			} while (n && btree_path_cmp(n, path) > 0);
		}
	}

	if (cmp >= 0) {
		n = next_btree_path(trans, path);
3026 if (n && btree_path_cmp(path, n) > 0) {
			do {
				btree_path_swap(trans, path, n);
3029 n = next_btree_path(trans, path);
			} while (n && btree_path_cmp(path, n) > 0);
		}
	}

	btree_trans_verify_sorted(trans);
}
3037 static inline void btree_path_list_remove(struct btree_trans *trans,
3038 struct btree_path *path)
	unsigned i;

	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
3044 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
3046 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3047 trans->paths[trans->sorted[i]].sorted_idx = i;
3049 path->sorted_idx = U8_MAX;
3051 btree_trans_verify_sorted_refs(trans);
3054 static inline void btree_path_list_add(struct btree_trans *trans,
3055 struct btree_path *pos,
3056 struct btree_path *path)
	unsigned i;

	btree_trans_verify_sorted_refs(trans);
3062 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
3064 if (trans->in_traverse_all &&
3065 trans->traverse_all_idx != U8_MAX &&
3066 trans->traverse_all_idx >= path->sorted_idx)
3067 trans->traverse_all_idx++;
3069 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
3071 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
3072 trans->paths[trans->sorted[i]].sorted_idx = i;
3074 btree_trans_verify_sorted_refs(trans);
3077 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
	if (iter->path)
		bch2_path_put(trans, iter->path,
			      iter->flags & BTREE_ITER_INTENT);
3082 if (iter->update_path)
3083 bch2_path_put(trans, iter->update_path,
3084 iter->flags & BTREE_ITER_INTENT);
3085 if (iter->key_cache_path)
3086 bch2_path_put(trans, iter->key_cache_path,
3087 iter->flags & BTREE_ITER_INTENT);
	iter->path = NULL;
	iter->update_path = NULL;
	iter->key_cache_path = NULL;
}
3093 static void __bch2_trans_iter_init(struct btree_trans *trans,
3094 struct btree_iter *iter,
3095 unsigned btree_id, struct bpos pos,
				   unsigned locks_want,
				   unsigned depth,
				   unsigned flags,
				   unsigned long ip)
{
3101 EBUG_ON(trans->restarted);
3103 if (flags & BTREE_ITER_ALL_LEVELS)
3104 flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
3106 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
3107 btree_node_type_is_extents(btree_id))
3108 flags |= BTREE_ITER_IS_EXTENTS;
3110 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
3111 !btree_type_has_snapshots(btree_id))
3112 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
3114 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
3115 btree_type_has_snapshots(btree_id))
3116 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
3118 if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
3119 flags |= BTREE_ITER_WITH_JOURNAL;
	iter->trans = trans;
	iter->path = NULL;
3123 iter->update_path = NULL;
3124 iter->key_cache_path = NULL;
3125 iter->btree_id = btree_id;
3126 iter->min_depth = depth;
3127 iter->flags = flags;
	iter->snapshot = pos.snapshot;
	iter->pos = pos;
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p = pos;
	iter->k.size = 0;
3133 iter->journal_idx = 0;
3134 iter->journal_pos = POS_MIN;
3135 #ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
3139 iter->path = bch2_path_get(trans, btree_id, iter->pos,
3140 locks_want, depth, flags, ip);
3143 void bch2_trans_iter_init(struct btree_trans *trans,
3144 struct btree_iter *iter,
			 unsigned btree_id, struct bpos pos,
			 unsigned flags)
{
3148 if (!btree_id_cached(trans->c, btree_id)) {
3149 flags &= ~BTREE_ITER_CACHED;
3150 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
3151 } else if (!(flags & BTREE_ITER_CACHED))
3152 flags |= BTREE_ITER_WITH_KEY_CACHE;
3154 __bch2_trans_iter_init(trans, iter, btree_id, pos,
3155 0, 0, flags, _RET_IP_);
3158 void bch2_trans_node_iter_init(struct btree_trans *trans,
3159 struct btree_iter *iter,
			       enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
{
3166 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
3167 BTREE_ITER_NOT_EXTENTS|
3168 __BTREE_ITER_ALL_SNAPSHOTS|
3169 BTREE_ITER_ALL_SNAPSHOTS|
3171 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
3172 BUG_ON(iter->path->level != depth);
3173 BUG_ON(iter->min_depth != depth);
3176 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
	*dst = *src;

	__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
3181 if (src->update_path)
3182 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
3183 dst->key_cache_path = NULL;
3186 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
	size_t new_top = trans->mem_top + size;
	void *new_mem;
	void *p;
3191 if (new_top > trans->mem_bytes) {
3192 size_t old_bytes = trans->mem_bytes;
3193 size_t new_bytes = roundup_pow_of_two(new_top);
3196 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
3198 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
3199 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
3200 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);
3208 trans->mem = new_mem;
3209 trans->mem_bytes = new_bytes;
		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans, _RET_IP_, new_bytes);
			return ERR_PTR(btree_trans_restart(trans,
					BCH_ERR_transaction_restart_mem_realloced));
		}
	}
3217 p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	memset(p, 0, size);

	return p;
}
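/*
 * Note that a successful bch2_trans_kmalloc() call can still restart the
 * transaction (see above); a sketch of the expected caller pattern, where
 * the key type is a placeholder:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	int ret = PTR_ERR_OR_ZERO(k);
 *
 *	if (ret)	// -ENOMEM, or a transaction restart to bubble up
 *		return ret;
 */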
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return BCH_ERR_transaction_restart when the trylock fails. When
 * this occurs, bch2_trans_begin() should be called and the transaction
 * retried.
 */
3231 u32 bch2_trans_begin(struct btree_trans *trans)
3233 struct btree_path *path;
3235 bch2_trans_reset_updates(trans);
	trans->restart_count++;
	trans->mem_top = 0;
3240 if (trans->fs_usage_deltas) {
3241 trans->fs_usage_deltas->used = 0;
3242 memset((void *) trans->fs_usage_deltas +
3243 offsetof(struct replicas_delta_list, memset_start), 0,
3244 (void *) &trans->fs_usage_deltas->memset_end -
3245 (void *) &trans->fs_usage_deltas->memset_start);
3248 trans_for_each_path(trans, path) {
3249 path->should_be_locked = false;
		/*
		 * If the transaction wasn't restarted, we're presuming to be
		 * doing something new: don't keep iterators except the ones
		 * that are in use - except for the subvolumes btree:
		 */
3256 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
3257 path->preserve = false;
3260 * XXX: we probably shouldn't be doing this if the transaction
3261 * was restarted, but currently we still overflow transaction
3262 * iterators if we do that
3264 if (!path->ref && !path->preserve)
			__bch2_path_free(trans, path);
		else
			path->preserve = false;
	}
	if (!trans->restarted &&
	    (need_resched() ||
	     ktime_get_ns() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
		bch2_trans_unlock(trans);
		cond_resched();
		bch2_trans_relock(trans);
	}
3278 trans->last_restarted_ip = _RET_IP_;
3279 if (trans->restarted)
3280 bch2_btree_path_traverse_all(trans);
3282 trans->last_begin_time = ktime_get_ns();
	return trans->restart_count;
}
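/*
 * Typical retry loop around bch2_trans_begin() (sketch: do_some_update() and
 * the commit flags are placeholders):
 *
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret =   do_some_update(&trans) ?:
 *			bch2_trans_commit(&trans, NULL, NULL, 0);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */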
3286 void bch2_trans_verify_not_restarted(struct btree_trans *trans, u32 restart_count)
3288 if (trans_was_restarted(trans, restart_count))
3289 panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
3290 trans->restart_count, restart_count,
3291 (void *) trans->last_restarted_ip);
3294 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
3296 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
3297 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
3308 trans->paths = p; p += paths_bytes;
3309 trans->updates = p; p += updates_bytes;
static inline unsigned bch2_trans_get_fn_idx(struct btree_trans *trans, struct bch_fs *c,
					     const char *fn)
{
	unsigned i;
3317 for (i = 0; i < ARRAY_SIZE(c->btree_transaction_fns); i++)
3318 if (!c->btree_transaction_fns[i] ||
3319 c->btree_transaction_fns[i] == fn) {
			c->btree_transaction_fns[i] = fn;
			return i;
		}
	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
	return 0;
}
3328 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
3329 unsigned expected_nr_iters,
		       size_t expected_mem_bytes,
		       const char *fn)
	__acquires(&c->btree_trans_barrier)
{
3334 struct btree_trans *pos;
3336 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
	memset(trans, 0, sizeof(*trans));

	trans->c		= c;
	trans->fn		= fn;
	trans->last_begin_time	= ktime_get_ns();
3342 trans->task = current;
3343 trans->fn_idx = bch2_trans_get_fn_idx(trans, c, fn);
3345 bch2_trans_alloc_paths(trans, c);
3347 if (expected_mem_bytes) {
3348 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
3349 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
		if (unlikely(!trans->mem)) {
3352 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3353 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
3357 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3359 mutex_lock(&c->btree_trans_lock);
3360 list_for_each_entry(pos, &c->btree_trans_list, list) {
3361 if (trans->task->pid < pos->task->pid) {
3362 list_add_tail(&trans->list, &pos->list);
3366 list_add_tail(&trans->list, &c->btree_trans_list);
3368 mutex_unlock(&c->btree_trans_lock);
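/*
 * Lifecycle sketch (hypothetical caller): transactions are stack-allocated
 * and initialized via the bch2_trans_init() wrapper, which supplies @fn:
 *
 *	struct btree_trans trans;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	// ... iterators, updates, commits ...
 *	bch2_trans_exit(&trans);
 */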
3371 static void check_btree_paths_leaked(struct btree_trans *trans)
3373 #ifdef CONFIG_BCACHEFS_DEBUG
3374 struct bch_fs *c = trans->c;
3375 struct btree_path *path;
	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
3382 bch_err(c, "btree paths leaked from %s!", trans->fn);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
3386 bch2_btree_ids[path->btree_id],
3387 (void *) path->ip_allocated);
3388 /* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}
3393 void bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
3396 struct btree_insert_entry *i;
3397 struct bch_fs *c = trans->c;
3399 bch2_trans_unlock(trans);
3401 trans_for_each_update(trans, i)
3402 __btree_path_put(i->path, true);
3403 trans->nr_updates = 0;
3405 check_btree_paths_leaked(trans);
3407 mutex_lock(&c->btree_trans_lock);
3408 list_del(&trans->list);
3409 mutex_unlock(&c->btree_trans_lock);
3411 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3413 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3415 kfree(trans->extra_journal_entries.data);
3417 if (trans->fs_usage_deltas) {
3418 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3419 REPLICAS_DELTA_LIST_MAX)
3420 mempool_free(trans->fs_usage_deltas,
3421 &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}
3426 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);
3441 trans->mem = (void *) 0x1;
3442 trans->paths = (void *) 0x1;
3445 static void __maybe_unused
3446 bch2_btree_path_node_to_text(struct printbuf *out,
			    struct btree_bkey_cached_common *b,
			    bool cached)
{
	prt_printf(out, "    l=%u %s:",
3451 b->level, bch2_btree_ids[b->btree_id]);
3452 bch2_bpos_to_text(out, btree_node_pos(b, cached));
3455 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3457 struct btree_path *path;
3458 struct btree_bkey_cached_common *b;
	static char lock_types[] = { 'r', 'i', 'w' };
	unsigned l;
3462 prt_printf(out, "%i %s\n", trans->task->pid, trans->fn);
3464 trans_for_each_path(trans, path) {
		if (!path->nodes_locked)
			continue;
3468 prt_printf(out, " path %u %c l=%u %s:",
3470 path->cached ? 'c' : 'b',
3472 bch2_btree_ids[path->btree_id]);
3473 bch2_bpos_to_text(out, path->pos);
3474 prt_printf(out, "\n");
3476 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3477 if (btree_node_locked(path, l) &&
3478 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3479 prt_printf(out, " %s l=%u ",
3480 btree_node_intent_locked(path, l) ? "i" : "r", l);
3481 bch2_btree_path_node_to_text(out, b, path->cached);
3482 prt_printf(out, "\n");
	b = READ_ONCE(trans->locking);
	if (b) {
		path = &trans->paths[trans->locking_path_idx];
3490 prt_printf(out, " locking path %u %c l=%u %c %s:",
3491 trans->locking_path_idx,
3492 path->cached ? 'c' : 'b',
3493 trans->locking_level,
3494 lock_types[trans->locking_lock_type],
3495 bch2_btree_ids[trans->locking_btree_id]);
3496 bch2_bpos_to_text(out, trans->locking_pos);
3498 prt_printf(out, " node ");
3499 bch2_btree_path_node_to_text(out, b, path->cached);
3500 prt_printf(out, "\n");
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
3506 if (c->btree_trans_barrier_initialized)
3507 cleanup_srcu_struct(&c->btree_trans_barrier);
3508 mempool_exit(&c->btree_trans_mem_pool);
3509 mempool_exit(&c->btree_paths_pool);
3512 int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned i, nr = BTREE_ITER_MAX;
	int ret;
3517 for (i = 0; i < ARRAY_SIZE(c->btree_transaction_stats); i++)
3518 mutex_init(&c->btree_transaction_stats[i].lock);
3520 INIT_LIST_HEAD(&c->btree_trans_list);
3521 mutex_init(&c->btree_trans_lock);
3523 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3524 sizeof(struct btree_path) * nr +
3525 sizeof(struct btree_insert_entry) * nr) ?:
3526 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3527 BTREE_TRANS_MEM_MAX) ?:
3528 init_srcu_struct(&c->btree_trans_barrier);
	if (!ret)
		c->btree_trans_barrier_initialized = true;

	return ret;
}