1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
17 #include "subvolume.h"
19 #include <linux/prefetch.h>
20 #include <trace/events/bcachefs.h>
22 static void btree_trans_verify_sorted(struct btree_trans *);
23 static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
25 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
26 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
29 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
31 #ifdef CONFIG_BCACHEFS_DEBUG
32 return iter->ip_allocated;
38 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
41 * Unlocks before scheduling
42 * Note: does not revalidate iterator
44 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
46 if (need_resched() || race_fault()) {
47 bch2_trans_unlock(trans);
49 return bch2_trans_relock(trans) ? 0 : -EINTR;
55 static inline int __btree_path_cmp(const struct btree_path *l,
56 enum btree_id r_btree_id,
61 return cmp_int(l->btree_id, r_btree_id) ?:
62 cmp_int((int) l->cached, (int) r_cached) ?:
63 bpos_cmp(l->pos, r_pos) ?:
64 -cmp_int(l->level, r_level);
67 static inline int btree_path_cmp(const struct btree_path *l,
68 const struct btree_path *r)
70 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
73 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
75 /* Are we iterating over keys in all snapshots? */
76 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
77 p = bpos_successor(p);
79 p = bpos_nosnap_successor(p);
80 p.snapshot = iter->snapshot;
86 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
88 /* Are we iterating over keys in all snapshots? */
89 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
90 p = bpos_predecessor(p);
92 p = bpos_nosnap_predecessor(p);
93 p.snapshot = iter->snapshot;
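/*
 * Illustration (a rough sketch of the two helpers above): when the
 * iterator is _not_ in BTREE_ITER_ALL_SNAPSHOTS mode, advancing skips
 * the snapshot field entirely and re-pins it to iter->snapshot, e.g.
 *
 *	(inode 1, offset 8, snapshot 3)  ->  (inode 1, offset 9, snapshot iter->snapshot)
 *
 * while in all-snapshots mode bpos_successor()/bpos_predecessor() step
 * through the full position, snapshot field included.
 */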
99 static inline bool is_btree_node(struct btree_path *path, unsigned l)
101 return l < BTREE_MAX_DEPTH &&
102 (unsigned long) path->l[l].b >= 128;
105 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
107 struct bpos pos = iter->pos;
109 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
110 bkey_cmp(pos, POS_MAX))
111 pos = bkey_successor(iter, pos);
115 static inline bool btree_path_pos_before_node(struct btree_path *path,
118 return bpos_cmp(path->pos, b->data->min_key) < 0;
121 static inline bool btree_path_pos_after_node(struct btree_path *path,
124 return bpos_cmp(b->key.k.p, path->pos) < 0;
127 static inline bool btree_path_pos_in_node(struct btree_path *path,
130 return path->btree_id == b->c.btree_id &&
131 !btree_path_pos_before_node(path, b) &&
132 !btree_path_pos_after_node(path, b);
135 /* Btree node locking: */
137 void bch2_btree_node_unlock_write(struct btree_trans *trans,
138 struct btree_path *path, struct btree *b)
140 bch2_btree_node_unlock_write_inlined(trans, path, b);
143 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
145 struct btree_path *linked;
146 unsigned readers = 0;
148 trans_for_each_path(trans, linked)
149 if (linked->l[b->c.level].b == b &&
150 btree_node_read_locked(linked, b->c.level))
154 * Must drop our read locks before calling six_lock_write() -
155 * six_unlock() won't do wakeups until the reader count
156 * goes to 0, and it's safe because we have the node intent locked:
159 if (!b->c.lock.readers)
160 atomic64_sub(__SIX_VAL(read_lock, readers),
161 &b->c.lock.state.counter);
163 this_cpu_sub(*b->c.lock.readers, readers);
165 btree_node_lock_type(trans->c, b, SIX_LOCK_write);
167 if (!b->c.lock.readers)
168 atomic64_add(__SIX_VAL(read_lock, readers),
169 &b->c.lock.state.counter);
171 this_cpu_add(*b->c.lock.readers, readers);
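/*
 * Rough illustration of the reader-count juggling above: if paths in
 * this transaction hold e.g. two read locks on @b, we subtract 2 from
 * the SIX lock's reader count so that the six_lock_write() call doesn't
 * block waiting on our own readers, then add the 2 back once the write
 * lock is held. This is only safe because we already hold an intent
 * lock on @b, so no other thread can take the write lock while the
 * reader count is temporarily understated.
 */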
174 bool __bch2_btree_node_relock(struct btree_trans *trans,
175 struct btree_path *path, unsigned level)
177 struct btree *b = btree_path_node(path, level);
178 int want = __btree_lock_want(path, level);
180 if (!is_btree_node(path, level))
186 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
187 (btree_node_lock_seq_matches(path, b, level) &&
188 btree_node_lock_increment(trans, b, level, want))) {
189 mark_btree_node_locked(path, level, want);
196 bool bch2_btree_node_upgrade(struct btree_trans *trans,
197 struct btree_path *path, unsigned level)
199 struct btree *b = path->l[level].b;
201 if (!is_btree_node(path, level))
204 switch (btree_lock_want(path, level)) {
205 case BTREE_NODE_UNLOCKED:
206 BUG_ON(btree_node_locked(path, level));
208 case BTREE_NODE_READ_LOCKED:
209 BUG_ON(btree_node_intent_locked(path, level));
210 return bch2_btree_node_relock(trans, path, level);
211 case BTREE_NODE_INTENT_LOCKED:
215 if (btree_node_intent_locked(path, level))
221 if (btree_node_locked(path, level)
222 ? six_lock_tryupgrade(&b->c.lock)
223 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
226 if (btree_node_lock_seq_matches(path, b, level) &&
227 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
228 btree_node_unlock(path, level);
234 mark_btree_node_intent_locked(path, level);
238 static inline bool btree_path_get_locks(struct btree_trans *trans,
239 struct btree_path *path,
240 bool upgrade, unsigned long trace_ip)
242 unsigned l = path->level;
246 if (!btree_path_node(path, l))
250 ? bch2_btree_node_upgrade(trans, path, l)
251 : bch2_btree_node_relock(trans, path, l)))
255 } while (l < path->locks_want);
258 * When we fail to get a lock, we have to ensure that any child nodes
259 * can't be relocked so bch2_btree_path_traverse has to walk back up to
260 * the node that we failed to relock:
263 __bch2_btree_path_unlock(path);
264 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
267 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
269 } while (fail_idx >= 0);
272 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
273 path->uptodate = BTREE_ITER_UPTODATE;
275 bch2_trans_verify_locks(trans);
277 return path->uptodate < BTREE_ITER_NEED_RELOCK;
280 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
284 ? container_of(_b, struct btree, c)->key.k.p
285 : container_of(_b, struct bkey_cached, c)->key.pos;
289 bool __bch2_btree_node_lock(struct btree_trans *trans,
290 struct btree_path *path,
292 struct bpos pos, unsigned level,
293 enum six_lock_type type,
294 six_lock_should_sleep_fn should_sleep_fn, void *p,
297 struct btree_path *linked, *deadlock_path = NULL;
298 u64 start_time = local_clock();
302 /* Check if it's safe to block: */
303 trans_for_each_path(trans, linked) {
304 if (!linked->nodes_locked)
308 * Can't block taking an intent lock if we have _any_ nodes read locked:
311 * - Our read lock blocks another thread with an intent lock on
312 * the same node from getting a write lock, and thus from
313 * dropping its intent lock
315 * - And the other thread may have multiple nodes intent locked:
316 * both the node we want to intent lock, and the node we
317 * already have read locked - deadlock:
319 if (type == SIX_LOCK_intent &&
320 linked->nodes_locked != linked->nodes_intent_locked) {
321 deadlock_path = linked;
325 if (linked->btree_id != path->btree_id) {
326 if (linked->btree_id > path->btree_id) {
327 deadlock_path = linked;
334 * Within the same btree, cached paths come before non-cached paths:
337 if (linked->cached != path->cached) {
339 deadlock_path = linked;
346 * Interior nodes must be locked before their descendants: if
347 * another path holds a lock on a possible descendant of the node
348 * we're about to lock, it must have the ancestors locked too:
350 if (level > __fls(linked->nodes_locked)) {
351 deadlock_path = linked;
355 /* Must lock btree nodes in key order: */
356 if (btree_node_locked(linked, level) &&
357 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
358 linked->cached)) <= 0) {
359 deadlock_path = linked;
361 BUG_ON(trans->in_traverse_all);
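/*
 * Example of the ordering rules checked above (illustrative only):
 * paths must be locked in the order they sort in - by btree id, cached
 * before non-cached, interior nodes before their descendants, and
 * within a level in ascending key order. So if some path already holds
 * a lock on a leaf at pos (1, 100) and we now want to lock a leaf at
 * (1, 50) in the same btree, we must not block: deadlock_path is set
 * and the transaction is restarted below instead.
 */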
365 if (unlikely(deadlock_path)) {
366 trace_trans_restart_would_deadlock(trans->fn, ip,
367 trans->in_traverse_all, reason,
368 deadlock_path->btree_id,
369 deadlock_path->cached,
374 btree_trans_restart(trans);
378 if (six_trylock_type(&b->c.lock, type))
381 trans->locking_path_idx = path->idx;
382 trans->locking_pos = pos;
383 trans->locking_btree_id = path->btree_id;
384 trans->locking_level = level;
387 ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
389 trans->locking = NULL;
392 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
397 /* Btree iterator locking: */
399 #ifdef CONFIG_BCACHEFS_DEBUG
401 static void bch2_btree_path_verify_locks(struct btree_path *path)
405 if (!path->nodes_locked) {
406 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
407 btree_path_node(path, path->level));
411 for (l = 0; btree_path_node(path, l); l++)
412 BUG_ON(btree_lock_want(path, l) !=
413 btree_node_locked_type(path, l));
416 void bch2_trans_verify_locks(struct btree_trans *trans)
418 struct btree_path *path;
420 trans_for_each_path(trans, path)
421 bch2_btree_path_verify_locks(path);
424 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
427 /* Btree path locking: */
430 * Only for btree_cache.c - only relocks intent locks
432 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
433 struct btree_path *path)
437 for (l = path->level;
438 l < path->locks_want && btree_path_node(path, l);
440 if (!bch2_btree_node_relock(trans, path, l)) {
441 __bch2_btree_path_unlock(path);
442 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
443 btree_trans_restart(trans);
452 static bool bch2_btree_path_relock(struct btree_trans *trans,
453 struct btree_path *path, unsigned long trace_ip)
455 bool ret = btree_path_get_locks(trans, path, false, trace_ip);
458 btree_trans_restart(trans);
462 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
463 struct btree_path *path,
464 unsigned new_locks_want)
466 struct btree_path *linked;
468 EBUG_ON(path->locks_want >= new_locks_want);
470 path->locks_want = new_locks_want;
472 if (btree_path_get_locks(trans, path, true, _THIS_IP_))
476 * XXX: this is ugly - we'd prefer to not be mucking with other
477 * iterators in the btree_trans here.
479 * On failure to upgrade the iterator, setting iter->locks_want and
480 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
481 * get the locks we want on transaction restart.
483 * But if this iterator was a clone, on transaction restart what we did
484 * to this iterator isn't going to be preserved.
486 * Possibly we could add an iterator field for the parent iterator when
487 * an iterator is a copy - for now, we'll just upgrade any other
488 * iterators with the same btree id.
490 * The code below used to be needed to ensure ancestor nodes get locked
491 * before interior nodes - now that's handled by
492 * bch2_btree_path_traverse_all().
494 trans_for_each_path(trans, linked)
495 if (linked != path &&
496 linked->cached == path->cached &&
497 linked->btree_id == path->btree_id &&
498 linked->locks_want < new_locks_want) {
499 linked->locks_want = new_locks_want;
500 btree_path_get_locks(trans, linked, true, _THIS_IP_);
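/*
 * Illustration of the problem described in the comment above: if this
 * path is a clone and the upgrade fails, raising only its own
 * locks_want helps the next traverse of *this* path, but a transaction
 * restart may rebuild the iterator from the original path - so the
 * loop above also raises locks_want on every other path in the same
 * btree so the intent locks get taken after the restart.
 */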
506 void __bch2_btree_path_downgrade(struct btree_path *path,
507 unsigned new_locks_want)
511 EBUG_ON(path->locks_want < new_locks_want);
513 path->locks_want = new_locks_want;
515 while (path->nodes_locked &&
516 (l = __fls(path->nodes_locked)) >= path->locks_want) {
517 if (l > path->level) {
518 btree_node_unlock(path, l);
520 if (btree_node_intent_locked(path, l)) {
521 six_lock_downgrade(&path->l[l].b->c.lock);
522 path->nodes_intent_locked ^= 1 << l;
528 bch2_btree_path_verify_locks(path);
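/*
 * Example (sketch): for a leaf path (level 0) whose locks_want drops
 * from 3 to 1, the loop above unlocks levels 2 and 1 outright and
 * leaves the leaf's lock alone; a no-longer-wanted lock at the path's
 * own level is downgraded from intent to read rather than dropped.
 */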
531 void bch2_trans_downgrade(struct btree_trans *trans)
533 struct btree_path *path;
535 trans_for_each_path(trans, path)
536 bch2_btree_path_downgrade(path);
539 /* Btree transaction locking: */
541 bool bch2_trans_relock(struct btree_trans *trans)
543 struct btree_path *path;
545 if (unlikely(trans->restarted))
548 trans_for_each_path(trans, path)
549 if (path->should_be_locked &&
550 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
551 trace_trans_restart_relock(trans->fn, _RET_IP_,
552 path->btree_id, &path->pos);
553 BUG_ON(!trans->restarted);
559 void bch2_trans_unlock(struct btree_trans *trans)
561 struct btree_path *path;
563 trans_for_each_path(trans, path)
564 __bch2_btree_path_unlock(path);
566 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
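/*
 * Typical caller pattern (a sketch mirroring bch2_trans_cond_resched()
 * at the top of this file; exact error handling varies by caller):
 *
 *	bch2_trans_unlock(trans);
 *	...do something that may sleep or block...
 *	if (!bch2_trans_relock(trans))
 *		return -EINTR;		(transaction restarted, caller retries)
 *
 * bch2_trans_relock() only relocks paths with should_be_locked set, and
 * restarts the transaction if any of them can no longer be relocked.
 */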
569 /* Btree iterator: */
571 #ifdef CONFIG_BCACHEFS_DEBUG
573 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
574 struct btree_path *path)
576 struct bkey_cached *ck;
577 bool locked = btree_node_locked(path, 0);
579 if (!bch2_btree_node_relock(trans, path, 0))
582 ck = (void *) path->l[0].b;
583 BUG_ON(ck->key.btree_id != path->btree_id ||
584 bkey_cmp(ck->key.pos, path->pos));
587 btree_node_unlock(path, 0);
590 static void bch2_btree_path_verify_level(struct btree_trans *trans,
591 struct btree_path *path, unsigned level)
593 struct btree_path_level *l;
594 struct btree_node_iter tmp;
596 struct bkey_packed *p, *k;
597 char buf1[100], buf2[100], buf3[100];
600 if (!bch2_debug_check_iterators)
605 locked = btree_node_locked(path, level);
609 bch2_btree_path_verify_cached(trans, path);
613 if (!btree_path_node(path, level))
616 if (!bch2_btree_node_relock(trans, path, level))
619 BUG_ON(!btree_path_pos_in_node(path, l->b));
621 bch2_btree_node_iter_verify(&l->iter, l->b);
624 * For interior nodes, the iterator will have skipped past deleted keys:
627 ? bch2_btree_node_iter_prev(&tmp, l->b)
628 : bch2_btree_node_iter_prev_all(&tmp, l->b);
629 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
631 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
636 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
642 btree_node_unlock(path, level);
645 strcpy(buf2, "(none)");
646 strcpy(buf3, "(none)");
648 bch2_bpos_to_text(&PBUF(buf1), path->pos);
651 struct bkey uk = bkey_unpack_key(l->b, p);
652 bch2_bkey_to_text(&PBUF(buf2), &uk);
656 struct bkey uk = bkey_unpack_key(l->b, k);
657 bch2_bkey_to_text(&PBUF(buf3), &uk);
660 panic("path should be %s key at level %u:\n"
664 msg, level, buf1, buf2, buf3);
667 static void bch2_btree_path_verify(struct btree_trans *trans,
668 struct btree_path *path)
670 struct bch_fs *c = trans->c;
673 EBUG_ON(path->btree_id >= BTREE_ID_NR);
675 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
677 BUG_ON(!path->cached &&
678 c->btree_roots[path->btree_id].b->c.level > i);
682 bch2_btree_path_verify_level(trans, path, i);
685 bch2_btree_path_verify_locks(path);
688 void bch2_trans_verify_paths(struct btree_trans *trans)
690 struct btree_path *path;
692 trans_for_each_path(trans, path)
693 bch2_btree_path_verify(trans, path);
696 static void bch2_btree_iter_verify(struct btree_iter *iter)
698 struct btree_trans *trans = iter->trans;
700 BUG_ON(iter->btree_id >= BTREE_ID_NR);
702 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
704 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
705 iter->pos.snapshot != iter->snapshot);
707 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
708 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
710 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
711 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
712 !btree_type_has_snapshots(iter->btree_id));
714 bch2_btree_path_verify(trans, iter->path);
717 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
719 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
720 !iter->pos.snapshot);
722 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
723 iter->pos.snapshot != iter->snapshot);
725 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
726 bkey_cmp(iter->pos, iter->k.p) > 0);
729 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
731 struct btree_trans *trans = iter->trans;
732 struct btree_iter copy;
733 struct bkey_s_c prev;
736 if (!bch2_debug_check_iterators)
739 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
742 if (bkey_err(k) || !k.k)
745 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
749 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
750 BTREE_ITER_NOPRESERVE|
751 BTREE_ITER_ALL_SNAPSHOTS);
752 prev = bch2_btree_iter_prev(&copy);
756 ret = bkey_err(prev);
760 if (!bkey_cmp(prev.k->p, k.k->p) &&
761 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
762 prev.k->p.snapshot) > 0) {
763 char buf1[100], buf2[200];
765 bch2_bkey_to_text(&PBUF(buf1), k.k);
766 bch2_bkey_to_text(&PBUF(buf2), prev.k);
768 panic("iter snap %u\n"
775 bch2_trans_iter_exit(trans, &copy);
779 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
780 struct bpos pos, bool key_cache)
782 struct btree_path *path;
786 trans_for_each_path_inorder(trans, path, idx) {
787 int cmp = cmp_int(path->btree_id, id) ?:
788 cmp_int(path->cached, key_cache);
795 if (!(path->nodes_locked & 1) ||
796 !path->should_be_locked)
800 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
801 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
804 if (!bkey_cmp(pos, path->pos))
809 bch2_dump_trans_paths_updates(trans);
810 panic("not locked: %s %s%s\n",
812 (bch2_bpos_to_text(&PBUF(buf), pos), buf),
813 key_cache ? " cached" : "");
818 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
819 struct btree_path *path, unsigned l) {}
820 static inline void bch2_btree_path_verify(struct btree_trans *trans,
821 struct btree_path *path) {}
822 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
823 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
824 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
828 /* Btree path: fixups after btree updates */
830 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
833 struct bkey_packed *k)
835 struct btree_node_iter_set *set;
837 btree_node_iter_for_each(iter, set)
838 if (set->end == t->end_offset) {
839 set->k = __btree_node_key_to_offset(b, k);
840 bch2_btree_node_iter_sort(iter, b);
844 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
847 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
849 struct bkey_packed *where)
851 struct btree_path_level *l = &path->l[b->c.level];
853 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
856 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
857 bch2_btree_node_iter_advance(&l->iter, l->b);
860 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
862 struct bkey_packed *where)
864 struct btree_path *path;
866 trans_for_each_path_with_node(trans, b, path) {
867 __bch2_btree_path_fix_key_modified(path, b, where);
868 bch2_btree_path_verify_level(trans, path, b->c.level);
872 static void __bch2_btree_node_iter_fix(struct btree_path *path,
874 struct btree_node_iter *node_iter,
876 struct bkey_packed *where,
877 unsigned clobber_u64s,
880 const struct bkey_packed *end = btree_bkey_last(b, t);
881 struct btree_node_iter_set *set;
882 unsigned offset = __btree_node_key_to_offset(b, where);
883 int shift = new_u64s - clobber_u64s;
884 unsigned old_end = t->end_offset - shift;
885 unsigned orig_iter_pos = node_iter->data[0].k;
886 bool iter_current_key_modified =
887 orig_iter_pos >= offset &&
888 orig_iter_pos <= offset + clobber_u64s;
890 btree_node_iter_for_each(node_iter, set)
891 if (set->end == old_end)
894 /* didn't find the bset in the iterator - might have to re-add it: */
896 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
897 bch2_btree_node_iter_push(node_iter, b, where, end);
900 /* Iterator is after key that changed */
904 set->end = t->end_offset;
906 /* Iterator hasn't gotten to the key that changed yet: */
911 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
913 } else if (set->k < offset + clobber_u64s) {
914 set->k = offset + new_u64s;
915 if (set->k == set->end)
916 bch2_btree_node_iter_set_drop(node_iter, set);
918 /* Iterator is after key that changed */
919 set->k = (int) set->k + shift;
923 bch2_btree_node_iter_sort(node_iter, b);
925 if (node_iter->data[0].k != orig_iter_pos)
926 iter_current_key_modified = true;
929 * When a new key is added, and the node iterator now points to that
930 * key, the iterator might have skipped past deleted keys that should
931 * come after the key the iterator now points to. We have to rewind to
932 * before those deleted keys - otherwise
933 * bch2_btree_node_iter_prev_all() breaks:
935 if (!bch2_btree_node_iter_end(node_iter) &&
936 iter_current_key_modified &&
939 struct bkey_packed *k, *k2, *p;
941 k = bch2_btree_node_iter_peek_all(node_iter, b);
943 for_each_bset(b, t) {
944 bool set_pos = false;
946 if (node_iter->data[0].end == t->end_offset)
949 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
951 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
952 bkey_iter_cmp(b, k, p) < 0) {
958 btree_node_iter_set_set_pos(node_iter,
964 void bch2_btree_node_iter_fix(struct btree_trans *trans,
965 struct btree_path *path,
967 struct btree_node_iter *node_iter,
968 struct bkey_packed *where,
969 unsigned clobber_u64s,
972 struct bset_tree *t = bch2_bkey_to_bset(b, where);
973 struct btree_path *linked;
975 if (node_iter != &path->l[b->c.level].iter) {
976 __bch2_btree_node_iter_fix(path, b, node_iter, t,
977 where, clobber_u64s, new_u64s);
979 if (bch2_debug_check_iterators)
980 bch2_btree_node_iter_verify(node_iter, b);
983 trans_for_each_path_with_node(trans, b, linked) {
984 __bch2_btree_node_iter_fix(linked, b,
985 &linked->l[b->c.level].iter, t,
986 where, clobber_u64s, new_u64s);
987 bch2_btree_path_verify_level(trans, linked, b->c.level);
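/*
 * Note (summarizing the above): this is called by the update path after
 * a bset in @b has been modified - @where, @clobber_u64s and @new_u64s
 * describe the edit - and it repairs the node iterator of every path
 * pointing into @b so each one still references the same position it
 * did before the keys were shifted.
 */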
991 /* Btree path level: pointer to a particular btree node and node iter */
993 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
994 struct btree_path_level *l,
996 struct bkey_packed *k)
1002 * signal to bch2_btree_iter_peek_slot() that we're currently at
1005 u->type = KEY_TYPE_deleted;
1006 return bkey_s_c_null;
1009 ret = bkey_disassemble(l->b, k, u);
1012 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
1013 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
1014 * being overwritten but doesn't change k->size. But this is ok, because
1015 * those keys are never written out, we just have to avoid a spurious assertion here:
1018 if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
1019 bch2_bkey_debugcheck(c, l->b, ret);
1024 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1025 struct btree_path_level *l,
1028 return __btree_iter_unpack(c, l, u,
1029 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1032 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
1033 struct btree_path *path,
1034 struct btree_path_level *l,
1037 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1038 bch2_btree_node_iter_peek(&l->iter, l->b));
1040 path->pos = k.k ? k.k->p : l->b->key.k.p;
1044 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
1045 struct btree_path *path,
1046 struct btree_path_level *l,
1049 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1050 bch2_btree_node_iter_prev(&l->iter, l->b));
1052 path->pos = k.k ? k.k->p : l->b->data->min_key;
1056 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1057 struct btree_path_level *l,
1060 struct bkey_packed *k;
1061 int nr_advanced = 0;
1063 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1064 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1065 if (max_advance > 0 && nr_advanced >= max_advance)
1068 bch2_btree_node_iter_advance(&l->iter, l->b);
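/*
 * Note: this is a cheap way of nudging an already-initialized node
 * iterator forward to path->pos without redoing a full search -
 * btree_path_set_pos() below passes max_advance = 8 and falls back to
 * __btree_path_level_init() if the position moved further than that.
 */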
1076 * Verify that iterator for parent node points to child node:
1078 static void btree_path_verify_new_node(struct btree_trans *trans,
1079 struct btree_path *path, struct btree *b)
1081 struct bch_fs *c = trans->c;
1082 struct btree_path_level *l;
1085 struct bkey_packed *k;
1087 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1090 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1093 plevel = b->c.level + 1;
1094 if (!btree_path_node(path, plevel))
1097 parent_locked = btree_node_locked(path, plevel);
1099 if (!bch2_btree_node_relock(trans, path, plevel))
1102 l = &path->l[plevel];
1103 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1106 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1111 struct bkey uk = bkey_unpack_key(b, k);
1113 bch2_dump_btree_node(c, l->b);
1114 bch2_bpos_to_text(&PBUF(buf1), path->pos);
1115 bch2_bkey_to_text(&PBUF(buf2), &uk);
1116 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
1117 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
1118 panic("parent iter doesn't point to new node:\n"
1122 bch2_btree_ids[path->btree_id], buf1,
1127 btree_node_unlock(path, plevel);
1130 static inline void __btree_path_level_init(struct btree_path *path,
1133 struct btree_path_level *l = &path->l[level];
1135 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1138 * Iterators to interior nodes should always be pointed at the first non-deleted key:
1142 bch2_btree_node_iter_peek(&l->iter, l->b);
1145 static inline void btree_path_level_init(struct btree_trans *trans,
1146 struct btree_path *path,
1149 BUG_ON(path->cached);
1151 btree_path_verify_new_node(trans, path, b);
1153 EBUG_ON(!btree_path_pos_in_node(path, b));
1154 EBUG_ON(b->c.lock.state.seq & 1);
1156 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1157 path->l[b->c.level].b = b;
1158 __btree_path_level_init(path, b->c.level);
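/*
 * Note: lock_seq records the SIX lock sequence number at the time @b
 * was locked; it's what lets bch2_btree_node_relock() re-take the lock
 * cheaply later via six_relock_type() - if the sequence number has
 * changed, the node was write locked in the meantime and the path has
 * to be re-traversed instead.
 */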
1161 /* Btree path: fixups after btree node updates: */
1164 * A btree node is being replaced - update the iterator to point to the new
1167 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1169 struct btree_path *path;
1171 trans_for_each_path(trans, path)
1172 if (!path->cached &&
1173 btree_path_pos_in_node(path, b)) {
1174 enum btree_node_locked_type t =
1175 btree_lock_want(path, b->c.level);
1177 if (path->nodes_locked &&
1178 t != BTREE_NODE_UNLOCKED) {
1179 btree_node_unlock(path, b->c.level);
1180 six_lock_increment(&b->c.lock, t);
1181 mark_btree_node_locked(path, b->c.level, t);
1184 btree_path_level_init(trans, path, b);
1189 * A btree node has been modified in such a way as to invalidate iterators - fix
1192 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1194 struct btree_path *path;
1196 trans_for_each_path_with_node(trans, b, path)
1197 __btree_path_level_init(path, b->c.level);
1200 /* Btree path: traverse, set_pos: */
1202 static int lock_root_check_fn(struct six_lock *lock, void *p)
1204 struct btree *b = container_of(lock, struct btree, c.lock);
1205 struct btree **rootp = p;
1207 return b == *rootp ? 0 : -1;
1210 static inline int btree_path_lock_root(struct btree_trans *trans,
1211 struct btree_path *path,
1212 unsigned depth_want,
1213 unsigned long trace_ip)
1215 struct bch_fs *c = trans->c;
1216 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1217 enum six_lock_type lock_type;
1220 EBUG_ON(path->nodes_locked);
1223 b = READ_ONCE(*rootp);
1224 path->level = READ_ONCE(b->c.level);
1226 if (unlikely(path->level < depth_want)) {
1228 * the root is at a lower depth than the depth we want:
1229 * got to the end of the btree, or we're walking nodes
1230 * greater than some depth and there are no nodes >= that depth
1233 path->level = depth_want;
1234 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1235 path->l[i].b = NULL;
1239 lock_type = __btree_lock_want(path, path->level);
1240 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1241 path->level, lock_type,
1242 lock_root_check_fn, rootp,
1244 if (trans->restarted)
1249 if (likely(b == READ_ONCE(*rootp) &&
1250 b->c.level == path->level &&
1252 for (i = 0; i < path->level; i++)
1253 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1254 path->l[path->level].b = b;
1255 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1256 path->l[i].b = NULL;
1258 mark_btree_node_locked(path, path->level, lock_type);
1259 btree_path_level_init(trans, path, b);
1263 six_unlock_type(&b->c.lock, lock_type);
1268 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1270 struct bch_fs *c = trans->c;
1271 struct btree_path_level *l = path_l(path);
1272 struct btree_node_iter node_iter = l->iter;
1273 struct bkey_packed *k;
1274 struct bkey_buf tmp;
1275 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1276 ? (path->level > 1 ? 0 : 2)
1277 : (path->level > 1 ? 1 : 16);
1278 bool was_locked = btree_node_locked(path, path->level);
1281 bch2_bkey_buf_init(&tmp);
1283 while (nr && !ret) {
1284 if (!bch2_btree_node_relock(trans, path, path->level))
1287 bch2_btree_node_iter_advance(&node_iter, l->b);
1288 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1292 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1293 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1298 btree_node_unlock(path, path->level);
1300 bch2_bkey_buf_exit(&tmp, c);
1304 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
1305 struct btree_and_journal_iter *jiter)
1307 struct bch_fs *c = trans->c;
1309 struct bkey_buf tmp;
1310 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1311 ? (path->level > 1 ? 0 : 2)
1312 : (path->level > 1 ? 1 : 16);
1313 bool was_locked = btree_node_locked(path, path->level);
1316 bch2_bkey_buf_init(&tmp);
1318 while (nr && !ret) {
1319 if (!bch2_btree_node_relock(trans, path, path->level))
1322 bch2_btree_and_journal_iter_advance(jiter);
1323 k = bch2_btree_and_journal_iter_peek(jiter);
1327 bch2_bkey_buf_reassemble(&tmp, c, k);
1328 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1333 btree_node_unlock(path, path->level);
1335 bch2_bkey_buf_exit(&tmp, c);
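/*
 * Note on the prefetch counts in the two functions above: when
 * descending towards the leaves, up to 16 child nodes are prefetched
 * while the filesystem is still starting up (BCH_FS_STARTED not yet
 * set), but only a couple afterwards; for interior children almost
 * nothing is prefetched.
 */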
1339 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1340 struct btree_path *path,
1341 unsigned plevel, struct btree *b)
1343 struct btree_path_level *l = &path->l[plevel];
1344 bool locked = btree_node_locked(path, plevel);
1345 struct bkey_packed *k;
1346 struct bch_btree_ptr_v2 *bp;
1348 if (!bch2_btree_node_relock(trans, path, plevel))
1351 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1352 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1354 bp = (void *) bkeyp_val(&l->b->format, k);
1355 bp->mem_ptr = (unsigned long)b;
1358 btree_node_unlock(path, plevel);
1361 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
1362 struct btree_path *path,
1364 struct bkey_buf *out)
1366 struct bch_fs *c = trans->c;
1367 struct btree_path_level *l = path_l(path);
1368 struct btree_and_journal_iter jiter;
1372 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
1374 k = bch2_btree_and_journal_iter_peek(&jiter);
1376 bch2_bkey_buf_reassemble(out, c, k);
1378 if (flags & BTREE_ITER_PREFETCH)
1379 ret = btree_path_prefetch_j(trans, path, &jiter);
1381 bch2_btree_and_journal_iter_exit(&jiter);
1385 static __always_inline int btree_path_down(struct btree_trans *trans,
1386 struct btree_path *path,
1388 unsigned long trace_ip)
1390 struct bch_fs *c = trans->c;
1391 struct btree_path_level *l = path_l(path);
1393 unsigned level = path->level - 1;
1394 enum six_lock_type lock_type = __btree_lock_want(path, level);
1395 bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
1396 struct bkey_buf tmp;
1399 EBUG_ON(!btree_node_locked(path, path->level));
1401 bch2_bkey_buf_init(&tmp);
1403 if (unlikely(!replay_done)) {
1404 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
1408 bch2_bkey_buf_unpack(&tmp, c, l->b,
1409 bch2_btree_node_iter_peek(&l->iter, l->b));
1411 if (flags & BTREE_ITER_PREFETCH) {
1412 ret = btree_path_prefetch(trans, path);
1418 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1419 ret = PTR_ERR_OR_ZERO(b);
1423 mark_btree_node_locked(path, level, lock_type);
1424 btree_path_level_init(trans, path, b);
1426 if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
1427 unlikely(b != btree_node_mem_ptr(tmp.k)))
1428 btree_node_mem_ptr_set(trans, path, level + 1, b);
1430 if (btree_node_read_locked(path, level + 1))
1431 btree_node_unlock(path, level + 1);
1432 path->level = level;
1434 bch2_btree_path_verify_locks(path);
1436 bch2_bkey_buf_exit(&tmp, c);
1440 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1441 unsigned, unsigned long);
1443 static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
1444 unsigned long trace_ip)
1446 struct bch_fs *c = trans->c;
1447 struct btree_path *path;
1450 if (trans->in_traverse_all)
1453 trans->in_traverse_all = true;
1455 trans->restarted = false;
1457 trans_for_each_path(trans, path)
1458 path->should_be_locked = false;
1460 btree_trans_verify_sorted(trans);
1462 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1463 struct btree_path *path1 = trans->paths + trans->sorted[i];
1464 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1466 if (path1->btree_id == path2->btree_id &&
1467 path1->locks_want < path2->locks_want)
1468 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1469 else if (!path1->locks_want && path2->locks_want)
1470 __bch2_btree_path_upgrade(trans, path1, 1);
1473 bch2_trans_unlock(trans);
1476 if (unlikely(ret == -ENOMEM)) {
1479 closure_init_stack(&cl);
1482 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1487 if (unlikely(ret == -EIO))
1490 BUG_ON(ret && ret != -EINTR);
1492 /* Now, redo traversals in correct order: */
1494 while (i < trans->nr_sorted) {
1495 path = trans->paths + trans->sorted[i];
1497 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1499 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1503 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1505 if (path->nodes_locked ||
1506 !btree_path_node(path, path->level))
1511 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1512 * and relock(), relock() won't relock since path->should_be_locked
1513 * isn't set yet, which is all fine
1515 trans_for_each_path(trans, path)
1516 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1518 bch2_btree_cache_cannibalize_unlock(c);
1520 trans->in_traverse_all = false;
1522 trace_trans_traverse_all(trans->fn, trace_ip);
1526 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1528 return __btree_path_traverse_all(trans, 0, _RET_IP_);
1531 static inline bool btree_path_good_node(struct btree_trans *trans,
1532 struct btree_path *path,
1533 unsigned l, int check_pos)
1535 if (!is_btree_node(path, l) ||
1536 !bch2_btree_node_relock(trans, path, l))
1539 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1541 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1546 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1547 struct btree_path *path,
1550 unsigned i, l = path->level;
1552 while (btree_path_node(path, l) &&
1553 !btree_path_good_node(trans, path, l, check_pos)) {
1554 btree_node_unlock(path, l);
1555 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1559 /* If we need intent locks, take them too: */
1561 i < path->locks_want && btree_path_node(path, i);
1563 if (!bch2_btree_node_relock(trans, path, i))
1565 btree_node_unlock(path, l);
1566 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1574 * This is the main state machine for walking down the btree - walks down to a specified level
1577 * Returns 0 on success, -EIO on error (error reading in a btree node).
1579 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1580 * stashed in the iterator and returned from bch2_trans_exit().
1582 static int btree_path_traverse_one(struct btree_trans *trans,
1583 struct btree_path *path,
1585 unsigned long trace_ip)
1587 unsigned depth_want = path->level;
1590 if (unlikely(trans->restarted)) {
1596 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1597 * and re-traverse the path without a transaction restart:
1599 if (path->should_be_locked) {
1600 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1605 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1609 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1612 path->level = btree_path_up_until_good_node(trans, path, 0);
1615 * Note: path->l[path->level].b may be temporarily NULL here - that
1616 * would indicate to other code that we got to the end of the btree,
1617 * here it indicates that relocking the root failed - it's critical that
1618 * btree_path_lock_root() comes next and that it can't fail
1620 while (path->level > depth_want) {
1621 ret = btree_path_node(path, path->level)
1622 ? btree_path_down(trans, path, flags, trace_ip)
1623 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1624 if (unlikely(ret)) {
1627 * No nodes at this level - got to the end of the btree:
1634 __bch2_btree_path_unlock(path);
1635 path->level = depth_want;
1638 path->l[path->level].b =
1639 BTREE_ITER_NO_NODE_ERROR;
1641 path->l[path->level].b =
1642 BTREE_ITER_NO_NODE_DOWN;
1647 path->uptodate = BTREE_ITER_UPTODATE;
1649 BUG_ON((ret == -EINTR) != !!trans->restarted);
1650 bch2_btree_path_verify(trans, path);
1654 static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long);
1656 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1657 struct btree_path *path, unsigned flags)
1659 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1662 return bch2_trans_cond_resched(trans) ?:
1663 btree_path_traverse_one(trans, path, flags, _RET_IP_);
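/*
 * Typical usage within this file (sketch; error labels are
 * illustrative):
 *
 *	ret = bch2_btree_path_traverse(trans, path, flags);
 *	if (ret)
 *		goto err;		(-EINTR means the transaction restarted)
 *	k = bch2_btree_path_peek_slot(path, &u);
 *
 * i.e. traverse gets the path uptodate and locked down to path->level;
 * only then may the per-level node iterators be used.
 */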
1666 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1667 struct btree_path *src)
1671 memcpy(&dst->pos, &src->pos,
1672 sizeof(struct btree_path) - offsetof(struct btree_path, pos));
1674 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1675 if (btree_node_locked(dst, i))
1676 six_lock_increment(&dst->l[i].b->c.lock,
1677 __btree_lock_want(dst, i));
1679 btree_path_check_sort(trans, dst, 0);
1682 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1685 struct btree_path *new = btree_path_alloc(trans, src);
1687 btree_path_copy(trans, new, src);
1688 __btree_path_get(new, intent);
1692 inline struct btree_path * __must_check
1693 bch2_btree_path_make_mut(struct btree_trans *trans,
1694 struct btree_path *path, bool intent,
1697 if (path->ref > 1 || path->preserve) {
1698 __btree_path_put(path, intent);
1699 path = btree_path_clone(trans, path, intent);
1700 path->preserve = false;
1701 #ifdef CONFIG_BCACHEFS_DEBUG
1702 path->ip_allocated = ip;
1704 btree_trans_verify_sorted(trans);
1710 static struct btree_path * __must_check
1711 btree_path_set_pos(struct btree_trans *trans,
1712 struct btree_path *path, struct bpos new_pos,
1713 bool intent, unsigned long ip)
1715 int cmp = bpos_cmp(new_pos, path->pos);
1716 unsigned l = path->level;
1718 EBUG_ON(trans->restarted);
1719 EBUG_ON(!path->ref);
1724 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1726 path->pos = new_pos;
1727 path->should_be_locked = false;
1729 btree_path_check_sort(trans, path, cmp);
1731 if (unlikely(path->cached)) {
1732 btree_node_unlock(path, 0);
1733 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1734 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1738 l = btree_path_up_until_good_node(trans, path, cmp);
1740 if (btree_path_node(path, l)) {
1742 * We might have to skip over many keys, or just a few: try
1743 * advancing the node iterator, and if we have to skip over too
1744 * many keys just reinit it (or if we're rewinding, since that
1748 !btree_path_advance_to_pos(path, &path->l[l], 8))
1749 __btree_path_level_init(path, l);
1752 if (l != path->level) {
1753 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1754 __bch2_btree_path_unlock(path);
1757 bch2_btree_path_verify(trans, path);
1761 /* Btree path: main interface: */
1763 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1765 struct btree_path *next;
1767 next = prev_btree_path(trans, path);
1768 if (next && !btree_path_cmp(next, path))
1771 next = next_btree_path(trans, path);
1772 if (next && !btree_path_cmp(next, path))
1778 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1780 struct btree_path *next;
1782 next = prev_btree_path(trans, path);
1783 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1786 next = next_btree_path(trans, path);
1787 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1793 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1795 __bch2_btree_path_unlock(path);
1796 btree_path_list_remove(trans, path);
1797 trans->paths_allocated &= ~(1ULL << path->idx);
1800 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1802 struct btree_path *dup;
1804 EBUG_ON(trans->paths + path->idx != path);
1805 EBUG_ON(!path->ref);
1807 if (!__btree_path_put(path, intent))
1811 * Perhaps instead we should check for duplicate paths in traverse_all:
1813 if (path->preserve &&
1814 (dup = have_path_at_pos(trans, path))) {
1815 dup->preserve = true;
1816 path->preserve = false;
1820 if (!path->preserve &&
1821 (dup = have_node_at_pos(trans, path)))
1825 if (path->should_be_locked &&
1826 !btree_node_locked(dup, path->level))
1829 dup->should_be_locked |= path->should_be_locked;
1830 __bch2_path_free(trans, path);
1834 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1836 struct btree_path *path;
1837 struct btree_insert_entry *i;
1839 char buf1[300], buf2[300];
1841 btree_trans_verify_sorted(trans);
1843 trans_for_each_path_inorder(trans, path, idx)
1844 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
1845 path->idx, path->ref, path->intent_ref,
1846 path->should_be_locked ? " S" : "",
1847 path->preserve ? " P" : "",
1848 bch2_btree_ids[path->btree_id],
1849 (bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1),
1851 #ifdef CONFIG_BCACHEFS_DEBUG
1852 (void *) path->ip_allocated
1858 trans_for_each_update(trans, i) {
1860 struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
1862 printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
1863 bch2_btree_ids[i->btree_id],
1864 (void *) i->ip_allocated,
1865 (bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1),
1866 (bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
1870 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1871 struct btree_path *pos)
1873 struct btree_path *path;
1876 if (unlikely(trans->paths_allocated ==
1877 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1878 bch2_dump_trans_paths_updates(trans);
1879 panic("trans path overflow\n");
1882 idx = __ffs64(~trans->paths_allocated);
1883 trans->paths_allocated |= 1ULL << idx;
1885 path = &trans->paths[idx];
1889 path->intent_ref = 0;
1890 path->nodes_locked = 0;
1891 path->nodes_intent_locked = 0;
1893 btree_path_list_add(trans, pos, path);
1897 struct btree_path *bch2_path_get(struct btree_trans *trans,
1898 enum btree_id btree_id, struct bpos pos,
1899 unsigned locks_want, unsigned level,
1900 unsigned flags, unsigned long ip)
1902 struct btree_path *path, *path_pos = NULL;
1903 bool cached = flags & BTREE_ITER_CACHED;
1904 bool intent = flags & BTREE_ITER_INTENT;
1907 BUG_ON(trans->restarted);
1909 trans_for_each_path_inorder(trans, path, i) {
1910 if (__btree_path_cmp(path,
1921 path_pos->cached == cached &&
1922 path_pos->btree_id == btree_id &&
1923 path_pos->level == level) {
1924 __btree_path_get(path_pos, intent);
1925 path = btree_path_set_pos(trans, path_pos, pos, intent, ip);
1927 path = btree_path_alloc(trans, path_pos);
1930 __btree_path_get(path, intent);
1932 path->btree_id = btree_id;
1933 path->cached = cached;
1934 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1935 path->should_be_locked = false;
1936 path->level = level;
1937 path->locks_want = locks_want;
1938 path->nodes_locked = 0;
1939 path->nodes_intent_locked = 0;
1940 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1941 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1942 #ifdef CONFIG_BCACHEFS_DEBUG
1943 path->ip_allocated = ip;
1945 btree_trans_verify_sorted(trans);
1948 if (!(flags & BTREE_ITER_NOPRESERVE))
1949 path->preserve = true;
1951 if (path->intent_ref)
1952 locks_want = max(locks_want, level + 1);
1955 * If the path has locks_want greater than requested, we don't downgrade
1956 * it here - on transaction restart because btree node split needs to
1957 * upgrade locks, we might be putting/getting the iterator again.
1958 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1959 * a successful transaction commit.
1962 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1963 if (locks_want > path->locks_want) {
1964 path->locks_want = locks_want;
1965 btree_path_get_locks(trans, path, true, _THIS_IP_);
1971 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1976 BUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1978 if (!path->cached) {
1979 struct btree_path_level *l = path_l(path);
1980 struct bkey_packed *_k =
1981 bch2_btree_node_iter_peek_all(&l->iter, l->b);
1983 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1985 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1987 if (!k.k || bpos_cmp(path->pos, k.k->p))
1990 struct bkey_cached *ck = (void *) path->l[0].b;
1992 EBUG_ON(path->btree_id != ck->key.btree_id ||
1993 bkey_cmp(path->pos, ck->key.pos));
1995 /* BTREE_ITER_CACHED_NOFILL? */
1996 if (unlikely(!ck->valid))
1999 k = bkey_i_to_s_c(ck->k);
2006 return (struct bkey_s_c) { u, NULL };
2009 /* Btree iterators: */
2012 __bch2_btree_iter_traverse(struct btree_iter *iter)
2014 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2018 bch2_btree_iter_traverse(struct btree_iter *iter)
2022 iter->path = btree_path_set_pos(iter->trans, iter->path,
2023 btree_iter_search_key(iter),
2024 iter->flags & BTREE_ITER_INTENT,
2025 btree_iter_ip_allocated(iter));
2027 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2031 iter->path->should_be_locked = true;
2035 /* Iterate across nodes (leaf and interior nodes) */
2037 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2039 struct btree_trans *trans = iter->trans;
2040 struct btree *b = NULL;
2043 EBUG_ON(iter->path->cached);
2044 bch2_btree_iter_verify(iter);
2046 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2050 b = btree_path_node(iter->path, iter->path->level);
2054 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2056 bkey_init(&iter->k);
2057 iter->k.p = iter->pos = b->key.k.p;
2059 iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
2060 iter->flags & BTREE_ITER_INTENT,
2061 btree_iter_ip_allocated(iter));
2062 iter->path->should_be_locked = true;
2063 BUG_ON(iter->path->uptodate);
2065 bch2_btree_iter_verify_entry_exit(iter);
2066 bch2_btree_iter_verify(iter);
2074 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2076 struct btree_trans *trans = iter->trans;
2077 struct btree_path *path = iter->path;
2078 struct btree *b = NULL;
2082 BUG_ON(trans->restarted);
2083 EBUG_ON(iter->path->cached);
2084 bch2_btree_iter_verify(iter);
2086 /* already at end? */
2087 if (!btree_path_node(path, path->level))
2091 if (!btree_path_node(path, path->level + 1)) {
2092 btree_node_unlock(path, path->level);
2093 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2098 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2099 __bch2_btree_path_unlock(path);
2100 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2101 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2102 btree_trans_restart(trans);
2107 b = btree_path_node(path, path->level + 1);
2109 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2110 btree_node_unlock(path, path->level);
2111 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2115 * Haven't gotten to the end of the parent node: go back down to
2116 * the next child node
2119 btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2120 iter->flags & BTREE_ITER_INTENT,
2121 btree_iter_ip_allocated(iter));
2123 path->level = iter->min_depth;
2125 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
2126 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
2127 btree_node_unlock(path, l);
2129 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2130 bch2_btree_iter_verify(iter);
2132 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2136 b = path->l[path->level].b;
2139 bkey_init(&iter->k);
2140 iter->k.p = iter->pos = b->key.k.p;
2142 iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
2143 iter->flags & BTREE_ITER_INTENT,
2144 btree_iter_ip_allocated(iter));
2145 iter->path->should_be_locked = true;
2146 BUG_ON(iter->path->uptodate);
2148 bch2_btree_iter_verify_entry_exit(iter);
2149 bch2_btree_iter_verify(iter);
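/*
 * Sketch of how the two node iterators above are normally driven
 * (illustrative only; real callers also check for error pointers):
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter))
 *		...visit b...
 *
 * peek_node positions the iterator on the node covering iter->pos at
 * the requested depth; next_node walks to the next node at
 * iter->min_depth in key order.
 */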
2157 /* Iterate across keys (in leaf nodes only) */
2159 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2161 struct bpos pos = iter->k.p;
2162 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2163 ? bpos_cmp(pos, SPOS_MAX)
2164 : bkey_cmp(pos, SPOS_MAX)) != 0;
2166 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2167 pos = bkey_successor(iter, pos);
2168 bch2_btree_iter_set_pos(iter, pos);
2172 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2174 struct bpos pos = bkey_start_pos(&iter->k);
2175 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2176 ? bpos_cmp(pos, POS_MIN)
2177 : bkey_cmp(pos, POS_MIN)) != 0;
2179 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2180 pos = bkey_predecessor(iter, pos);
2181 bch2_btree_iter_set_pos(iter, pos);
2186 struct bkey_i *__btree_trans_peek_journal(struct btree_trans *trans,
2187 struct btree_path *path)
2189 struct journal_keys *keys = &trans->c->journal_keys;
2190 size_t idx = bch2_journal_key_search(keys, path->btree_id,
2191 path->level, path->pos);
2193 while (idx < keys->nr && keys->d[idx].overwritten)
2196 return (idx < keys->nr &&
2197 keys->d[idx].btree_id == path->btree_id &&
2198 keys->d[idx].level == path->level)
2204 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2205 struct btree_iter *iter,
2208 struct bkey_i *next_journal =
2209 __btree_trans_peek_journal(trans, iter->path);
2212 bpos_cmp(next_journal->k.p,
2213 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2214 iter->k = next_journal->k;
2215 k = bkey_i_to_s_c(next_journal);
2222 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
2225 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
2227 struct btree_trans *trans = iter->trans;
2228 struct bpos search_key = btree_iter_search_key(iter);
2229 struct bkey_i *next_update;
2233 EBUG_ON(iter->path->cached || iter->path->level);
2234 bch2_btree_iter_verify(iter);
2235 bch2_btree_iter_verify_entry_exit(iter);
2238 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2239 iter->flags & BTREE_ITER_INTENT,
2240 btree_iter_ip_allocated(iter));
2242 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2243 if (unlikely(ret)) {
2244 /* ensure that iter->k is consistent with iter->pos: */
2245 bch2_btree_iter_set_pos(iter, iter->pos);
2246 k = bkey_s_c_err(ret);
2250 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2252 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2253 k = btree_trans_peek_journal(trans, iter, k);
2255 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2256 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2259 bpos_cmp(next_update->k.p,
2260 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2261 iter->k = next_update->k;
2262 k = bkey_i_to_s_c(next_update);
2265 if (k.k && bkey_deleted(k.k)) {
2267 * If we've got a whiteout, and it's after the search
2268 * key, advance the search key to the whiteout instead
2269 * of just after the whiteout - it might be a btree
2270 * whiteout, with a real key at the same position, since
2271 * in the btree, deleted keys sort before non-deleted keys.
2273 search_key = bpos_cmp(search_key, k.k->p)
2275 : bpos_successor(k.k->p);
2281 * We can never have a key in a leaf node at POS_MAX, so
2282 * we don't have to check these successor() calls:
2284 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2285 !bch2_snapshot_is_ancestor(trans->c,
2288 search_key = bpos_successor(k.k->p);
2292 if (bkey_whiteout(k.k) &&
2293 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2294 search_key = bkey_successor(iter, k.k->p);
2299 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2300 /* Advance to next leaf node: */
2301 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2304 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2311 * iter->pos should be monotonically increasing, and always be equal to
2312 * the key we just returned - except extents can straddle iter->pos:
2314 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2316 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2317 iter->pos = bkey_start_pos(k.k);
2319 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2320 iter->pos.snapshot = iter->snapshot;
2322 iter->path = btree_path_set_pos(trans, iter->path, k.k->p,
2323 iter->flags & BTREE_ITER_INTENT,
2324 btree_iter_ip_allocated(iter));
2325 BUG_ON(!iter->path->nodes_locked);
2327 iter->path->should_be_locked = true;
2329 bch2_btree_iter_verify_entry_exit(iter);
2330 bch2_btree_iter_verify(iter);
2331 ret = bch2_btree_iter_verify_ret(iter, k);
2333 return bkey_s_c_err(ret);
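/*
 * Sketch of the usual peek/next loop (illustrative only):
 *
 *	for (k = bch2_btree_iter_peek(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(&iter))
 *		...process k...
 *
 * where a -EINTR from bkey_err() means the transaction was restarted
 * and the whole loop should be retried from the top.
 */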
2339 * bch2_btree_iter_next: returns first key greater than iterator's current position
2342 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2344 if (!bch2_btree_iter_advance(iter))
2345 return bkey_s_c_null;
2347 return bch2_btree_iter_peek(iter);
2351 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2352 * iterator's current position
2354 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2356 struct btree_trans *trans = iter->trans;
2357 struct bpos search_key = iter->pos;
2358 struct btree_path *saved_path = NULL;
2360 struct bkey saved_k;
2361 const struct bch_val *saved_v;
2364 EBUG_ON(iter->path->cached || iter->path->level);
2365 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2367 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2368 return bkey_s_c_err(-EIO);
2370 bch2_btree_iter_verify(iter);
2371 bch2_btree_iter_verify_entry_exit(iter);
2373 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2374 search_key.snapshot = U32_MAX;
2377 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2378 iter->flags & BTREE_ITER_INTENT,
2379 btree_iter_ip_allocated(iter));
2381 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2382 if (unlikely(ret)) {
2383 /* ensure that iter->k is consistent with iter->pos: */
2384 bch2_btree_iter_set_pos(iter, iter->pos);
2385 k = bkey_s_c_err(ret);
2389 k = btree_path_level_peek(trans->c, iter->path,
2390 &iter->path->l[0], &iter->k);
2392 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2393 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2394 : bpos_cmp(k.k->p, search_key) > 0))
2395 k = btree_path_level_prev(trans->c, iter->path,
2396 &iter->path->l[0], &iter->k);
2398 btree_path_check_sort(trans, iter->path, 0);
2401 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2402 if (k.k->p.snapshot == iter->snapshot)
2406 * If we have a saved candidate, and we're no
2407 * longer at the same _key_ (not pos), return
2410 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2411 bch2_path_put(trans, iter->path,
2412 iter->flags & BTREE_ITER_INTENT);
2413 iter->path = saved_path;
2420 if (bch2_snapshot_is_ancestor(iter->trans->c,
2424 bch2_path_put(trans, saved_path,
2425 iter->flags & BTREE_ITER_INTENT);
2426 saved_path = btree_path_clone(trans, iter->path,
2427 iter->flags & BTREE_ITER_INTENT);
2432 search_key = bpos_predecessor(k.k->p);
2436 if (bkey_whiteout(k.k) &&
2437 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2438 search_key = bkey_predecessor(iter, k.k->p);
2439 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2440 search_key.snapshot = U32_MAX;
2445 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2446 /* Advance to previous leaf node: */
2447 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2449 /* Start of btree: */
2450 bch2_btree_iter_set_pos(iter, POS_MIN);
2456 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2458 /* Extents can straddle iter->pos: */
2459 if (bkey_cmp(k.k->p, iter->pos) < 0)
2462 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2463 iter->pos.snapshot = iter->snapshot;
2466 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2467 iter->path->should_be_locked = true;
2469 bch2_btree_iter_verify_entry_exit(iter);
2470 bch2_btree_iter_verify(iter);
2476 * bch2_btree_iter_prev: returns first key less than iterator's current position
2479 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2481 if (!bch2_btree_iter_rewind(iter))
2482 return bkey_s_c_null;
2484 return bch2_btree_iter_peek_prev(iter);
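/*
 * Example: backwards iteration with peek_prev()/prev(). A sketch only; the
 * btree id and starting position (here POS_MAX) are placeholders:
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, POS_MAX, 0);
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_prev(&iter)) {
 *		(use k)
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */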
2487 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2489 struct btree_trans *trans = iter->trans;
2490 struct bpos search_key;
2494 EBUG_ON(iter->path->level);
2495 bch2_btree_iter_verify(iter);
2496 bch2_btree_iter_verify_entry_exit(iter);
2498 /* extents can't span inode numbers: */
2499 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2500 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2501 if (iter->pos.inode == KEY_INODE_MAX)
2502 return bkey_s_c_null;
2504 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2507 search_key = btree_iter_search_key(iter);
2508 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2509 iter->flags & BTREE_ITER_INTENT,
2510 btree_iter_ip_allocated(iter));
2512 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2513 if (unlikely(ret))
2514 return bkey_s_c_err(ret);
2516 if ((iter->flags & BTREE_ITER_CACHED) ||
2517 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2518 struct bkey_i *next_update;
2520 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2521 (next_update = btree_trans_peek_updates(trans,
2522 iter->btree_id, search_key)) &&
2523 !bpos_cmp(next_update->k.p, iter->pos)) {
2524 iter->k = next_update->k;
2525 k = bkey_i_to_s_c(next_update);
2529 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2530 (next_update = __btree_trans_peek_journal(trans, iter->path)) &&
2531 !bpos_cmp(next_update->k.p, iter->pos)) {
2532 iter->k = next_update->k;
2533 k = bkey_i_to_s_c(next_update);
2537 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2541 if (iter->flags & BTREE_ITER_INTENT) {
2542 struct btree_iter iter2;
2544 bch2_trans_copy_iter(&iter2, iter);
2545 k = bch2_btree_iter_peek(&iter2);
2547 if (k.k && !bkey_err(k)) {
2551 bch2_trans_iter_exit(trans, &iter2);
2553 struct bpos pos = iter->pos;
2555 k = bch2_btree_iter_peek(iter);
2559 if (unlikely(bkey_err(k))) return k;
2562 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2564 if (bkey_cmp(iter->pos, next) < 0) {
2565 bkey_init(&iter->k);
2566 iter->k.p = iter->pos;
2568 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2569 bch2_key_resize(&iter->k,
2570 min_t(u64, KEY_SIZE_MAX,
2571 (next.inode == iter->pos.inode ? next.offset : KEY_OFFSET_MAX) - iter->pos.offset));
2575 EBUG_ON(!iter->k.size);
2578 k = (struct bkey_s_c) { &iter->k, NULL };
2582 iter->path->should_be_locked = true;
2584 bch2_btree_iter_verify_entry_exit(iter);
2585 bch2_btree_iter_verify(iter);
2586 ret = bch2_btree_iter_verify_ret(iter, k);
2588 return unlikely(ret) ? bkey_s_c_err(ret) : k;
2593 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2595 if (!bch2_btree_iter_advance(iter))
2596 return bkey_s_c_null;
2598 return bch2_btree_iter_peek_slot(iter);
2601 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2603 if (!bch2_btree_iter_rewind(iter))
2604 return bkey_s_c_null;
2606 return bch2_btree_iter_peek_slot(iter);
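/*
 * Example: reading a single slot. Unlike peek(), peek_slot() returns a key
 * for the iterator's exact position even when nothing is stored there - in
 * that case iter->k is a zero size KEY_TYPE_deleted key (resized to span the
 * hole on extent btrees). A sketch, with btree_id/pos as placeholders:
 *
 *	bch2_trans_iter_init(&trans, &iter, btree_id, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret)
 *		(inspect k.k->type / k.v)
 *	bch2_trans_iter_exit(&trans, &iter);
 */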
2609 /* new transactional stuff: */
2611 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2612 struct btree_path *path)
2614 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2615 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2616 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2619 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2621 #ifdef CONFIG_BCACHEFS_DEBUG
2624 for (i = 0; i < trans->nr_sorted; i++)
2625 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2629 static void btree_trans_verify_sorted(struct btree_trans *trans)
2631 #ifdef CONFIG_BCACHEFS_DEBUG
2632 struct btree_path *path, *prev = NULL;
2635 trans_for_each_path_inorder(trans, path, i) {
2636 BUG_ON(prev && btree_path_cmp(prev, path) > 0);
2642 static inline void btree_path_swap(struct btree_trans *trans,
2643 struct btree_path *l, struct btree_path *r)
2645 swap(l->sorted_idx, r->sorted_idx);
2646 swap(trans->sorted[l->sorted_idx],
2647 trans->sorted[r->sorted_idx]);
2649 btree_path_verify_sorted_ref(trans, l);
2650 btree_path_verify_sorted_ref(trans, r);
2653 static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path, int cmp)
2656 struct btree_path *n;
2659 n = prev_btree_path(trans, path);
2660 while (n && btree_path_cmp(n, path) > 0) {
2662 btree_path_swap(trans, n, path);
2663 n = prev_btree_path(trans, path);
2664 }
2671 n = next_btree_path(trans, path);
2672 while (n && btree_path_cmp(path, n) > 0) {
2674 btree_path_swap(trans, path, n);
2675 n = next_btree_path(trans, path);
2676 }
2680 btree_trans_verify_sorted(trans);
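/*
 * The resort above is a simple bubble pass: when a path's sort key (btree id,
 * cached, pos, level) changes, the path is swapped with its neighbours in
 * trans->sorted[] until the order is restored. The same idea on a plain
 * array, as a sketch (bubble_into_place() is hypothetical, not part of this
 * file):
 *
 *	static void bubble_into_place(int *a, unsigned nr, unsigned i)
 *	{
 *		while (i > 0 && a[i - 1] > a[i]) {
 *			swap(a[i - 1], a[i]);
 *			i--;
 *		}
 *		while (i + 1 < nr && a[i] > a[i + 1]) {
 *			swap(a[i], a[i + 1]);
 *			i++;
 *		}
 *	}
 */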
2683 static inline void btree_path_list_remove(struct btree_trans *trans,
2684 struct btree_path *path)
2688 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2690 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2692 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2693 trans->paths[trans->sorted[i]].sorted_idx = i;
2695 path->sorted_idx = U8_MAX;
2697 btree_trans_verify_sorted_refs(trans);
2700 static inline void btree_path_list_add(struct btree_trans *trans,
2701 struct btree_path *pos,
2702 struct btree_path *path)
2706 btree_trans_verify_sorted_refs(trans);
2708 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
2710 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2712 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2713 trans->paths[trans->sorted[i]].sorted_idx = i;
2715 btree_trans_verify_sorted_refs(trans);
2718 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2721 bch2_path_put(trans, iter->path,
2722 iter->flags & BTREE_ITER_INTENT);
2726 static void __bch2_trans_iter_init(struct btree_trans *trans,
2727 struct btree_iter *iter,
2728 unsigned btree_id, struct bpos pos,
2729 unsigned locks_want,
2734 EBUG_ON(trans->restarted);
2736 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
2737 btree_node_type_is_extents(btree_id))
2738 flags |= BTREE_ITER_IS_EXTENTS;
2740 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
2741 !btree_type_has_snapshots(btree_id))
2742 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2744 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
2745 btree_type_has_snapshots(btree_id))
2746 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
2748 if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
2749 flags |= BTREE_ITER_WITH_JOURNAL;
2751 iter->trans = trans;
2753 iter->btree_id = btree_id;
2754 iter->min_depth = depth;
2755 iter->flags = flags;
2756 iter->snapshot = pos.snapshot;
2757 iter->pos = pos;
2758 iter->k.type = KEY_TYPE_deleted;
2761 #ifdef CONFIG_BCACHEFS_DEBUG
2762 iter->ip_allocated = ip;
2765 iter->path = bch2_path_get(trans, btree_id, iter->pos,
2766 locks_want, depth, flags, ip);
2769 void bch2_trans_iter_init(struct btree_trans *trans,
2770 struct btree_iter *iter,
2771 unsigned btree_id, struct bpos pos, unsigned flags)
2774 __bch2_trans_iter_init(trans, iter, btree_id, pos,
2775 0, 0, flags, _RET_IP_);
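/*
 * Example: typical iterator lifecycle around bch2_trans_iter_init()/
 * bch2_trans_iter_exit(). A sketch, assuming the bch2_trans_init()/
 * bch2_trans_exit() pair declared in btree_iter.h and placeholder btree
 * id/position:
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS(inum, 0),
 *			     BTREE_ITER_INTENT);
 *	k = bch2_btree_iter_peek(&iter);
 *	ret = bkey_err(k);
 *	(use k, or queue an update with bch2_trans_update())
 *	bch2_trans_iter_exit(&trans, &iter);
 *	bch2_trans_exit(&trans);
 */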
2778 void bch2_trans_node_iter_init(struct btree_trans *trans,
2779 struct btree_iter *iter,
2780 enum btree_id btree_id,
2782 struct bpos pos, unsigned locks_want, unsigned depth, unsigned flags)
2786 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
2787 BTREE_ITER_NOT_EXTENTS|
2788 __BTREE_ITER_ALL_SNAPSHOTS|
2789 BTREE_ITER_ALL_SNAPSHOTS|
2790 flags, _RET_IP_);
2791 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2792 BUG_ON(iter->path->level != depth);
2793 BUG_ON(iter->min_depth != depth);
2796 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2800 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
2803 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2805 size_t new_top = trans->mem_top + size;
2808 if (new_top > trans->mem_bytes) {
2809 size_t old_bytes = trans->mem_bytes;
2810 size_t new_bytes = roundup_pow_of_two(new_top);
2813 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2815 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2816 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2817 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2818 new_bytes = BTREE_TRANS_MEM_MAX;
2823 if (!new_mem) return ERR_PTR(-ENOMEM);
2825 trans->mem = new_mem;
2826 trans->mem_bytes = new_bytes;
2829 trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
2830 btree_trans_restart(trans);
2831 return ERR_PTR(-EINTR);
2835 p = trans->mem + trans->mem_top;
2836 trans->mem_top += size;
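/*
 * Example: bch2_trans_kmalloc() hands out memory that lives until the
 * transaction is reset, but it may have to grow the buffer and restart the
 * transaction, in which case it returns ERR_PTR(-EINTR); callers must treat
 * that like any other transaction restart. A sketch, assuming a
 * struct btree_trans *trans in scope and val_bytes as a placeholder size:
 *
 *	struct bkey_i *new_key = bch2_trans_kmalloc(trans, sizeof(*new_key) + val_bytes);
 *	if (IS_ERR(new_key))
 *		return PTR_ERR(new_key);
 *	bkey_init(&new_key->k);
 *	(fill in new_key, then pass it to bch2_trans_update())
 */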
2842 * bch2_trans_begin() - reset a transaction after an interrupted attempt
2843 * @trans: transaction to reset
2845 * While iterating over nodes or updating nodes, an attempt to lock a btree
2846 * node may return EINTR when the trylock fails. When this occurs,
2847 * bch2_trans_begin() should be called and the transaction retried.
2849 void bch2_trans_begin(struct btree_trans *trans)
2851 struct btree_insert_entry *i;
2852 struct btree_path *path;
2854 trans_for_each_update(trans, i)
2855 __btree_path_put(i->path, true);
2857 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
2858 trans->extra_journal_res = 0;
2859 trans->nr_updates = 0;
2862 trans->hooks = NULL;
2863 trans->extra_journal_entries = NULL;
2864 trans->extra_journal_entry_u64s = 0;
2866 if (trans->fs_usage_deltas) {
2867 trans->fs_usage_deltas->used = 0;
2868 memset(&trans->fs_usage_deltas->memset_start, 0,
2869 (void *) &trans->fs_usage_deltas->memset_end -
2870 (void *) &trans->fs_usage_deltas->memset_start);
2873 trans_for_each_path(trans, path) {
2874 path->should_be_locked = false;
2877 * XXX: we probably shouldn't be doing this if the transaction
2878 * was restarted, but currently we still overflow transaction
2879 * iterators if we do that
2881 if (!path->ref && !path->preserve)
2882 __bch2_path_free(trans, path);
2883 else if (!path->ref)
2884 path->preserve = false;
2887 bch2_trans_cond_resched(trans);
2889 if (trans->restarted)
2890 bch2_btree_path_traverse_all(trans);
2892 trans->restarted = false;
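/*
 * Example: the retry loop bch2_trans_begin() is meant for. A sketch, where
 * do_thing() stands for any callback built from the iterator/update helpers
 * in this file; bch2_trans_do() and lockrestart_do() in btree_update.h wrap
 * the same pattern:
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_thing(&trans);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 */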
2895 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
2897 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
2898 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2901 BUG_ON(trans->used_mempool);
2904 p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
2907 if (!p) p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
2909 trans->paths = p; p += paths_bytes;
2910 trans->updates = p; p += updates_bytes;
2913 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
2914 unsigned expected_nr_iters,
2915 size_t expected_mem_bytes,
2917 __acquires(&c->btree_trans_barrier)
2919 memset(trans, 0, sizeof(*trans));
2923 bch2_trans_alloc_paths(trans, c);
2925 if (expected_mem_bytes) {
2926 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
2927 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
2929 if (unlikely(!trans->mem)) {
2930 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2931 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2935 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2937 trans->pid = current->pid;
2938 mutex_lock(&c->btree_trans_lock);
2939 list_add(&trans->list, &c->btree_trans_list);
2940 mutex_unlock(&c->btree_trans_lock);
2943 static void check_btree_paths_leaked(struct btree_trans *trans)
2945 #ifdef CONFIG_BCACHEFS_DEBUG
2946 struct bch_fs *c = trans->c;
2947 struct btree_path *path;
2949 trans_for_each_path(trans, path)
2954 bch_err(c, "btree paths leaked from %s!", trans->fn);
2955 trans_for_each_path(trans, path)
2957 printk(KERN_ERR " btree %s %pS\n",
2958 bch2_btree_ids[path->btree_id],
2959 (void *) path->ip_allocated);
2960 /* Be noisy about this: */
2961 bch2_fatal_error(c);
2965 void bch2_trans_exit(struct btree_trans *trans)
2966 __releases(&c->btree_trans_barrier)
2968 struct btree_insert_entry *i;
2969 struct bch_fs *c = trans->c;
2971 bch2_trans_unlock(trans);
2973 trans_for_each_update(trans, i)
2974 __btree_path_put(i->path, true);
2975 trans->nr_updates = 0;
2977 check_btree_paths_leaked(trans);
2979 mutex_lock(&c->btree_trans_lock);
2980 list_del(&trans->list);
2981 mutex_unlock(&c->btree_trans_lock);
2983 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2985 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
2987 if (trans->fs_usage_deltas) {
2988 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
2989 REPLICAS_DELTA_LIST_MAX)
2990 mempool_free(trans->fs_usage_deltas,
2991 &c->replicas_delta_pool);
2992 else
2993 kfree(trans->fs_usage_deltas);
2996 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
2997 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3003 * Userspace doesn't have a real percpu implementation:
3005 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3009 mempool_free(trans->paths, &c->btree_paths_pool);
3011 trans->mem = (void *) 0x1;
3012 trans->paths = (void *) 0x1;
3015 static void __maybe_unused
3016 bch2_btree_path_node_to_text(struct printbuf *out,
3017 struct btree_bkey_cached_common *_b,
3020 pr_buf(out, " l=%u %s:",
3021 _b->level, bch2_btree_ids[_b->btree_id]);
3022 bch2_bpos_to_text(out, btree_node_pos(_b, cached));
3025 static bool trans_has_locks(struct btree_trans *trans)
3027 struct btree_path *path;
3029 trans_for_each_path(trans, path)
3030 if (path->nodes_locked)
3035 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
3037 struct btree_trans *trans;
3038 struct btree_path *path;
3042 mutex_lock(&c->btree_trans_lock);
3043 list_for_each_entry(trans, &c->btree_trans_list, list) {
3044 if (!trans_has_locks(trans))
3047 pr_buf(out, "%i %s\n", trans->pid, trans->fn);
3049 trans_for_each_path(trans, path) {
3050 if (!path->nodes_locked)
3053 pr_buf(out, " path %u %c l=%u %s:",
3055 path->cached ? 'c' : 'b',
3057 bch2_btree_ids[path->btree_id]);
3058 bch2_bpos_to_text(out, path->pos);
3061 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3062 if (btree_node_locked(path, l)) {
3063 pr_buf(out, " %s l=%u ",
3064 btree_node_intent_locked(path, l) ? "i" : "r", l);
3065 bch2_btree_path_node_to_text(out,
3066 (void *) path->l[l].b, path->cached);
3073 b = READ_ONCE(trans->locking);
3075 path = &trans->paths[trans->locking_path_idx];
3076 pr_buf(out, " locking path %u %c l=%u %s:",
3077 trans->locking_path_idx,
3078 path->cached ? 'c' : 'b',
3079 trans->locking_level,
3080 bch2_btree_ids[trans->locking_btree_id]);
3081 bch2_bpos_to_text(out, trans->locking_pos);
3083 pr_buf(out, " node ");
3084 bch2_btree_path_node_to_text(out,
3085 (void *) b, path->cached);
3089 mutex_unlock(&c->btree_trans_lock);
3092 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3094 if (c->btree_trans_barrier_initialized)
3095 cleanup_srcu_struct(&c->btree_trans_barrier);
3096 mempool_exit(&c->btree_trans_mem_pool);
3097 mempool_exit(&c->btree_paths_pool);
3100 int bch2_fs_btree_iter_init(struct bch_fs *c)
3102 unsigned nr = BTREE_ITER_MAX;
3105 INIT_LIST_HEAD(&c->btree_trans_list);
3106 mutex_init(&c->btree_trans_lock);
3108 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3109 sizeof(struct btree_path) * nr +
3110 sizeof(struct btree_insert_entry) * nr) ?:
3111 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3112 BTREE_TRANS_MEM_MAX) ?:
3113 init_srcu_struct(&c->btree_trans_barrier);
3115 if (!ret) c->btree_trans_barrier_initialized = true;