1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
17 #include "subvolume.h"
19 #include <linux/prefetch.h>
20 #include <trace/events/bcachefs.h>
22 static void btree_trans_verify_sorted(struct btree_trans *);
23 static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
25 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
26 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
29 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
31 #ifdef CONFIG_BCACHEFS_DEBUG
32 return iter->ip_allocated;
38 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
41 * Unlocks before scheduling
42 * Note: does not revalidate iterator
44 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
46 if (need_resched() || race_fault()) {
47 bch2_trans_unlock(trans);
49 return bch2_trans_relock(trans) ? 0 : -EINTR;
55 static inline int __btree_path_cmp(const struct btree_path *l,
56 enum btree_id r_btree_id,
61 return cmp_int(l->btree_id, r_btree_id) ?:
62 cmp_int((int) l->cached, (int) r_cached) ?:
63 bpos_cmp(l->pos, r_pos) ?:
64 -cmp_int(l->level, r_level);
67 static inline int btree_path_cmp(const struct btree_path *l,
68 const struct btree_path *r)
70 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
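/*
 * Paths within a btree_trans are kept sorted: the comparator above orders
 * them by btree id, then by the cached flag, then by position, and finally
 * by descending level. trans_for_each_path_inorder() iterates in that
 * order, and bch2_path_get() relies on it to find an existing path it can
 * reuse for a given position.
 */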
73 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
75 /* Are we iterating over keys in all snapshots? */
76 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
77 p = bpos_successor(p);
79 p = bpos_nosnap_successor(p);
80 p.snapshot = iter->snapshot;
86 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
88 /* Are we iterating over keys in all snapshots? */
89 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
90 p = bpos_predecessor(p);
92 p = bpos_nosnap_predecessor(p);
93 p.snapshot = iter->snapshot;
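/*
 * Note on the successor/predecessor helpers above: when the iterator is not
 * in BTREE_ITER_ALL_SNAPSHOTS mode, the position is stepped ignoring the
 * snapshot field (bpos_nosnap_successor/predecessor) and the iterator's own
 * snapshot is then reapplied, so advancing never wanders between snapshot
 * versions of the same key.
 */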
99 static inline bool is_btree_node(struct btree_path *path, unsigned l)
101 return l < BTREE_MAX_DEPTH &&
102 (unsigned long) path->l[l].b >= 128;
105 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
107 struct bpos pos = iter->pos;
109 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
110 bkey_cmp(pos, POS_MAX))
111 pos = bkey_successor(iter, pos);
115 static inline bool btree_path_pos_before_node(struct btree_path *path,
118 return bpos_cmp(path->pos, b->data->min_key) < 0;
121 static inline bool btree_path_pos_after_node(struct btree_path *path,
124 return bpos_cmp(b->key.k.p, path->pos) < 0;
127 static inline bool btree_path_pos_in_node(struct btree_path *path,
130 return path->btree_id == b->c.btree_id &&
131 !btree_path_pos_before_node(path, b) &&
132 !btree_path_pos_after_node(path, b);
135 /* Btree node locking: */
137 void bch2_btree_node_unlock_write(struct btree_trans *trans,
138 struct btree_path *path, struct btree *b)
140 bch2_btree_node_unlock_write_inlined(trans, path, b);
143 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
145 struct btree_path *linked;
146 unsigned readers = 0;
148 trans_for_each_path(trans, linked)
149 if (linked->l[b->c.level].b == b &&
150 btree_node_read_locked(linked, b->c.level))
154 * Must drop our read locks before calling six_lock_write() -
155 * six_unlock() won't do wakeups until the reader count
156 * goes to 0, and it's safe because we have the node intent
157 * locked:
159 if (!b->c.lock.readers)
160 atomic64_sub(__SIX_VAL(read_lock, readers),
161 &b->c.lock.state.counter);
163 this_cpu_sub(*b->c.lock.readers, readers);
165 btree_node_lock_type(trans->c, b, SIX_LOCK_write);
167 if (!b->c.lock.readers)
168 atomic64_add(__SIX_VAL(read_lock, readers),
169 &b->c.lock.state.counter);
171 this_cpu_add(*b->c.lock.readers, readers);
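/*
 * In short, the write lock dance above: count how many read locks our own
 * paths hold on this node, temporarily subtract them from the six lock's
 * reader count (or its percpu readers counter) so six_lock_write() doesn't
 * end up waiting on ourselves, take the write lock, then restore the
 * reader count.
 */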
174 bool __bch2_btree_node_relock(struct btree_trans *trans,
175 struct btree_path *path, unsigned level)
177 struct btree *b = btree_path_node(path, level);
178 int want = __btree_lock_want(path, level);
180 if (!is_btree_node(path, level))
186 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
187 (btree_node_lock_seq_matches(path, b, level) &&
188 btree_node_lock_increment(trans, b, level, want))) {
189 mark_btree_node_locked(path, level, want);
193 trace_btree_node_relock_fail(trans->fn, _RET_IP_,
197 path->l[level].lock_seq,
198 is_btree_node(path, level) ? b->c.lock.state.seq : 0);
202 bool bch2_btree_node_upgrade(struct btree_trans *trans,
203 struct btree_path *path, unsigned level)
205 struct btree *b = path->l[level].b;
207 if (!is_btree_node(path, level))
210 switch (btree_lock_want(path, level)) {
211 case BTREE_NODE_UNLOCKED:
212 BUG_ON(btree_node_locked(path, level));
214 case BTREE_NODE_READ_LOCKED:
215 BUG_ON(btree_node_intent_locked(path, level));
216 return bch2_btree_node_relock(trans, path, level);
217 case BTREE_NODE_INTENT_LOCKED:
221 if (btree_node_intent_locked(path, level))
227 if (btree_node_locked(path, level)
228 ? six_lock_tryupgrade(&b->c.lock)
229 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
232 if (btree_node_lock_seq_matches(path, b, level) &&
233 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
234 btree_node_unlock(path, level);
240 mark_btree_node_intent_locked(path, level);
244 static inline bool btree_path_get_locks(struct btree_trans *trans,
245 struct btree_path *path,
248 unsigned l = path->level;
252 if (!btree_path_node(path, l))
256 ? bch2_btree_node_upgrade(trans, path, l)
257 : bch2_btree_node_relock(trans, path, l)))
261 } while (l < path->locks_want);
264 * When we fail to get a lock, we have to ensure that any child nodes
265 * can't be relocked so bch2_btree_path_traverse has to walk back up to
266 * the node that we failed to relock:
269 __bch2_btree_path_unlock(path);
270 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
273 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
275 } while (fail_idx >= 0);
278 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
279 path->uptodate = BTREE_ITER_UPTODATE;
281 bch2_trans_verify_locks(trans);
283 return path->uptodate < BTREE_ITER_NEED_RELOCK;
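/*
 * btree_path_get_locks() walks up from path->level, relocking (or, when
 * asked to upgrade, intent-locking) each level up to path->locks_want. If
 * any level fails, everything is unlocked, the levels at and below the
 * failure are flagged with BTREE_ITER_NO_NODE_GET_LOCKS, and the path is
 * marked as needing a full re-traverse.
 */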
286 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
290 ? container_of(_b, struct btree, c)->key.k.p
291 : container_of(_b, struct bkey_cached, c)->key.pos;
295 bool __bch2_btree_node_lock(struct btree_trans *trans,
296 struct btree_path *path,
298 struct bpos pos, unsigned level,
299 enum six_lock_type type,
300 six_lock_should_sleep_fn should_sleep_fn, void *p,
303 struct btree_path *linked, *deadlock_path = NULL;
304 u64 start_time = local_clock();
308 /* Check if it's safe to block: */
309 trans_for_each_path(trans, linked) {
310 if (!linked->nodes_locked)
314 * Can't block taking an intent lock if we have _any_ nodes read
315 * locked:
317 * - Our read lock blocks another thread with an intent lock on
318 * the same node from getting a write lock, and thus from
319 * dropping its intent lock
321 * - And the other thread may have multiple nodes intent locked:
322 * both the node we want to intent lock, and the node we
323 * already have read locked - deadlock:
325 if (type == SIX_LOCK_intent &&
326 linked->nodes_locked != linked->nodes_intent_locked) {
327 deadlock_path = linked;
331 if (linked->btree_id != path->btree_id) {
332 if (linked->btree_id > path->btree_id) {
333 deadlock_path = linked;
340 * Within the same btree, cached paths come before non
341 * cached paths:
343 if (linked->cached != path->cached) {
345 deadlock_path = linked;
352 * Interior nodes must be locked before their descendants: if
353 * another path has possible descendants locked of the node
354 * we're about to lock, it must have the ancestors locked too:
356 if (level > __fls(linked->nodes_locked)) {
357 deadlock_path = linked;
361 /* Must lock btree nodes in key order: */
362 if (btree_node_locked(linked, level) &&
363 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
364 linked->cached)) <= 0) {
365 deadlock_path = linked;
367 BUG_ON(trans->in_traverse_all);
371 if (unlikely(deadlock_path)) {
372 trace_trans_restart_would_deadlock(trans->fn, ip,
373 trans->in_traverse_all, reason,
374 deadlock_path->btree_id,
375 deadlock_path->cached,
380 btree_trans_restart(trans);
384 if (six_trylock_type(&b->c.lock, type))
387 trans->locking_path_idx = path->idx;
388 trans->locking_pos = pos;
389 trans->locking_btree_id = path->btree_id;
390 trans->locking_level = level;
393 ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
395 trans->locking = NULL;
398 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
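/*
 * The checks above encode the global lock ordering: don't block on an
 * intent lock while holding read locks, take btrees in increasing btree_id
 * order, respect the cached/non-cached ordering within a btree, lock
 * interior nodes before their descendants, and lock nodes within a level
 * in key order. A violation doesn't deadlock - the offending path is
 * recorded, trace_trans_restart_would_deadlock() fires and the transaction
 * is restarted, which callers see as -EINTR.
 */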
403 /* Btree iterator locking: */
405 #ifdef CONFIG_BCACHEFS_DEBUG
407 static void bch2_btree_path_verify_locks(struct btree_path *path)
411 if (!path->nodes_locked) {
412 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
413 btree_path_node(path, path->level));
417 for (l = 0; btree_path_node(path, l); l++)
418 BUG_ON(btree_lock_want(path, l) !=
419 btree_node_locked_type(path, l));
422 void bch2_trans_verify_locks(struct btree_trans *trans)
424 struct btree_path *path;
426 trans_for_each_path(trans, path)
427 bch2_btree_path_verify_locks(path);
430 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
433 /* Btree path locking: */
436 * Only for btree_cache.c - only relocks intent locks
438 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
439 struct btree_path *path)
443 for (l = path->level;
444 l < path->locks_want && btree_path_node(path, l);
446 if (!bch2_btree_node_relock(trans, path, l)) {
447 __bch2_btree_path_unlock(path);
448 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
449 trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
450 path->btree_id, &path->pos);
451 btree_trans_restart(trans);
460 static bool bch2_btree_path_relock(struct btree_trans *trans,
461 struct btree_path *path, unsigned long trace_ip)
463 bool ret = btree_path_get_locks(trans, path, false);
466 trace_trans_restart_relock_path(trans->fn, trace_ip,
467 path->btree_id, &path->pos);
468 btree_trans_restart(trans);
473 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
474 struct btree_path *path,
475 unsigned new_locks_want)
477 struct btree_path *linked;
479 EBUG_ON(path->locks_want >= new_locks_want);
481 path->locks_want = new_locks_want;
483 if (btree_path_get_locks(trans, path, true))
487 * XXX: this is ugly - we'd prefer to not be mucking with other
488 * iterators in the btree_trans here.
490 * On failure to upgrade the iterator, setting iter->locks_want and
491 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
492 * get the locks we want on transaction restart.
494 * But if this iterator was a clone, on transaction restart what we did
495 * to this iterator isn't going to be preserved.
497 * Possibly we could add an iterator field for the parent iterator when
498 * an iterator is a copy - for now, we'll just upgrade any other
499 * iterators with the same btree id.
501 * The code below used to be needed to ensure ancestor nodes get locked
502 * before interior nodes - now that's handled by
503 * bch2_btree_path_traverse_all().
505 trans_for_each_path(trans, linked)
506 if (linked != path &&
507 linked->cached == path->cached &&
508 linked->btree_id == path->btree_id &&
509 linked->locks_want < new_locks_want) {
510 linked->locks_want = new_locks_want;
511 btree_path_get_locks(trans, linked, true);
517 void __bch2_btree_path_downgrade(struct btree_path *path,
518 unsigned new_locks_want)
522 EBUG_ON(path->locks_want < new_locks_want);
524 path->locks_want = new_locks_want;
526 while (path->nodes_locked &&
527 (l = __fls(path->nodes_locked)) >= path->locks_want) {
528 if (l > path->level) {
529 btree_node_unlock(path, l);
531 if (btree_node_intent_locked(path, l)) {
532 six_lock_downgrade(&path->l[l].b->c.lock);
533 path->nodes_intent_locked ^= 1 << l;
539 bch2_btree_path_verify_locks(path);
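/*
 * Downgrading drops locks that are no longer wanted: locked levels above
 * path->level that exceed the new locks_want are unlocked entirely, and an
 * intent lock held at the level we stop at is demoted to a read lock with
 * six_lock_downgrade().
 */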
542 void bch2_trans_downgrade(struct btree_trans *trans)
544 struct btree_path *path;
546 trans_for_each_path(trans, path)
547 bch2_btree_path_downgrade(path);
550 /* Btree transaction locking: */
552 bool bch2_trans_relock(struct btree_trans *trans)
554 struct btree_path *path;
556 if (unlikely(trans->restarted))
559 trans_for_each_path(trans, path)
560 if (path->should_be_locked &&
561 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
562 trace_trans_restart_relock(trans->fn, _RET_IP_,
563 path->btree_id, &path->pos);
564 BUG_ON(!trans->restarted);
570 void bch2_trans_unlock(struct btree_trans *trans)
572 struct btree_path *path;
574 trans_for_each_path(trans, path)
575 __bch2_btree_path_unlock(path);
577 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
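/*
 * bch2_trans_unlock()/bch2_trans_relock() bracket operations that may
 * block: unlock drops every node lock but keeps the paths and their
 * positions; relock re-takes locks for paths marked should_be_locked and
 * restarts the transaction if any of those nodes changed underneath us in
 * the meantime.
 */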
580 /* Btree iterator: */
582 #ifdef CONFIG_BCACHEFS_DEBUG
584 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
585 struct btree_path *path)
587 struct bkey_cached *ck;
588 bool locked = btree_node_locked(path, 0);
590 if (!bch2_btree_node_relock(trans, path, 0))
593 ck = (void *) path->l[0].b;
594 BUG_ON(ck->key.btree_id != path->btree_id ||
595 bkey_cmp(ck->key.pos, path->pos));
598 btree_node_unlock(path, 0);
601 static void bch2_btree_path_verify_level(struct btree_trans *trans,
602 struct btree_path *path, unsigned level)
604 struct btree_path_level *l;
605 struct btree_node_iter tmp;
607 struct bkey_packed *p, *k;
608 char buf1[100], buf2[100], buf3[100];
611 if (!bch2_debug_check_iterators)
616 locked = btree_node_locked(path, level);
620 bch2_btree_path_verify_cached(trans, path);
624 if (!btree_path_node(path, level))
627 if (!bch2_btree_node_relock(trans, path, level))
630 BUG_ON(!btree_path_pos_in_node(path, l->b));
632 bch2_btree_node_iter_verify(&l->iter, l->b);
635 * For interior nodes, the iterator will have skipped past deleted keys:
638 ? bch2_btree_node_iter_prev(&tmp, l->b)
639 : bch2_btree_node_iter_prev_all(&tmp, l->b);
640 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
642 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
647 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
653 btree_node_unlock(path, level);
656 strcpy(buf2, "(none)");
657 strcpy(buf3, "(none)");
659 bch2_bpos_to_text(&PBUF(buf1), path->pos);
662 struct bkey uk = bkey_unpack_key(l->b, p);
663 bch2_bkey_to_text(&PBUF(buf2), &uk);
667 struct bkey uk = bkey_unpack_key(l->b, k);
668 bch2_bkey_to_text(&PBUF(buf3), &uk);
671 panic("path should be %s key at level %u:\n"
675 msg, level, buf1, buf2, buf3);
678 static void bch2_btree_path_verify(struct btree_trans *trans,
679 struct btree_path *path)
681 struct bch_fs *c = trans->c;
684 EBUG_ON(path->btree_id >= BTREE_ID_NR);
686 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
688 BUG_ON(!path->cached &&
689 c->btree_roots[path->btree_id].b->c.level > i);
693 bch2_btree_path_verify_level(trans, path, i);
696 bch2_btree_path_verify_locks(path);
699 void bch2_trans_verify_paths(struct btree_trans *trans)
701 struct btree_path *path;
703 trans_for_each_path(trans, path)
704 bch2_btree_path_verify(trans, path);
707 static void bch2_btree_iter_verify(struct btree_iter *iter)
709 struct btree_trans *trans = iter->trans;
711 BUG_ON(iter->btree_id >= BTREE_ID_NR);
713 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
715 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
716 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
718 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
719 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
720 !btree_type_has_snapshots(iter->btree_id));
722 if (iter->update_path)
723 bch2_btree_path_verify(trans, iter->update_path);
724 bch2_btree_path_verify(trans, iter->path);
727 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
729 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
730 !iter->pos.snapshot);
732 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
733 iter->pos.snapshot != iter->snapshot);
735 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
736 bkey_cmp(iter->pos, iter->k.p) > 0);
739 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
741 struct btree_trans *trans = iter->trans;
742 struct btree_iter copy;
743 struct bkey_s_c prev;
746 if (!bch2_debug_check_iterators)
749 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
752 if (bkey_err(k) || !k.k)
755 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
759 bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos,
760 BTREE_ITER_NOPRESERVE|
761 BTREE_ITER_ALL_SNAPSHOTS);
762 prev = bch2_btree_iter_prev(©);
766 ret = bkey_err(prev);
770 if (!bkey_cmp(prev.k->p, k.k->p) &&
771 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
772 prev.k->p.snapshot) > 0) {
773 char buf1[100], buf2[200];
775 bch2_bkey_to_text(&PBUF(buf1), k.k);
776 bch2_bkey_to_text(&PBUF(buf2), prev.k);
778 panic("iter snap %u\n"
785 bch2_trans_iter_exit(trans, ©);
789 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
790 struct bpos pos, bool key_cache)
792 struct btree_path *path;
796 trans_for_each_path_inorder(trans, path, idx) {
797 int cmp = cmp_int(path->btree_id, id) ?:
798 cmp_int(path->cached, key_cache);
805 if (!(path->nodes_locked & 1) ||
806 !path->should_be_locked)
810 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
811 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
814 if (!bkey_cmp(pos, path->pos))
819 bch2_dump_trans_paths_updates(trans);
820 panic("not locked: %s %s%s\n",
822 (bch2_bpos_to_text(&PBUF(buf), pos), buf),
823 key_cache ? " cached" : "");
828 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
829 struct btree_path *path, unsigned l) {}
830 static inline void bch2_btree_path_verify(struct btree_trans *trans,
831 struct btree_path *path) {}
832 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
833 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
834 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
838 /* Btree path: fixups after btree updates */
840 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
843 struct bkey_packed *k)
845 struct btree_node_iter_set *set;
847 btree_node_iter_for_each(iter, set)
848 if (set->end == t->end_offset) {
849 set->k = __btree_node_key_to_offset(b, k);
850 bch2_btree_node_iter_sort(iter, b);
854 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
857 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
859 struct bkey_packed *where)
861 struct btree_path_level *l = &path->l[b->c.level];
863 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
866 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
867 bch2_btree_node_iter_advance(&l->iter, l->b);
870 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
872 struct bkey_packed *where)
874 struct btree_path *path;
876 trans_for_each_path_with_node(trans, b, path) {
877 __bch2_btree_path_fix_key_modified(path, b, where);
878 bch2_btree_path_verify_level(trans, path, b->c.level);
882 static void __bch2_btree_node_iter_fix(struct btree_path *path,
884 struct btree_node_iter *node_iter,
886 struct bkey_packed *where,
887 unsigned clobber_u64s,
890 const struct bkey_packed *end = btree_bkey_last(b, t);
891 struct btree_node_iter_set *set;
892 unsigned offset = __btree_node_key_to_offset(b, where);
893 int shift = new_u64s - clobber_u64s;
894 unsigned old_end = t->end_offset - shift;
895 unsigned orig_iter_pos = node_iter->data[0].k;
896 bool iter_current_key_modified =
897 orig_iter_pos >= offset &&
898 orig_iter_pos <= offset + clobber_u64s;
900 btree_node_iter_for_each(node_iter, set)
901 if (set->end == old_end)
904 /* didn't find the bset in the iterator - might have to re-add it: */
906 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
907 bch2_btree_node_iter_push(node_iter, b, where, end);
910 /* Iterator is after key that changed */
914 set->end = t->end_offset;
916 /* Iterator hasn't gotten to the key that changed yet: */
921 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
923 } else if (set->k < offset + clobber_u64s) {
924 set->k = offset + new_u64s;
925 if (set->k == set->end)
926 bch2_btree_node_iter_set_drop(node_iter, set);
928 /* Iterator is after key that changed */
929 set->k = (int) set->k + shift;
933 bch2_btree_node_iter_sort(node_iter, b);
935 if (node_iter->data[0].k != orig_iter_pos)
936 iter_current_key_modified = true;
939 * When a new key is added, and the node iterator now points to that
940 * key, the iterator might have skipped past deleted keys that should
941 * come after the key the iterator now points to. We have to rewind to
942 * before those deleted keys - otherwise
943 * bch2_btree_node_iter_prev_all() breaks:
945 if (!bch2_btree_node_iter_end(node_iter) &&
946 iter_current_key_modified &&
949 struct bkey_packed *k, *k2, *p;
951 k = bch2_btree_node_iter_peek_all(node_iter, b);
953 for_each_bset(b, t) {
954 bool set_pos = false;
956 if (node_iter->data[0].end == t->end_offset)
959 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
961 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
962 bkey_iter_cmp(b, k, p) < 0) {
968 btree_node_iter_set_set_pos(node_iter,
974 void bch2_btree_node_iter_fix(struct btree_trans *trans,
975 struct btree_path *path,
977 struct btree_node_iter *node_iter,
978 struct bkey_packed *where,
979 unsigned clobber_u64s,
982 struct bset_tree *t = bch2_bkey_to_bset(b, where);
983 struct btree_path *linked;
985 if (node_iter != &path->l[b->c.level].iter) {
986 __bch2_btree_node_iter_fix(path, b, node_iter, t,
987 where, clobber_u64s, new_u64s);
989 if (bch2_debug_check_iterators)
990 bch2_btree_node_iter_verify(node_iter, b);
993 trans_for_each_path_with_node(trans, b, linked) {
994 __bch2_btree_node_iter_fix(linked, b,
995 &linked->l[b->c.level].iter, t,
996 where, clobber_u64s, new_u64s);
997 bch2_btree_path_verify_level(trans, linked, b->c.level);
1001 /* Btree path level: pointer to a particular btree node and node iter */
1003 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
1004 struct btree_path_level *l,
1006 struct bkey_packed *k)
1008 struct bkey_s_c ret;
1012 * signal to bch2_btree_iter_peek_slot() that we're currently at
1013 * a hole
1015 u->type = KEY_TYPE_deleted;
1016 return bkey_s_c_null;
1019 ret = bkey_disassemble(l->b, k, u);
1022 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
1023 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
1024 * being overwritten but doesn't change k->size. But this is ok, because
1025 * those keys are never written out, we just have to avoid a spurious
1026 * assertion here:
1028 if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
1029 bch2_bkey_debugcheck(c, l->b, ret);
1034 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1035 struct btree_path_level *l,
1038 return __btree_iter_unpack(c, l, u,
1039 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1042 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
1043 struct btree_path *path,
1044 struct btree_path_level *l,
1047 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1048 bch2_btree_node_iter_peek(&l->iter, l->b));
1050 path->pos = k.k ? k.k->p : l->b->key.k.p;
1054 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
1055 struct btree_path *path,
1056 struct btree_path_level *l,
1059 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1060 bch2_btree_node_iter_prev(&l->iter, l->b));
1062 path->pos = k.k ? k.k->p : l->b->data->min_key;
1066 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1067 struct btree_path_level *l,
1070 struct bkey_packed *k;
1071 int nr_advanced = 0;
1073 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1074 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1075 if (max_advance > 0 && nr_advanced >= max_advance)
1078 bch2_btree_node_iter_advance(&l->iter, l->b);
1086 * Verify that iterator for parent node points to child node:
1088 static void btree_path_verify_new_node(struct btree_trans *trans,
1089 struct btree_path *path, struct btree *b)
1091 struct bch_fs *c = trans->c;
1092 struct btree_path_level *l;
1095 struct bkey_packed *k;
1097 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1100 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1103 plevel = b->c.level + 1;
1104 if (!btree_path_node(path, plevel))
1107 parent_locked = btree_node_locked(path, plevel);
1109 if (!bch2_btree_node_relock(trans, path, plevel))
1112 l = &path->l[plevel];
1113 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1116 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1121 struct bkey uk = bkey_unpack_key(b, k);
1123 bch2_dump_btree_node(c, l->b);
1124 bch2_bpos_to_text(&PBUF(buf1), path->pos);
1125 bch2_bkey_to_text(&PBUF(buf2), &uk);
1126 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
1127 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
1128 panic("parent iter doesn't point to new node:\n"
1132 bch2_btree_ids[path->btree_id], buf1,
1137 btree_node_unlock(path, plevel);
1140 static inline void __btree_path_level_init(struct btree_path *path,
1143 struct btree_path_level *l = &path->l[level];
1145 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1148 * Iterators to interior nodes should always be pointed at the first non
1149 * whiteout:
1152 bch2_btree_node_iter_peek(&l->iter, l->b);
1155 static inline void btree_path_level_init(struct btree_trans *trans,
1156 struct btree_path *path,
1159 BUG_ON(path->cached);
1161 btree_path_verify_new_node(trans, path, b);
1163 EBUG_ON(!btree_path_pos_in_node(path, b));
1164 EBUG_ON(b->c.lock.state.seq & 1);
1166 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1167 path->l[b->c.level].b = b;
1168 __btree_path_level_init(path, b->c.level);
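/*
 * btree_path_level_init() records the six lock sequence number along with
 * the node pointer: bch2_btree_node_relock() can later re-take the lock
 * without re-traversing, but only while that sequence number still matches,
 * i.e. the node hasn't been modified (or freed and reused) since we last
 * held the lock.
 */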
1171 /* Btree path: fixups after btree node updates: */
1174 * A btree node is being replaced - update the iterator to point to the new
1177 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1179 struct btree_path *path;
1181 trans_for_each_path(trans, path)
1182 if (!path->cached &&
1183 btree_path_pos_in_node(path, b)) {
1184 enum btree_node_locked_type t =
1185 btree_lock_want(path, b->c.level);
1187 if (path->nodes_locked &&
1188 t != BTREE_NODE_UNLOCKED) {
1189 btree_node_unlock(path, b->c.level);
1190 six_lock_increment(&b->c.lock, t);
1191 mark_btree_node_locked(path, b->c.level, t);
1194 btree_path_level_init(trans, path, b);
1199 * A btree node has been modified in such a way as to invalidate iterators - fix
1202 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1204 struct btree_path *path;
1206 trans_for_each_path_with_node(trans, b, path)
1207 __btree_path_level_init(path, b->c.level);
1210 /* Btree path: traverse, set_pos: */
1212 static int lock_root_check_fn(struct six_lock *lock, void *p)
1214 struct btree *b = container_of(lock, struct btree, c.lock);
1215 struct btree **rootp = p;
1217 return b == *rootp ? 0 : -1;
1220 static inline int btree_path_lock_root(struct btree_trans *trans,
1221 struct btree_path *path,
1222 unsigned depth_want,
1223 unsigned long trace_ip)
1225 struct bch_fs *c = trans->c;
1226 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1227 enum six_lock_type lock_type;
1230 EBUG_ON(path->nodes_locked);
1233 b = READ_ONCE(*rootp);
1234 path->level = READ_ONCE(b->c.level);
1236 if (unlikely(path->level < depth_want)) {
1238 * the root is at a lower depth than the depth we want:
1239 * got to the end of the btree, or we're walking nodes
1240 * greater than some depth and there are no nodes >=
1241 * that depth
1243 path->level = depth_want;
1244 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1245 path->l[i].b = NULL;
1249 lock_type = __btree_lock_want(path, path->level);
1250 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1251 path->level, lock_type,
1252 lock_root_check_fn, rootp,
1254 if (trans->restarted)
1259 if (likely(b == READ_ONCE(*rootp) &&
1260 b->c.level == path->level &&
1262 for (i = 0; i < path->level; i++)
1263 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1264 path->l[path->level].b = b;
1265 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1266 path->l[i].b = NULL;
1268 mark_btree_node_locked(path, path->level, lock_type);
1269 btree_path_level_init(trans, path, b);
1273 six_unlock_type(&b->c.lock, lock_type);
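/*
 * Locking the root has to tolerate the root being split or replaced while
 * we wait: read the root pointer, lock it with a should-sleep callback
 * that gives up if the root changes, then re-check that it is still the
 * root at the expected level before using it; otherwise unlock and retry.
 */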
1278 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1280 struct bch_fs *c = trans->c;
1281 struct btree_path_level *l = path_l(path);
1282 struct btree_node_iter node_iter = l->iter;
1283 struct bkey_packed *k;
1284 struct bkey_buf tmp;
1285 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1286 ? (path->level > 1 ? 0 : 2)
1287 : (path->level > 1 ? 1 : 16);
1288 bool was_locked = btree_node_locked(path, path->level);
1291 bch2_bkey_buf_init(&tmp);
1293 while (nr && !ret) {
1294 if (!bch2_btree_node_relock(trans, path, path->level))
1297 bch2_btree_node_iter_advance(&node_iter, l->b);
1298 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1302 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1303 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1308 btree_node_unlock(path, path->level);
1310 bch2_bkey_buf_exit(&tmp, c);
1314 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
1315 struct btree_and_journal_iter *jiter)
1317 struct bch_fs *c = trans->c;
1319 struct bkey_buf tmp;
1320 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1321 ? (path->level > 1 ? 0 : 2)
1322 : (path->level > 1 ? 1 : 16);
1323 bool was_locked = btree_node_locked(path, path->level);
1326 bch2_bkey_buf_init(&tmp);
1328 while (nr && !ret) {
1329 if (!bch2_btree_node_relock(trans, path, path->level))
1332 bch2_btree_and_journal_iter_advance(jiter);
1333 k = bch2_btree_and_journal_iter_peek(jiter);
1337 bch2_bkey_buf_reassemble(&tmp, c, k);
1338 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1343 btree_node_unlock(path, path->level);
1345 bch2_bkey_buf_exit(&tmp, c);
1349 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1350 struct btree_path *path,
1351 unsigned plevel, struct btree *b)
1353 struct btree_path_level *l = &path->l[plevel];
1354 bool locked = btree_node_locked(path, plevel);
1355 struct bkey_packed *k;
1356 struct bch_btree_ptr_v2 *bp;
1358 if (!bch2_btree_node_relock(trans, path, plevel))
1361 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1362 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1364 bp = (void *) bkeyp_val(&l->b->format, k);
1365 bp->mem_ptr = (unsigned long)b;
1368 btree_node_unlock(path, plevel);
1371 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
1372 struct btree_path *path,
1374 struct bkey_buf *out)
1376 struct bch_fs *c = trans->c;
1377 struct btree_path_level *l = path_l(path);
1378 struct btree_and_journal_iter jiter;
1382 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
1384 k = bch2_btree_and_journal_iter_peek(&jiter);
1386 bch2_bkey_buf_reassemble(out, c, k);
1388 if (flags & BTREE_ITER_PREFETCH)
1389 ret = btree_path_prefetch_j(trans, path, &jiter);
1391 bch2_btree_and_journal_iter_exit(&jiter);
1395 static __always_inline int btree_path_down(struct btree_trans *trans,
1396 struct btree_path *path,
1398 unsigned long trace_ip)
1400 struct bch_fs *c = trans->c;
1401 struct btree_path_level *l = path_l(path);
1403 unsigned level = path->level - 1;
1404 enum six_lock_type lock_type = __btree_lock_want(path, level);
1405 bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
1406 struct bkey_buf tmp;
1409 EBUG_ON(!btree_node_locked(path, path->level));
1411 bch2_bkey_buf_init(&tmp);
1413 if (unlikely(!replay_done)) {
1414 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
1418 bch2_bkey_buf_unpack(&tmp, c, l->b,
1419 bch2_btree_node_iter_peek(&l->iter, l->b));
1421 if (flags & BTREE_ITER_PREFETCH) {
1422 ret = btree_path_prefetch(trans, path);
1428 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1429 ret = PTR_ERR_OR_ZERO(b);
1433 mark_btree_node_locked(path, level, lock_type);
1434 btree_path_level_init(trans, path, b);
1436 if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
1437 unlikely(b != btree_node_mem_ptr(tmp.k)))
1438 btree_node_mem_ptr_set(trans, path, level + 1, b);
1440 if (btree_node_read_locked(path, level + 1))
1441 btree_node_unlock(path, level + 1);
1442 path->level = level;
1444 bch2_btree_path_verify_locks(path);
1446 bch2_bkey_buf_exit(&tmp, c);
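/*
 * btree_path_down() descends one level: peek the pointer to the child node
 * from the parent's node iterator (overlaid with keys still in the journal
 * until journal replay has finished), prefetch upcoming sibling nodes when
 * BTREE_ITER_PREFETCH is set, get and lock the child via
 * bch2_btree_node_get(), and make it the path's new bottom level.
 */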
1450 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1451 unsigned, unsigned long);
1453 static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
1454 unsigned long trace_ip)
1456 struct bch_fs *c = trans->c;
1457 struct btree_path *path;
1460 if (trans->in_traverse_all)
1463 trans->in_traverse_all = true;
1465 trans->restarted = false;
1467 trans_for_each_path(trans, path)
1468 path->should_be_locked = false;
1470 btree_trans_verify_sorted(trans);
1472 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1473 struct btree_path *path1 = trans->paths + trans->sorted[i];
1474 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1476 if (path1->btree_id == path2->btree_id &&
1477 path1->locks_want < path2->locks_want)
1478 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1479 else if (!path1->locks_want && path2->locks_want)
1480 __bch2_btree_path_upgrade(trans, path1, 1);
1483 bch2_trans_unlock(trans);
1486 if (unlikely(ret == -ENOMEM)) {
1489 closure_init_stack(&cl);
1492 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1497 if (unlikely(ret == -EIO))
1500 BUG_ON(ret && ret != -EINTR);
1502 /* Now, redo traversals in correct order: */
1504 while (i < trans->nr_sorted) {
1505 path = trans->paths + trans->sorted[i];
1507 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1509 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1513 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1515 if (path->nodes_locked ||
1516 !btree_path_node(path, path->level))
1521 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1522 * and relock(), relock() won't relock since path->should_be_locked
1523 * isn't set yet, which is all fine
1525 trans_for_each_path(trans, path)
1526 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1528 bch2_btree_cache_cannibalize_unlock(c);
1530 trans->in_traverse_all = false;
1532 trace_trans_traverse_all(trans->fn, trace_ip);
1536 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1538 return __btree_path_traverse_all(trans, 0, _RET_IP_);
1541 static inline bool btree_path_good_node(struct btree_trans *trans,
1542 struct btree_path *path,
1543 unsigned l, int check_pos)
1545 if (!is_btree_node(path, l) ||
1546 !bch2_btree_node_relock(trans, path, l))
1549 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1551 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1556 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1557 struct btree_path *path,
1560 unsigned i, l = path->level;
1562 while (btree_path_node(path, l) &&
1563 !btree_path_good_node(trans, path, l, check_pos)) {
1564 btree_node_unlock(path, l);
1565 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1569 /* If we need intent locks, take them too: */
1571 i < path->locks_want && btree_path_node(path, i);
1573 if (!bch2_btree_node_relock(trans, path, i))
1575 btree_node_unlock(path, l);
1576 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1584 * This is the main state machine for walking down the btree - walks down to a
1585 * specified depth
1587 * Returns 0 on success, -EIO on error (error reading in a btree node).
1589 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1590 * stashed in the iterator and returned from bch2_trans_exit().
1592 static int btree_path_traverse_one(struct btree_trans *trans,
1593 struct btree_path *path,
1595 unsigned long trace_ip)
1597 unsigned depth_want = path->level;
1600 if (unlikely(trans->restarted)) {
1606 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1607 * and re-traverse the path without a transaction restart:
1609 if (path->should_be_locked) {
1610 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1615 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1619 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1622 path->level = btree_path_up_until_good_node(trans, path, 0);
1625 * Note: path->nodes[path->level] may be temporarily NULL here - that
1626 * would indicate to other code that we got to the end of the btree,
1627 * here it indicates that relocking the root failed - it's critical that
1628 * btree_path_lock_root() comes next and that it can't fail
1630 while (path->level > depth_want) {
1631 ret = btree_path_node(path, path->level)
1632 ? btree_path_down(trans, path, flags, trace_ip)
1633 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1634 if (unlikely(ret)) {
1637 * No nodes at this level - got to the end of
1638 * the btree:
1644 __bch2_btree_path_unlock(path);
1645 path->level = depth_want;
1648 path->l[path->level].b =
1649 BTREE_ITER_NO_NODE_ERROR;
1651 path->l[path->level].b =
1652 BTREE_ITER_NO_NODE_DOWN;
1657 path->uptodate = BTREE_ITER_UPTODATE;
1659 BUG_ON((ret == -EINTR) != !!trans->restarted);
1660 bch2_btree_path_verify(trans, path);
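/*
 * Taken together, btree_path_traverse_one() is the traversal state machine:
 * honour should_be_locked by only relocking, hand cached paths to the key
 * cache code, walk up until we find a node that is still good for
 * path->pos, then walk back down - locking the root if we walked all the
 * way up - until the path is at its requested level, at which point it is
 * marked BTREE_ITER_UPTODATE. -EINTR here always corresponds to a
 * transaction restart.
 */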
1664 static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long);
1666 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1667 struct btree_path *path, unsigned flags)
1669 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1672 return bch2_trans_cond_resched(trans) ?:
1673 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1676 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1677 struct btree_path *src)
1681 memcpy(&dst->pos, &src->pos,
1682 sizeof(struct btree_path) - offsetof(struct btree_path, pos));
1684 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1685 if (btree_node_locked(dst, i))
1686 six_lock_increment(&dst->l[i].b->c.lock,
1687 __btree_lock_want(dst, i));
1689 btree_path_check_sort(trans, dst, 0);
1692 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1695 struct btree_path *new = btree_path_alloc(trans, src);
1697 btree_path_copy(trans, new, src);
1698 __btree_path_get(new, intent);
1702 inline struct btree_path * __must_check
1703 bch2_btree_path_make_mut(struct btree_trans *trans,
1704 struct btree_path *path, bool intent,
1707 if (path->ref > 1 || path->preserve) {
1708 __btree_path_put(path, intent);
1709 path = btree_path_clone(trans, path, intent);
1710 path->preserve = false;
1711 #ifdef CONFIG_BCACHEFS_DEBUG
1712 path->ip_allocated = ip;
1714 btree_trans_verify_sorted(trans);
1720 struct btree_path * __must_check
1721 bch2_btree_path_set_pos(struct btree_trans *trans,
1722 struct btree_path *path, struct bpos new_pos,
1723 bool intent, unsigned long ip)
1725 int cmp = bpos_cmp(new_pos, path->pos);
1726 unsigned l = path->level;
1728 EBUG_ON(trans->restarted);
1729 EBUG_ON(!path->ref);
1734 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1736 path->pos = new_pos;
1737 path->should_be_locked = false;
1739 btree_path_check_sort(trans, path, cmp);
1741 if (unlikely(path->cached)) {
1742 btree_node_unlock(path, 0);
1743 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1744 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1748 l = btree_path_up_until_good_node(trans, path, cmp);
1750 if (btree_path_node(path, l)) {
1752 * We might have to skip over many keys, or just a few: try
1753 * advancing the node iterator, and if we have to skip over too
1754 * many keys just reinit it (or if we're rewinding, since that
1755 * is expensive).
1758 !btree_path_advance_to_pos(path, &path->l[l], 8))
1759 __btree_path_level_init(path, l);
1762 if (l != path->level) {
1763 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1764 __bch2_btree_path_unlock(path);
1767 bch2_btree_path_verify(trans, path);
1771 /* Btree path: main interface: */
1773 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1775 struct btree_path *next;
1777 next = prev_btree_path(trans, path);
1778 if (next && !btree_path_cmp(next, path))
1781 next = next_btree_path(trans, path);
1782 if (next && !btree_path_cmp(next, path))
1788 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1790 struct btree_path *next;
1792 next = prev_btree_path(trans, path);
1793 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1796 next = next_btree_path(trans, path);
1797 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1803 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1805 __bch2_btree_path_unlock(path);
1806 btree_path_list_remove(trans, path);
1807 trans->paths_allocated &= ~(1ULL << path->idx);
1810 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1812 struct btree_path *dup;
1814 EBUG_ON(trans->paths + path->idx != path);
1815 EBUG_ON(!path->ref);
1817 if (!__btree_path_put(path, intent))
1821 * Perhaps instead we should check for duplicate paths in traverse_all:
1823 if (path->preserve &&
1824 (dup = have_path_at_pos(trans, path))) {
1825 dup->preserve = true;
1826 path->preserve = false;
1830 if (!path->preserve &&
1831 (dup = have_node_at_pos(trans, path)))
1835 if (path->should_be_locked &&
1836 !btree_node_locked(dup, path->level))
1839 dup->should_be_locked |= path->should_be_locked;
1840 __bch2_path_free(trans, path);
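/*
 * Roughly, dropping a path reference is where path reuse happens: if the
 * path was preserved and another path already sits at the same position,
 * the preserve flag is handed over to the duplicate, and a path that is
 * supposed to stay locked passes should_be_locked on to the duplicate
 * before it is freed.
 */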
1844 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1846 struct btree_path *path;
1847 struct btree_insert_entry *i;
1849 char buf1[300], buf2[300];
1851 btree_trans_verify_sorted(trans);
1853 trans_for_each_path_inorder(trans, path, idx)
1854 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
1855 path->idx, path->ref, path->intent_ref,
1856 path->should_be_locked ? " S" : "",
1857 path->preserve ? " P" : "",
1858 bch2_btree_ids[path->btree_id],
1859 (bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1),
1861 #ifdef CONFIG_BCACHEFS_DEBUG
1862 (void *) path->ip_allocated
1868 trans_for_each_update(trans, i) {
1870 struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
1872 printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
1873 bch2_btree_ids[i->btree_id],
1874 (void *) i->ip_allocated,
1875 (bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1),
1876 (bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
1880 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1881 struct btree_path *pos)
1883 struct btree_path *path;
1886 if (unlikely(trans->paths_allocated ==
1887 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1888 bch2_dump_trans_paths_updates(trans);
1889 panic("trans path overflow\n");
1892 idx = __ffs64(~trans->paths_allocated);
1893 trans->paths_allocated |= 1ULL << idx;
1895 path = &trans->paths[idx];
1899 path->intent_ref = 0;
1900 path->nodes_locked = 0;
1901 path->nodes_intent_locked = 0;
1903 btree_path_list_add(trans, pos, path);
1907 struct btree_path *bch2_path_get(struct btree_trans *trans,
1908 enum btree_id btree_id, struct bpos pos,
1909 unsigned locks_want, unsigned level,
1910 unsigned flags, unsigned long ip)
1912 struct btree_path *path, *path_pos = NULL;
1913 bool cached = flags & BTREE_ITER_CACHED;
1914 bool intent = flags & BTREE_ITER_INTENT;
1917 BUG_ON(trans->restarted);
1919 trans_for_each_path_inorder(trans, path, i) {
1920 if (__btree_path_cmp(path,
1931 path_pos->cached == cached &&
1932 path_pos->btree_id == btree_id &&
1933 path_pos->level == level) {
1934 __btree_path_get(path_pos, intent);
1935 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1937 path = btree_path_alloc(trans, path_pos);
1940 __btree_path_get(path, intent);
1942 path->btree_id = btree_id;
1943 path->cached = cached;
1944 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1945 path->should_be_locked = false;
1946 path->level = level;
1947 path->locks_want = locks_want;
1948 path->nodes_locked = 0;
1949 path->nodes_intent_locked = 0;
1950 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1951 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1952 #ifdef CONFIG_BCACHEFS_DEBUG
1953 path->ip_allocated = ip;
1955 btree_trans_verify_sorted(trans);
1958 if (!(flags & BTREE_ITER_NOPRESERVE))
1959 path->preserve = true;
1961 if (path->intent_ref)
1962 locks_want = max(locks_want, level + 1);
1965 * If the path has locks_want greater than requested, we don't downgrade
1966 * it here - on transaction restart because btree node split needs to
1967 * upgrade locks, we might be putting/getting the iterator again.
1968 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1969 * a successful transaction commit.
1972 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1973 if (locks_want > path->locks_want) {
1974 path->locks_want = locks_want;
1975 btree_path_get_locks(trans, path, true);
1981 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1986 if (!path->cached) {
1987 struct btree_path_level *l = path_l(path);
1988 struct bkey_packed *_k;
1990 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1992 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1993 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1995 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1997 if (!k.k || bpos_cmp(path->pos, k.k->p))
2000 struct bkey_cached *ck = (void *) path->l[0].b;
2003 (path->btree_id != ck->key.btree_id ||
2004 bkey_cmp(path->pos, ck->key.pos)));
2006 /* BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE? */
2007 if (unlikely(!ck || !ck->valid))
2008 return bkey_s_c_null;
2010 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
2012 k = bkey_i_to_s_c(ck->k);
2019 return (struct bkey_s_c) { u, NULL };
2022 /* Btree iterators: */
2025 __bch2_btree_iter_traverse(struct btree_iter *iter)
2027 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2031 bch2_btree_iter_traverse(struct btree_iter *iter)
2035 iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
2036 btree_iter_search_key(iter),
2037 iter->flags & BTREE_ITER_INTENT,
2038 btree_iter_ip_allocated(iter));
2040 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2044 iter->path->should_be_locked = true;
2048 /* Iterate across nodes (leaf and interior nodes) */
2050 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2052 struct btree_trans *trans = iter->trans;
2053 struct btree *b = NULL;
2056 EBUG_ON(iter->path->cached);
2057 bch2_btree_iter_verify(iter);
2059 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2063 b = btree_path_node(iter->path, iter->path->level);
2067 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2069 bkey_init(&iter->k);
2070 iter->k.p = iter->pos = b->key.k.p;
2072 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2073 iter->flags & BTREE_ITER_INTENT,
2074 btree_iter_ip_allocated(iter));
2075 iter->path->should_be_locked = true;
2076 BUG_ON(iter->path->uptodate);
2078 bch2_btree_iter_verify_entry_exit(iter);
2079 bch2_btree_iter_verify(iter);
2087 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2089 struct btree_trans *trans = iter->trans;
2090 struct btree_path *path = iter->path;
2091 struct btree *b = NULL;
2095 BUG_ON(trans->restarted);
2096 EBUG_ON(iter->path->cached);
2097 bch2_btree_iter_verify(iter);
2099 /* already at end? */
2100 if (!btree_path_node(path, path->level))
2104 if (!btree_path_node(path, path->level + 1)) {
2105 btree_node_unlock(path, path->level);
2106 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2111 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2112 __bch2_btree_path_unlock(path);
2113 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2114 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2115 trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
2116 path->btree_id, &path->pos);
2117 btree_trans_restart(trans);
2122 b = btree_path_node(path, path->level + 1);
2124 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2125 btree_node_unlock(path, path->level);
2126 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2130 * Haven't gotten to the end of the parent node: go back down to
2131 * the next child node
2134 bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2135 iter->flags & BTREE_ITER_INTENT,
2136 btree_iter_ip_allocated(iter));
2138 path->level = iter->min_depth;
2140 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
2141 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
2142 btree_node_unlock(path, l);
2144 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2145 bch2_btree_iter_verify(iter);
2147 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2151 b = path->l[path->level].b;
2154 bkey_init(&iter->k);
2155 iter->k.p = iter->pos = b->key.k.p;
2157 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2158 iter->flags & BTREE_ITER_INTENT,
2159 btree_iter_ip_allocated(iter));
2160 iter->path->should_be_locked = true;
2161 BUG_ON(iter->path->uptodate);
2163 bch2_btree_iter_verify_entry_exit(iter);
2164 bch2_btree_iter_verify(iter);
2172 /* Iterate across keys (in leaf nodes only) */
2174 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2176 struct bpos pos = iter->k.p;
2177 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2178 ? bpos_cmp(pos, SPOS_MAX)
2179 : bkey_cmp(pos, SPOS_MAX)) != 0;
2181 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2182 pos = bkey_successor(iter, pos);
2183 bch2_btree_iter_set_pos(iter, pos);
2187 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2189 struct bpos pos = bkey_start_pos(&iter->k);
2190 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2191 ? bpos_cmp(pos, POS_MIN)
2192 : bkey_cmp(pos, POS_MIN)) != 0;
2194 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2195 pos = bkey_predecessor(iter, pos);
2196 bch2_btree_iter_set_pos(iter, pos);
2200 static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
2201 enum btree_id btree_id,
2204 struct btree_insert_entry *i;
2206 trans_for_each_update(trans, i)
2207 if ((cmp_int(btree_id, i->btree_id) ?:
2208 bpos_cmp(pos, i->k->k.p)) <= 0) {
2209 if (btree_id == i->btree_id)
2218 struct bkey_i *__btree_trans_peek_journal(struct btree_trans *trans,
2219 struct btree_path *path)
2221 struct journal_keys *keys = &trans->c->journal_keys;
2222 size_t idx = bch2_journal_key_search(keys, path->btree_id,
2223 path->level, path->pos);
2225 while (idx < keys->nr && keys->d[idx].overwritten)
2228 return (idx < keys->nr &&
2229 keys->d[idx].btree_id == path->btree_id &&
2230 keys->d[idx].level == path->level)
2236 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2237 struct btree_iter *iter,
2240 struct bkey_i *next_journal =
2241 __btree_trans_peek_journal(trans, iter->path);
2244 bpos_cmp(next_journal->k.p,
2245 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2246 iter->k = next_journal->k;
2247 k = bkey_i_to_s_c(next_journal);
2254 * Checks btree key cache for key at iter->pos and returns it if present, or
2258 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2260 struct btree_trans *trans = iter->trans;
2261 struct bch_fs *c = trans->c;
2265 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2266 return bkey_s_c_null;
2268 if (!iter->key_cache_path)
2269 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2270 iter->flags & BTREE_ITER_INTENT, 0,
2271 iter->flags|BTREE_ITER_CACHED,
2274 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2275 iter->flags & BTREE_ITER_INTENT,
2276 btree_iter_ip_allocated(iter));
2278 ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
2280 return bkey_s_c_err(ret);
2282 iter->key_cache_path->should_be_locked = true;
2284 return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
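/*
 * A sketch of what the key cache peek above does: nothing unless the key
 * cache already has an entry for this position; otherwise lazily allocate
 * iter->key_cache_path with BTREE_ITER_CACHED, point it at pos, traverse
 * it, and return the cached key so the caller can prefer it over what is
 * in the btree.
 */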
2287 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2289 struct btree_trans *trans = iter->trans;
2290 struct bkey_i *next_update;
2291 struct bkey_s_c k, k2;
2294 EBUG_ON(iter->path->cached || iter->path->level);
2295 bch2_btree_iter_verify(iter);
2298 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2299 iter->flags & BTREE_ITER_INTENT,
2300 btree_iter_ip_allocated(iter));
2302 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2303 if (unlikely(ret)) {
2304 /* ensure that iter->k is consistent with iter->pos: */
2305 bch2_btree_iter_set_pos(iter, iter->pos);
2306 k = bkey_s_c_err(ret);
2310 iter->path->should_be_locked = true;
2312 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2314 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2316 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2320 bch2_btree_iter_set_pos(iter, iter->pos);
2328 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2329 k = btree_trans_peek_journal(trans, iter, k);
2331 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2332 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2335 bpos_cmp(next_update->k.p,
2336 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2337 iter->k = next_update->k;
2338 k = bkey_i_to_s_c(next_update);
2341 if (k.k && bkey_deleted(k.k)) {
2343 * If we've got a whiteout, and it's after the search
2344 * key, advance the search key to the whiteout instead
2345 * of just after the whiteout - it might be a btree
2346 * whiteout, with a real key at the same position, since
2347 * in the btree deleted keys sort before non deleted.
2349 search_key = bpos_cmp(search_key, k.k->p)
2351 : bpos_successor(k.k->p);
2357 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2358 /* Advance to next leaf node: */
2359 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2362 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2368 bch2_btree_iter_verify(iter);
2374 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
2375 * current position
2377 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
2379 struct btree_trans *trans = iter->trans;
2380 struct bpos search_key = btree_iter_search_key(iter);
2384 if (iter->update_path) {
2385 bch2_path_put(trans, iter->update_path,
2386 iter->flags & BTREE_ITER_INTENT);
2387 iter->update_path = NULL;
2390 bch2_btree_iter_verify_entry_exit(iter);
2393 k = __bch2_btree_iter_peek(iter, search_key);
2394 if (!k.k || bkey_err(k))
2397 if (iter->update_path &&
2398 bkey_cmp(iter->update_path->pos, k.k->p)) {
2399 bch2_path_put(trans, iter->update_path,
2400 iter->flags & BTREE_ITER_INTENT);
2401 iter->update_path = NULL;
2404 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2405 (iter->flags & BTREE_ITER_INTENT) &&
2406 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2407 !iter->update_path) {
2408 struct bpos pos = k.k->p;
2410 if (pos.snapshot < iter->snapshot) {
2411 search_key = bpos_successor(k.k->p);
2415 pos.snapshot = iter->snapshot;
2418 * advance, same as on exit for iter->path, but only up
2421 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2422 iter->update_path = iter->path;
2424 iter->update_path = bch2_btree_path_set_pos(trans,
2425 iter->update_path, pos,
2426 iter->flags & BTREE_ITER_INTENT,
2427 btree_iter_ip_allocated(iter));
2429 BUG_ON(!(iter->update_path->nodes_locked & 1));
2430 iter->update_path->should_be_locked = true;
2434 * We can never have a key in a leaf node at POS_MAX, so
2435 * we don't have to check these successor() calls:
2437 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2438 !bch2_snapshot_is_ancestor(trans->c,
2441 search_key = bpos_successor(k.k->p);
2445 if (bkey_whiteout(k.k) &&
2446 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2447 search_key = bkey_successor(iter, k.k->p);
2455 * iter->pos should be monotonically increasing, and always be equal to
2456 * the key we just returned - except extents can straddle iter->pos:
2458 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2460 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2461 iter->pos = bkey_start_pos(k.k);
2463 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2464 iter->flags & BTREE_ITER_INTENT,
2465 btree_iter_ip_allocated(iter));
2466 BUG_ON(!iter->path->nodes_locked);
2468 if (iter->update_path) {
2469 BUG_ON(!(iter->update_path->nodes_locked & 1));
2470 iter->update_path->should_be_locked = true;
2472 iter->path->should_be_locked = true;
2474 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2475 iter->pos.snapshot = iter->snapshot;
2477 ret = bch2_btree_iter_verify_ret(iter, k);
2478 if (unlikely(ret)) {
2479 bch2_btree_iter_set_pos(iter, iter->pos);
2480 k = bkey_s_c_err(ret);
	bch2_btree_iter_verify_entry_exit(iter);

	return k;
}
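/*
 * Minimal usage sketch (illustrative only): a forward scan built on the
 * iterator calls above.  The btree id, start position and flags are arbitrary
 * choices, c is the filesystem's bch_fs, and use_key() is a hypothetical
 * consumer; handling of -EINTR restarts is sketched after bch2_trans_begin()
 * further down.
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);
 *
 *	while ((k = bch2_btree_iter_peek(&iter)).k &&
 *	       !(ret = bkey_err(k))) {
 *		use_key(k);
 *		if (!bch2_btree_iter_advance(&iter))
 *			break;
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 *	bch2_trans_exit(&trans);
 */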
/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
2492 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2494 if (!bch2_btree_iter_advance(iter))
2495 return bkey_s_c_null;
2497 return bch2_btree_iter_peek(iter);
/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
2504 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2506 struct btree_trans *trans = iter->trans;
2507 struct bpos search_key = iter->pos;
2508 struct btree_path *saved_path = NULL;
2510 struct bkey saved_k;
2511 const struct bch_val *saved_v;
2514 EBUG_ON(iter->path->cached || iter->path->level);
2515 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2517 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2518 return bkey_s_c_err(-EIO);
2520 bch2_btree_iter_verify(iter);
2521 bch2_btree_iter_verify_entry_exit(iter);
2523 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2524 search_key.snapshot = U32_MAX;
2527 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2528 iter->flags & BTREE_ITER_INTENT,
2529 btree_iter_ip_allocated(iter));
2531 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2532 if (unlikely(ret)) {
2533 /* ensure that iter->k is consistent with iter->pos: */
2534 bch2_btree_iter_set_pos(iter, iter->pos);
2535 k = bkey_s_c_err(ret);
2539 k = btree_path_level_peek(trans->c, iter->path,
2540 &iter->path->l[0], &iter->k);
	if (!k.k ||
	    ((iter->flags & BTREE_ITER_IS_EXTENTS)
2543 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2544 : bpos_cmp(k.k->p, search_key) > 0))
2545 k = btree_path_level_prev(trans->c, iter->path,
2546 &iter->path->l[0], &iter->k);
2548 btree_path_check_sort(trans, iter->path, 0);
2551 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2552 if (k.k->p.snapshot == iter->snapshot)
			/*
			 * If we have a saved candidate, and we're no
			 * longer at the same _key_ (not pos), return
			 * that candidate
			 */
2560 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2561 bch2_path_put(trans, iter->path,
2562 iter->flags & BTREE_ITER_INTENT);
2563 iter->path = saved_path;
			if (bch2_snapshot_is_ancestor(iter->trans->c,
						      iter->snapshot,
						      k.k->p.snapshot)) {
				if (saved_path)
					bch2_path_put(trans, saved_path,
						      iter->flags & BTREE_ITER_INTENT);
2576 saved_path = btree_path_clone(trans, iter->path,
2577 iter->flags & BTREE_ITER_INTENT);
2582 search_key = bpos_predecessor(k.k->p);
2586 if (bkey_whiteout(k.k) &&
2587 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2588 search_key = bkey_predecessor(iter, k.k->p);
2589 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2590 search_key.snapshot = U32_MAX;
2595 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2596 /* Advance to previous leaf node: */
2597 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
	} else {
		/* Start of btree: */
2600 bch2_btree_iter_set_pos(iter, POS_MIN);
2606 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2608 /* Extents can straddle iter->pos: */
2609 if (bkey_cmp(k.k->p, iter->pos) < 0)
2612 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2613 iter->pos.snapshot = iter->snapshot;
2616 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2617 iter->path->should_be_locked = true;
2619 bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return k;
}
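/*
 * Minimal sketch (illustrative only): reverse iteration mirrors the forward
 * scan shown after bch2_btree_iter_peek(), reusing the same declarations;
 * inum is an assumed inode number and use_key() is again hypothetical.
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 *			     POS(inum, U64_MAX), 0);
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     k.k && !bkey_err(k);
 *	     k = bch2_btree_iter_prev(&iter))
 *		use_key(k);
 *	bch2_trans_iter_exit(&trans, &iter);
 */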
/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
2629 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2631 if (!bch2_btree_iter_rewind(iter))
2632 return bkey_s_c_null;
2634 return bch2_btree_iter_peek_prev(iter);
2637 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2639 struct btree_trans *trans = iter->trans;
2640 struct bpos search_key;
2644 EBUG_ON(iter->path->level);
2645 bch2_btree_iter_verify(iter);
2646 bch2_btree_iter_verify_entry_exit(iter);
2648 /* extents can't span inode numbers: */
2649 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2650 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2651 if (iter->pos.inode == KEY_INODE_MAX)
2652 return bkey_s_c_null;
2654 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2657 search_key = btree_iter_search_key(iter);
2658 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2659 iter->flags & BTREE_ITER_INTENT,
2660 btree_iter_ip_allocated(iter));
2662 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (unlikely(ret))
		return bkey_s_c_err(ret);
2666 if ((iter->flags & BTREE_ITER_CACHED) ||
2667 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2668 struct bkey_i *next_update;
2670 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2671 (next_update = btree_trans_peek_updates(trans,
2672 iter->btree_id, search_key)) &&
2673 !bpos_cmp(next_update->k.p, iter->pos)) {
2674 iter->k = next_update->k;
2675 k = bkey_i_to_s_c(next_update);
2679 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2680 (next_update = __btree_trans_peek_journal(trans, iter->path)) &&
2681 !bpos_cmp(next_update->k.p, iter->pos)) {
2682 iter->k = next_update->k;
2683 k = bkey_i_to_s_c(next_update);
2687 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2688 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2694 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2698 if (iter->flags & BTREE_ITER_INTENT) {
2699 struct btree_iter iter2;
2701 bch2_trans_copy_iter(&iter2, iter);
2702 k = bch2_btree_iter_peek(&iter2);
2704 if (k.k && !bkey_err(k)) {
2708 bch2_trans_iter_exit(trans, &iter2);
2710 struct bpos pos = iter->pos;
2712 k = bch2_btree_iter_peek(iter);
		if (unlikely(bkey_err(k)))
			return k;
2719 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2721 if (bkey_cmp(iter->pos, next) < 0) {
2722 bkey_init(&iter->k);
2723 iter->k.p = iter->pos;
2725 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2726 bch2_key_resize(&iter->k,
2727 min_t(u64, KEY_SIZE_MAX,
					(next.inode == iter->pos.inode
					 ? next.offset
					 : KEY_OFFSET_MAX) -
					iter->pos.offset));
2732 EBUG_ON(!iter->k.size);
2735 k = (struct bkey_s_c) { &iter->k, NULL };
2739 iter->path->should_be_locked = true;
2741 bch2_btree_iter_verify_entry_exit(iter);
2742 bch2_btree_iter_verify(iter);
2743 ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	return k;
}
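/*
 * Note on the function above: unlike bch2_btree_iter_peek(),
 * bch2_btree_iter_peek_slot() returns a key for the iterator's exact position
 * even when nothing is stored there - for extents the hole is synthesized as
 * a KEY_TYPE_deleted key (with a NULL value) sized via bch2_key_resize() to
 * reach the next real key.  Minimal sketch, with an arbitrary position and a
 * hypothetical inum:
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS(inum, 0), 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	if (!bkey_err(k) && bkey_deleted(k.k))
 *		;	(k describes the empty range at iter->pos)
 *	bch2_trans_iter_exit(&trans, &iter);
 */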
2750 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2752 if (!bch2_btree_iter_advance(iter))
2753 return bkey_s_c_null;
2755 return bch2_btree_iter_peek_slot(iter);
2758 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2760 if (!bch2_btree_iter_rewind(iter))
2761 return bkey_s_c_null;
2763 return bch2_btree_iter_peek_slot(iter);
2766 /* new transactional stuff: */
2768 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2769 struct btree_path *path)
2771 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2772 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2773 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2776 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2778 #ifdef CONFIG_BCACHEFS_DEBUG
2781 for (i = 0; i < trans->nr_sorted; i++)
2782 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2786 static void btree_trans_verify_sorted(struct btree_trans *trans)
2788 #ifdef CONFIG_BCACHEFS_DEBUG
2789 struct btree_path *path, *prev = NULL;
2792 trans_for_each_path_inorder(trans, path, i) {
2793 BUG_ON(prev && btree_path_cmp(prev, path) > 0);
2799 static inline void btree_path_swap(struct btree_trans *trans,
2800 struct btree_path *l, struct btree_path *r)
2802 swap(l->sorted_idx, r->sorted_idx);
2803 swap(trans->sorted[l->sorted_idx],
2804 trans->sorted[r->sorted_idx]);
2806 btree_path_verify_sorted_ref(trans, l);
	btree_path_verify_sorted_ref(trans, r);
}
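/*
 * btree_path_check_sort() below restores the ordering of trans->sorted after
 * a single path's position has changed, by repeatedly swapping that path with
 * its neighbour in whichever direction btree_path_cmp() says it is out of
 * order - effectively one insertion-sort step, cheap because only the one
 * path that just moved can be misplaced.
 */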
2810 static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2813 struct btree_path *n;
2816 n = prev_btree_path(trans, path);
2817 if (n && btree_path_cmp(n, path) > 0) {
2819 btree_path_swap(trans, n, path);
2820 n = prev_btree_path(trans, path);
2821 } while (n && btree_path_cmp(n, path) > 0);
2828 n = next_btree_path(trans, path);
2829 if (n && btree_path_cmp(path, n) > 0) {
2831 btree_path_swap(trans, path, n);
2832 n = next_btree_path(trans, path);
2833 } while (n && btree_path_cmp(path, n) > 0);
	btree_trans_verify_sorted(trans);
}
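/*
 * The two helpers below keep trans->sorted - an array of path indices ordered
 * by btree_path_cmp() - and each path's sorted_idx back-pointer consistent as
 * paths are removed from and added to the transaction.
 */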
2840 static inline void btree_path_list_remove(struct btree_trans *trans,
2841 struct btree_path *path)
2845 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2847 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2849 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2850 trans->paths[trans->sorted[i]].sorted_idx = i;
2852 path->sorted_idx = U8_MAX;
2854 btree_trans_verify_sorted_refs(trans);
2857 static inline void btree_path_list_add(struct btree_trans *trans,
2858 struct btree_path *pos,
2859 struct btree_path *path)
2863 btree_trans_verify_sorted_refs(trans);
2865 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
2867 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2869 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2870 trans->paths[trans->sorted[i]].sorted_idx = i;
2872 btree_trans_verify_sorted_refs(trans);
2875 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
	if (iter->path)
		bch2_path_put(trans, iter->path,
2879 iter->flags & BTREE_ITER_INTENT);
2880 if (iter->update_path)
2881 bch2_path_put(trans, iter->update_path,
2882 iter->flags & BTREE_ITER_INTENT);
2883 if (iter->key_cache_path)
2884 bch2_path_put(trans, iter->key_cache_path,
2885 iter->flags & BTREE_ITER_INTENT);
	iter->path		= NULL;
	iter->update_path	= NULL;
	iter->key_cache_path	= NULL;
}
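/*
 * The constructor below first normalizes the caller's flags - extent btrees
 * get BTREE_ITER_IS_EXTENTS, snapshot btrees get BTREE_ITER_FILTER_SNAPSHOTS,
 * journal keys are consulted until replay has finished, and key cache flags
 * are dropped for btrees that aren't cached - and only then acquires the
 * iterator's initial btree_path.
 */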
2891 static void __bch2_trans_iter_init(struct btree_trans *trans,
2892 struct btree_iter *iter,
2893 unsigned btree_id, struct bpos pos,
2894 unsigned locks_want,
2899 EBUG_ON(trans->restarted);
2901 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
2902 btree_node_type_is_extents(btree_id))
2903 flags |= BTREE_ITER_IS_EXTENTS;
2905 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
2906 !btree_type_has_snapshots(btree_id))
2907 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2909 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
2910 btree_type_has_snapshots(btree_id))
2911 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
2913 if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
2914 flags |= BTREE_ITER_WITH_JOURNAL;
2916 if (!btree_id_cached(trans->c, btree_id)) {
2917 flags &= ~BTREE_ITER_CACHED;
2918 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
2919 } else if (!(flags & BTREE_ITER_CACHED))
2920 flags |= BTREE_ITER_WITH_KEY_CACHE;
2922 iter->trans = trans;
2924 iter->update_path = NULL;
2925 iter->key_cache_path = NULL;
2926 iter->btree_id = btree_id;
2927 iter->min_depth = depth;
2928 iter->flags = flags;
2929 iter->snapshot = pos.snapshot;
	iter->pos	= pos;
	iter->k.type	= KEY_TYPE_deleted;
2934 #ifdef CONFIG_BCACHEFS_DEBUG
2935 iter->ip_allocated = ip;
2938 iter->path = bch2_path_get(trans, btree_id, iter->pos,
2939 locks_want, depth, flags, ip);
2942 void bch2_trans_iter_init(struct btree_trans *trans,
2943 struct btree_iter *iter,
2944 unsigned btree_id, struct bpos pos,
2947 __bch2_trans_iter_init(trans, iter, btree_id, pos,
2948 0, 0, flags, _RET_IP_);
2951 void bch2_trans_node_iter_init(struct btree_trans *trans,
2952 struct btree_iter *iter,
2953 enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
2959 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
2960 BTREE_ITER_NOT_EXTENTS|
2961 __BTREE_ITER_ALL_SNAPSHOTS|
			       BTREE_ITER_ALL_SNAPSHOTS|
			       flags, _RET_IP_);
2964 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2965 BUG_ON(iter->path->level != depth);
2966 BUG_ON(iter->min_depth != depth);
2969 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
	*dst = *src;

	__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
2974 if (src->update_path)
2975 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
2976 dst->key_cache_path = NULL;
2979 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
	size_t new_top = trans->mem_top + size;
	void *p;
2984 if (new_top > trans->mem_bytes) {
2985 size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;
2989 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2991 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2992 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2993 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			trans->used_mempool = true;
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);
3001 trans->mem = new_mem;
3002 trans->mem_bytes = new_bytes;
		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
			btree_trans_restart(trans);
			return ERR_PTR(-EINTR);
		}
3011 p = trans->mem + trans->mem_top;
	trans->mem_top += size;

	return p;
}
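/*
 * Minimal sketch (illustrative only): bch2_trans_kmalloc() is a bump
 * allocator backed by trans->mem; growing the buffer mid-transaction restarts
 * the transaction, so callers must be prepared for ERR_PTR(-EINTR) as well as
 * ERR_PTR(-ENOMEM).  The bkey_i allocation below is just an example:
 *
 *	struct bkey_i *new_key = bch2_trans_kmalloc(trans, sizeof(*new_key));
 *
 *	if (IS_ERR(new_key))
 *		return PTR_ERR(new_key);
 */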
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return EINTR when the trylock fails. When this occurs,
 * bch2_trans_begin() should be called and the transaction retried.
 */
3025 void bch2_trans_begin(struct btree_trans *trans)
3027 struct btree_insert_entry *i;
3028 struct btree_path *path;
3030 trans_for_each_update(trans, i)
3031 __btree_path_put(i->path, true);
3033 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
3034 trans->extra_journal_res = 0;
3035 trans->nr_updates = 0;
3038 trans->hooks = NULL;
3039 trans->extra_journal_entries = NULL;
3040 trans->extra_journal_entry_u64s = 0;
3042 if (trans->fs_usage_deltas) {
3043 trans->fs_usage_deltas->used = 0;
3044 memset(&trans->fs_usage_deltas->memset_start, 0,
3045 (void *) &trans->fs_usage_deltas->memset_end -
3046 (void *) &trans->fs_usage_deltas->memset_start);
3049 trans_for_each_path(trans, path) {
3050 path->should_be_locked = false;
		/*
		 * XXX: we probably shouldn't be doing this if the transaction
		 * was restarted, but currently we still overflow transaction
		 * iterators if we do that
		 */
3057 if (!path->ref && !path->preserve)
3058 __bch2_path_free(trans, path);
3059 else if (!path->ref)
3060 path->preserve = false;
3063 bch2_trans_cond_resched(trans);
3065 if (trans->restarted)
3066 bch2_btree_path_traverse_all(trans);
	trans->restarted = false;
}
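/*
 * Minimal sketch (illustrative only) of the retry pattern described in the
 * comment above; do_update() stands in for any sequence of iterator and
 * update calls and is purely hypothetical:
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_update(&trans);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 */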
3071 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
3073 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
3074 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
3085 trans->paths = p; p += paths_bytes;
3086 trans->updates = p; p += updates_bytes;
3089 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
3090 unsigned expected_nr_iters,
		       size_t expected_mem_bytes,
		       const char *fn)
3093 __acquires(&c->btree_trans_barrier)
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->fn	= fn;
3099 bch2_trans_alloc_paths(trans, c);
3101 if (expected_mem_bytes) {
3102 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
3103 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
		if (unlikely(!trans->mem)) {
3106 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
3107 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
3111 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3113 trans->pid = current->pid;
3114 mutex_lock(&c->btree_trans_lock);
3115 list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
}
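/*
 * Registering the transaction on c->btree_trans_list above is what lets
 * bch2_btree_trans_to_text(), further down, enumerate the btree node locks
 * held by every running transaction when debugging.
 */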
3119 static void check_btree_paths_leaked(struct btree_trans *trans)
3121 #ifdef CONFIG_BCACHEFS_DEBUG
3122 struct bch_fs *c = trans->c;
3123 struct btree_path *path;
	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %s!", trans->fn);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR " btree %s %pS\n",
3134 bch2_btree_ids[path->btree_id],
3135 (void *) path->ip_allocated);
3136 /* Be noisy about this: */
3137 bch2_fatal_error(c);
3141 void bch2_trans_exit(struct btree_trans *trans)
3142 __releases(&c->btree_trans_barrier)
3144 struct btree_insert_entry *i;
3145 struct bch_fs *c = trans->c;
3147 bch2_trans_unlock(trans);
3149 trans_for_each_update(trans, i)
3150 __btree_path_put(i->path, true);
3151 trans->nr_updates = 0;
3153 check_btree_paths_leaked(trans);
3155 mutex_lock(&c->btree_trans_lock);
3156 list_del(&trans->list);
3157 mutex_unlock(&c->btree_trans_lock);
3159 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3161 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3163 if (trans->fs_usage_deltas) {
3164 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3165 REPLICAS_DELTA_LIST_MAX)
3166 mempool_free(trans->fs_usage_deltas,
3167 &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);
#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif
	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);
3187 trans->mem = (void *) 0x1;
3188 trans->paths = (void *) 0x1;
3191 static void __maybe_unused
3192 bch2_btree_path_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     bool cached)
{
3196 pr_buf(out, " l=%u %s:",
3197 _b->level, bch2_btree_ids[_b->btree_id]);
3198 bch2_bpos_to_text(out, btree_node_pos(_b, cached));
3201 static bool trans_has_locks(struct btree_trans *trans)
3203 struct btree_path *path;
3205 trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;

	return false;
}
3211 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
3213 struct btree_trans *trans;
	struct btree_path *path;
	struct btree *b;
	unsigned l;
3218 mutex_lock(&c->btree_trans_lock);
3219 list_for_each_entry(trans, &c->btree_trans_list, list) {
3220 if (!trans_has_locks(trans))
3223 pr_buf(out, "%i %s\n", trans->pid, trans->fn);
3225 trans_for_each_path(trans, path) {
3226 if (!path->nodes_locked)
			pr_buf(out, " path %u %c l=%u %s:",
			       path->idx,
			       path->cached ? 'c' : 'b',
			       path->level,
			       bch2_btree_ids[path->btree_id]);
3234 bch2_bpos_to_text(out, path->pos);
3237 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3238 if (btree_node_locked(path, l)) {
3239 pr_buf(out, " %s l=%u ",
3240 btree_node_intent_locked(path, l) ? "i" : "r", l);
3241 bch2_btree_path_node_to_text(out,
3242 (void *) path->l[l].b,
3249 b = READ_ONCE(trans->locking);
		if (b) {
			path = &trans->paths[trans->locking_path_idx];
3252 pr_buf(out, " locking path %u %c l=%u %s:",
3253 trans->locking_path_idx,
3254 path->cached ? 'c' : 'b',
3255 trans->locking_level,
3256 bch2_btree_ids[trans->locking_btree_id]);
3257 bch2_bpos_to_text(out, trans->locking_pos);
3259 pr_buf(out, " node ");
3260 bch2_btree_path_node_to_text(out,
3261 (void *) b, path->cached);
3265 mutex_unlock(&c->btree_trans_lock);
3268 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3270 if (c->btree_trans_barrier_initialized)
3271 cleanup_srcu_struct(&c->btree_trans_barrier);
3272 mempool_exit(&c->btree_trans_mem_pool);
3273 mempool_exit(&c->btree_paths_pool);
3276 int bch2_fs_btree_iter_init(struct bch_fs *c)
	unsigned nr = BTREE_ITER_MAX;
	int ret;
3281 INIT_LIST_HEAD(&c->btree_trans_list);
3282 mutex_init(&c->btree_trans_lock);
3284 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3285 sizeof(struct btree_path) * nr +
3286 sizeof(struct btree_insert_entry) * nr) ?:
3287 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3288 BTREE_TRANS_MEM_MAX) ?:
3289 init_srcu_struct(&c->btree_trans_barrier);
	if (!ret)
		c->btree_trans_barrier_initialized = true;
	return ret;
}