1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
16 #include "subvolume.h"
18 #include <linux/prefetch.h>
19 #include <trace/events/bcachefs.h>
21 static void btree_trans_verify_sorted(struct btree_trans *);
22 static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *, struct btree_path *);
28 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
31 * Unlocks before scheduling
32 * Note: does not revalidate iterator
34 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
36 if (need_resched() || race_fault()) {
37 bch2_trans_unlock(trans);
39 return bch2_trans_relock(trans) ? 0 : -EINTR;
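/*
 * -EINTR here follows the convention used throughout this file: it means the
 * locks could not be retaken and the transaction has been marked restarted
 * (trans->restarted), so the caller must restart the whole btree_trans and
 * retry.
 */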
45 static inline int __btree_path_cmp(const struct btree_path *l,
46 enum btree_id r_btree_id,
51 return cmp_int(l->btree_id, r_btree_id) ?:
52 cmp_int((int) l->cached, (int) r_cached) ?:
53 bpos_cmp(l->pos, r_pos) ?:
54 -cmp_int(l->level, r_level);
57 static inline int btree_path_cmp(const struct btree_path *l,
58 const struct btree_path *r)
60 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
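/*
 * Paths within a btree_trans are kept sorted by this comparison: by btree id,
 * then cached vs. non-cached, then position; at the same position, paths to
 * higher (interior) levels sort before paths to lower levels.
 */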
63 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
65 /* Are we iterating over keys in all snapshots? */
66 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
67 p = bpos_successor(p);
69 p = bpos_nosnap_successor(p);
70 p.snapshot = iter->snapshot;
76 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
78 /* Are we iterating over keys in all snapshots? */
79 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
80 p = bpos_predecessor(p);
82 p = bpos_nosnap_predecessor(p);
83 p.snapshot = iter->snapshot;
89 static inline bool is_btree_node(struct btree_path *path, unsigned l)
91 return l < BTREE_MAX_DEPTH &&
92 (unsigned long) path->l[l].b >= 128;
95 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
97 struct bpos pos = iter->pos;
99 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
100 bkey_cmp(pos, POS_MAX))
101 pos = bkey_successor(iter, pos);
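/*
 * For extents iterators the search key is the successor of iter->pos: extent
 * keys use the extent's end position (an invariant assumed here), so an
 * extent whose key equals iter->pos ends at - and does not cover - that
 * position, and should not be returned by a forward search.
 */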
105 static inline bool btree_path_pos_before_node(struct btree_path *path,
108 return bpos_cmp(path->pos, b->data->min_key) < 0;
111 static inline bool btree_path_pos_after_node(struct btree_path *path,
114 return bpos_cmp(b->key.k.p, path->pos) < 0;
117 static inline bool btree_path_pos_in_node(struct btree_path *path,
120 return path->btree_id == b->c.btree_id &&
121 !btree_path_pos_before_node(path, b) &&
122 !btree_path_pos_after_node(path, b);
125 /* Btree node locking: */
127 void bch2_btree_node_unlock_write(struct btree_trans *trans,
128 struct btree_path *path, struct btree *b)
130 bch2_btree_node_unlock_write_inlined(trans, path, b);
133 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
135 struct btree_path *linked;
136 unsigned readers = 0;
138 trans_for_each_path(trans, linked)
139 if (linked->l[b->c.level].b == b &&
140 btree_node_read_locked(linked, b->c.level))
144 * Must drop our read locks before calling six_lock_write() -
145 * six_unlock() won't do wakeups until the reader count
146 * goes to 0, and it's safe because we have the node intent locked:
149 atomic64_sub(__SIX_VAL(read_lock, readers),
150 &b->c.lock.state.counter);
151 btree_node_lock_type(trans->c, b, SIX_LOCK_write);
152 atomic64_add(__SIX_VAL(read_lock, readers),
153 &b->c.lock.state.counter);
156 bool __bch2_btree_node_relock(struct btree_trans *trans,
157 struct btree_path *path, unsigned level)
159 struct btree *b = btree_path_node(path, level);
160 int want = __btree_lock_want(path, level);
162 if (!is_btree_node(path, level))
168 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
169 (btree_node_lock_seq_matches(path, b, level) &&
170 btree_node_lock_increment(trans, b, level, want))) {
171 mark_btree_node_locked(path, level, want);
178 bool bch2_btree_node_upgrade(struct btree_trans *trans,
179 struct btree_path *path, unsigned level)
181 struct btree *b = path->l[level].b;
183 if (!is_btree_node(path, level))
186 switch (btree_lock_want(path, level)) {
187 case BTREE_NODE_UNLOCKED:
188 BUG_ON(btree_node_locked(path, level));
190 case BTREE_NODE_READ_LOCKED:
191 BUG_ON(btree_node_intent_locked(path, level));
192 return bch2_btree_node_relock(trans, path, level);
193 case BTREE_NODE_INTENT_LOCKED:
197 if (btree_node_intent_locked(path, level))
203 if (btree_node_locked(path, level)
204 ? six_lock_tryupgrade(&b->c.lock)
205 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
208 if (btree_node_lock_seq_matches(path, b, level) &&
209 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
210 btree_node_unlock(path, level);
216 mark_btree_node_intent_locked(path, level);
220 static inline bool btree_path_get_locks(struct btree_trans *trans,
221 struct btree_path *path,
222 bool upgrade, unsigned long trace_ip)
224 unsigned l = path->level;
228 if (!btree_path_node(path, l))
232 ? bch2_btree_node_upgrade(trans, path, l)
233 : bch2_btree_node_relock(trans, path, l)))
237 } while (l < path->locks_want);
240 * When we fail to get a lock, we have to ensure that any child nodes
241 * can't be relocked so bch2_btree_path_traverse has to walk back up to
242 * the node that we failed to relock:
245 __bch2_btree_path_unlock(path);
246 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
249 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
251 } while (fail_idx >= 0);
254 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
255 path->uptodate = BTREE_ITER_UPTODATE;
257 bch2_trans_verify_locks(trans);
259 return path->uptodate < BTREE_ITER_NEED_RELOCK;
262 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
266 ? container_of(_b, struct btree, c)->key.k.p
267 : container_of(_b, struct bkey_cached, c)->key.pos;
271 bool __bch2_btree_node_lock(struct btree_trans *trans,
272 struct btree_path *path,
274 struct bpos pos, unsigned level,
275 enum six_lock_type type,
276 six_lock_should_sleep_fn should_sleep_fn, void *p,
279 struct btree_path *linked, *deadlock_path = NULL;
280 u64 start_time = local_clock();
284 /* Check if it's safe to block: */
285 trans_for_each_path(trans, linked) {
286 if (!linked->nodes_locked)
290 * Can't block taking an intent lock if we have _any_ nodes read locked:
293 * - Our read lock blocks another thread with an intent lock on
294 * the same node from getting a write lock, and thus from
295 * dropping its intent lock
297 * - And the other thread may have multiple nodes intent locked:
298 * both the node we want to intent lock, and the node we
299 * already have read locked - deadlock:
301 if (type == SIX_LOCK_intent &&
302 linked->nodes_locked != linked->nodes_intent_locked) {
303 deadlock_path = linked;
307 if (linked->btree_id != path->btree_id) {
308 if (linked->btree_id > path->btree_id) {
309 deadlock_path = linked;
316 * Within the same btree, cached paths come before non-cached paths:
319 if (linked->cached != path->cached) {
321 deadlock_path = linked;
328 * Interior nodes must be locked before their descendants: if
329 * another path has possible descendants locked of the node
330 * we're about to lock, it must have the ancestors locked too:
332 if (level > __fls(linked->nodes_locked)) {
333 deadlock_path = linked;
337 /* Must lock btree nodes in key order: */
338 if (btree_node_locked(linked, level) &&
339 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
340 linked->cached)) <= 0) {
341 deadlock_path = linked;
343 BUG_ON(trans->in_traverse_all);
347 if (unlikely(deadlock_path)) {
348 trace_trans_restart_would_deadlock(trans->ip, ip,
349 trans->in_traverse_all, reason,
350 deadlock_path->btree_id,
351 deadlock_path->cached,
356 btree_trans_restart(trans);
360 if (six_trylock_type(&b->c.lock, type))
363 #ifdef CONFIG_BCACHEFS_DEBUG
364 trans->locking_path_idx = path->idx;
365 trans->locking_pos = pos;
366 trans->locking_btree_id = path->btree_id;
367 trans->locking_level = level;
371 ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
373 #ifdef CONFIG_BCACHEFS_DEBUG
374 trans->locking = NULL;
377 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
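/*
 * The checks above enforce the ordering that keeps btree_trans deadlock-free:
 * in particular, we never block taking an intent lock while holding read
 * locks, btrees are locked in btree id order, interior nodes before their
 * descendants, and nodes at the same level in key order. When that ordering
 * would be violated we restart the transaction instead of blocking.
 */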
382 /* Btree iterator locking: */
384 #ifdef CONFIG_BCACHEFS_DEBUG
386 static void bch2_btree_path_verify_locks(struct btree_path *path)
390 if (!path->nodes_locked) {
391 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
392 btree_path_node(path, path->level));
396 for (l = 0; btree_path_node(path, l); l++)
397 BUG_ON(btree_lock_want(path, l) !=
398 btree_node_locked_type(path, l));
401 void bch2_trans_verify_locks(struct btree_trans *trans)
403 struct btree_path *path;
405 trans_for_each_path(trans, path)
406 bch2_btree_path_verify_locks(path);
409 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
412 /* Btree path locking: */
415 * Only for btree_cache.c - only relocks intent locks
417 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
418 struct btree_path *path)
422 for (l = path->level;
423 l < path->locks_want && btree_path_node(path, l);
425 if (!bch2_btree_node_relock(trans, path, l)) {
426 __bch2_btree_path_unlock(path);
427 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
428 btree_trans_restart(trans);
437 static bool bch2_btree_path_relock(struct btree_trans *trans,
438 struct btree_path *path, unsigned long trace_ip)
440 bool ret = btree_path_get_locks(trans, path, false, trace_ip);
443 btree_trans_restart(trans);
447 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
448 struct btree_path *path,
449 unsigned new_locks_want)
451 struct btree_path *linked;
453 EBUG_ON(path->locks_want >= new_locks_want);
455 path->locks_want = new_locks_want;
457 if (btree_path_get_locks(trans, path, true, _THIS_IP_))
461 * XXX: this is ugly - we'd prefer to not be mucking with other
462 * iterators in the btree_trans here.
464 * On failure to upgrade the iterator, setting iter->locks_want and
465 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
466 * get the locks we want on transaction restart.
468 * But if this iterator was a clone, on transaction restart what we did
469 * to this iterator isn't going to be preserved.
471 * Possibly we could add an iterator field for the parent iterator when
472 * an iterator is a copy - for now, we'll just upgrade any other
473 * iterators with the same btree id.
475 * The code below used to be needed to ensure ancestor nodes get locked
476 * before interior nodes - now that's handled by
477 * bch2_btree_path_traverse_all().
479 trans_for_each_path(trans, linked)
480 if (linked != path &&
481 linked->cached == path->cached &&
482 linked->btree_id == path->btree_id &&
483 linked->locks_want < new_locks_want) {
484 linked->locks_want = new_locks_want;
485 btree_path_get_locks(trans, linked, true, _THIS_IP_);
491 void __bch2_btree_path_downgrade(struct btree_path *path,
492 unsigned new_locks_want)
496 EBUG_ON(path->locks_want < new_locks_want);
498 path->locks_want = new_locks_want;
500 while (path->nodes_locked &&
501 (l = __fls(path->nodes_locked)) >= path->locks_want) {
502 if (l > path->level) {
503 btree_node_unlock(path, l);
505 if (btree_node_intent_locked(path, l)) {
506 six_lock_downgrade(&path->l[l].b->c.lock);
507 path->nodes_intent_locked ^= 1 << l;
513 bch2_btree_path_verify_locks(path);
516 void bch2_trans_downgrade(struct btree_trans *trans)
518 struct btree_path *path;
520 trans_for_each_path(trans, path)
521 bch2_btree_path_downgrade(path);
524 /* Btree transaction locking: */
526 bool bch2_trans_relock(struct btree_trans *trans)
528 struct btree_path *path;
530 if (unlikely(trans->restarted))
533 trans_for_each_path(trans, path)
534 if (path->should_be_locked &&
535 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
536 trace_trans_restart_relock(trans->ip, _RET_IP_,
537 path->btree_id, &path->pos);
538 BUG_ON(!trans->restarted);
544 void bch2_trans_unlock(struct btree_trans *trans)
546 struct btree_path *path;
548 trans_for_each_path(trans, path)
549 __bch2_btree_path_unlock(path);
551 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
554 /* Btree iterator: */
556 #ifdef CONFIG_BCACHEFS_DEBUG
558 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
559 struct btree_path *path)
561 struct bkey_cached *ck;
562 bool locked = btree_node_locked(path, 0);
564 if (!bch2_btree_node_relock(trans, path, 0))
567 ck = (void *) path->l[0].b;
568 BUG_ON(ck->key.btree_id != path->btree_id ||
569 bkey_cmp(ck->key.pos, path->pos));
572 btree_node_unlock(path, 0);
575 static void bch2_btree_path_verify_level(struct btree_trans *trans,
576 struct btree_path *path, unsigned level)
578 struct btree_path_level *l;
579 struct btree_node_iter tmp;
581 struct bkey_packed *p, *k;
582 char buf1[100], buf2[100], buf3[100];
585 if (!bch2_debug_check_iterators)
590 locked = btree_node_locked(path, level);
594 bch2_btree_path_verify_cached(trans, path);
598 if (!btree_path_node(path, level))
601 if (!bch2_btree_node_relock(trans, path, level))
604 BUG_ON(!btree_path_pos_in_node(path, l->b));
606 bch2_btree_node_iter_verify(&l->iter, l->b);
609 * For interior nodes, the iterator will have skipped past deleted keys:
612 ? bch2_btree_node_iter_prev(&tmp, l->b)
613 : bch2_btree_node_iter_prev_all(&tmp, l->b);
614 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
616 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
621 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
627 btree_node_unlock(path, level);
630 strcpy(buf2, "(none)");
631 strcpy(buf3, "(none)");
633 bch2_bpos_to_text(&PBUF(buf1), path->pos);
636 struct bkey uk = bkey_unpack_key(l->b, p);
637 bch2_bkey_to_text(&PBUF(buf2), &uk);
641 struct bkey uk = bkey_unpack_key(l->b, k);
642 bch2_bkey_to_text(&PBUF(buf3), &uk);
645 panic("path should be %s key at level %u:\n"
649 msg, level, buf1, buf2, buf3);
652 static void bch2_btree_path_verify(struct btree_trans *trans,
653 struct btree_path *path)
655 struct bch_fs *c = trans->c;
658 EBUG_ON(path->btree_id >= BTREE_ID_NR);
660 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
662 BUG_ON(!path->cached &&
663 c->btree_roots[path->btree_id].b->c.level > i);
667 bch2_btree_path_verify_level(trans, path, i);
670 bch2_btree_path_verify_locks(path);
673 void bch2_trans_verify_paths(struct btree_trans *trans)
675 struct btree_path *path;
677 trans_for_each_path(trans, path)
678 bch2_btree_path_verify(trans, path);
681 static void bch2_btree_iter_verify(struct btree_iter *iter)
683 struct btree_trans *trans = iter->trans;
685 BUG_ON(iter->btree_id >= BTREE_ID_NR);
687 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
689 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
690 iter->pos.snapshot != iter->snapshot);
692 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
693 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
695 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
696 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
697 !btree_type_has_snapshots(iter->btree_id));
699 bch2_btree_path_verify(trans, iter->path);
702 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
704 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
705 !iter->pos.snapshot);
707 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
708 iter->pos.snapshot != iter->snapshot);
710 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
711 bkey_cmp(iter->pos, iter->k.p) > 0);
714 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
716 struct btree_trans *trans = iter->trans;
717 struct btree_iter copy;
718 struct bkey_s_c prev;
721 if (!bch2_debug_check_iterators)
724 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
727 if (bkey_err(k) || !k.k)
730 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
734 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
735 BTREE_ITER_ALL_SNAPSHOTS);
736 prev = bch2_btree_iter_prev(&copy);
740 ret = bkey_err(prev);
744 if (!bkey_cmp(prev.k->p, k.k->p) &&
745 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
746 prev.k->p.snapshot) > 0) {
747 char buf1[100], buf2[200];
749 bch2_bkey_to_text(&PBUF(buf1), k.k);
750 bch2_bkey_to_text(&PBUF(buf2), prev.k);
752 panic("iter snap %u\n"
759 bch2_trans_iter_exit(trans, &copy);
763 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
764 struct bpos pos, bool key_cache)
766 struct btree_path *path;
770 trans_for_each_path_inorder(trans, path, idx) {
771 int cmp = cmp_int(path->btree_id, id) ?:
772 cmp_int(path->cached, key_cache);
779 if (!(path->nodes_locked & 1) ||
780 !path->should_be_locked)
784 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
785 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
788 if (!bkey_cmp(pos, path->pos))
793 bch2_dump_trans_paths_updates(trans);
794 panic("not locked: %s %s%s\n",
796 (bch2_bpos_to_text(&PBUF(buf), pos), buf),
797 key_cache ? " cached" : "");
802 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
803 struct btree_path *path, unsigned l) {}
804 static inline void bch2_btree_path_verify(struct btree_trans *trans,
805 struct btree_path *path) {}
806 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
807 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
808 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
812 /* Btree path: fixups after btree updates */
814 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
817 struct bkey_packed *k)
819 struct btree_node_iter_set *set;
821 btree_node_iter_for_each(iter, set)
822 if (set->end == t->end_offset) {
823 set->k = __btree_node_key_to_offset(b, k);
824 bch2_btree_node_iter_sort(iter, b);
828 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
831 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
833 struct bkey_packed *where)
835 struct btree_path_level *l = &path->l[b->c.level];
837 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
840 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
841 bch2_btree_node_iter_advance(&l->iter, l->b);
844 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
846 struct bkey_packed *where)
848 struct btree_path *path;
850 trans_for_each_path_with_node(trans, b, path) {
851 __bch2_btree_path_fix_key_modified(path, b, where);
852 bch2_btree_path_verify_level(trans, path, b->c.level);
856 static void __bch2_btree_node_iter_fix(struct btree_path *path,
858 struct btree_node_iter *node_iter,
860 struct bkey_packed *where,
861 unsigned clobber_u64s,
864 const struct bkey_packed *end = btree_bkey_last(b, t);
865 struct btree_node_iter_set *set;
866 unsigned offset = __btree_node_key_to_offset(b, where);
867 int shift = new_u64s - clobber_u64s;
868 unsigned old_end = t->end_offset - shift;
869 unsigned orig_iter_pos = node_iter->data[0].k;
870 bool iter_current_key_modified =
871 orig_iter_pos >= offset &&
872 orig_iter_pos <= offset + clobber_u64s;
874 btree_node_iter_for_each(node_iter, set)
875 if (set->end == old_end)
878 /* didn't find the bset in the iterator - might have to re-add it: */
880 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
881 bch2_btree_node_iter_push(node_iter, b, where, end);
884 /* Iterator is after key that changed */
888 set->end = t->end_offset;
890 /* Iterator hasn't gotten to the key that changed yet: */
895 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
897 } else if (set->k < offset + clobber_u64s) {
898 set->k = offset + new_u64s;
899 if (set->k == set->end)
900 bch2_btree_node_iter_set_drop(node_iter, set);
902 /* Iterator is after key that changed */
903 set->k = (int) set->k + shift;
907 bch2_btree_node_iter_sort(node_iter, b);
909 if (node_iter->data[0].k != orig_iter_pos)
910 iter_current_key_modified = true;
913 * When a new key is added, and the node iterator now points to that
914 * key, the iterator might have skipped past deleted keys that should
915 * come after the key the iterator now points to. We have to rewind to
916 * before those deleted keys - otherwise
917 * bch2_btree_node_iter_prev_all() breaks:
919 if (!bch2_btree_node_iter_end(node_iter) &&
920 iter_current_key_modified &&
923 struct bkey_packed *k, *k2, *p;
925 k = bch2_btree_node_iter_peek_all(node_iter, b);
927 for_each_bset(b, t) {
928 bool set_pos = false;
930 if (node_iter->data[0].end == t->end_offset)
933 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
935 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
936 bkey_iter_cmp(b, k, p) < 0) {
942 btree_node_iter_set_set_pos(node_iter,
948 void bch2_btree_node_iter_fix(struct btree_trans *trans,
949 struct btree_path *path,
951 struct btree_node_iter *node_iter,
952 struct bkey_packed *where,
953 unsigned clobber_u64s,
956 struct bset_tree *t = bch2_bkey_to_bset(b, where);
957 struct btree_path *linked;
959 if (node_iter != &path->l[b->c.level].iter) {
960 __bch2_btree_node_iter_fix(path, b, node_iter, t,
961 where, clobber_u64s, new_u64s);
963 if (bch2_debug_check_iterators)
964 bch2_btree_node_iter_verify(node_iter, b);
967 trans_for_each_path_with_node(trans, b, linked) {
968 __bch2_btree_node_iter_fix(linked, b,
969 &linked->l[b->c.level].iter, t,
970 where, clobber_u64s, new_u64s);
971 bch2_btree_path_verify_level(trans, linked, b->c.level);
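/*
 * The fixup above is applied not only to the node iterator passed in, but to
 * every other path in the transaction pointing at the same node: node
 * iterators store offsets into the node, so an insertion or deletion would
 * otherwise leave them pointing at stale positions.
 */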
975 /* Btree path level: pointer to a particular btree node and node iter */
977 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
978 struct btree_path_level *l,
980 struct bkey_packed *k)
986 * signal to bch2_btree_iter_peek_slot() that we're currently at a hole
989 u->type = KEY_TYPE_deleted;
990 return bkey_s_c_null;
993 ret = bkey_disassemble(l->b, k, u);
996 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
997 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
998 * being overwritten but doesn't change k->size. But this is ok, because
999 * those keys are never written out, we just have to avoid a spurious assertion here:
1002 if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
1003 bch2_bkey_debugcheck(c, l->b, ret);
1008 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1009 struct btree_path_level *l,
1012 return __btree_iter_unpack(c, l, u,
1013 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1016 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
1017 struct btree_path *path,
1018 struct btree_path_level *l,
1021 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1022 bch2_btree_node_iter_peek(&l->iter, l->b));
1024 path->pos = k.k ? k.k->p : l->b->key.k.p;
1028 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
1029 struct btree_path *path,
1030 struct btree_path_level *l,
1033 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1034 bch2_btree_node_iter_prev(&l->iter, l->b));
1036 path->pos = k.k ? k.k->p : l->b->data->min_key;
1040 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1041 struct btree_path_level *l,
1044 struct bkey_packed *k;
1045 int nr_advanced = 0;
1047 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1048 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1049 if (max_advance > 0 && nr_advanced >= max_advance)
1052 bch2_btree_node_iter_advance(&l->iter, l->b);
1060 * Verify that iterator for parent node points to child node:
1062 static void btree_path_verify_new_node(struct btree_trans *trans,
1063 struct btree_path *path, struct btree *b)
1065 struct btree_path_level *l;
1068 struct bkey_packed *k;
1070 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1073 plevel = b->c.level + 1;
1074 if (!btree_path_node(path, plevel))
1077 parent_locked = btree_node_locked(path, plevel);
1079 if (!bch2_btree_node_relock(trans, path, plevel))
1082 l = &path->l[plevel];
1083 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1086 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1091 struct bkey uk = bkey_unpack_key(b, k);
1093 bch2_dump_btree_node(trans->c, l->b);
1094 bch2_bpos_to_text(&PBUF(buf1), path->pos);
1095 bch2_bkey_to_text(&PBUF(buf2), &uk);
1096 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
1097 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
1098 panic("parent iter doesn't point to new node:\n"
1102 bch2_btree_ids[path->btree_id], buf1,
1107 btree_node_unlock(path, plevel);
1110 static inline void __btree_path_level_init(struct btree_path *path,
1113 struct btree_path_level *l = &path->l[level];
1115 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1118 * Iterators to interior nodes should always be pointed at the first non-whiteout:
1122 bch2_btree_node_iter_peek(&l->iter, l->b);
1125 static inline void btree_path_level_init(struct btree_trans *trans,
1126 struct btree_path *path,
1129 BUG_ON(path->cached);
1131 btree_path_verify_new_node(trans, path, b);
1133 EBUG_ON(!btree_path_pos_in_node(path, b));
1134 EBUG_ON(b->c.lock.state.seq & 1);
1136 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1137 path->l[b->c.level].b = b;
1138 __btree_path_level_init(path, b->c.level);
1141 /* Btree path: fixups after btree node updates: */
1144 * A btree node is being replaced - update the iterator to point to the new node:
1147 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1149 struct btree_path *path;
1151 trans_for_each_path(trans, path)
1152 if (!path->cached &&
1153 btree_path_pos_in_node(path, b)) {
1154 enum btree_node_locked_type t =
1155 btree_lock_want(path, b->c.level);
1157 if (path->nodes_locked &&
1158 t != BTREE_NODE_UNLOCKED) {
1159 btree_node_unlock(path, b->c.level);
1160 six_lock_increment(&b->c.lock, t);
1161 mark_btree_node_locked(path, b->c.level, t);
1164 btree_path_level_init(trans, path, b);
1169 * A btree node has been modified in such a way as to invalidate iterators - fix them:
1172 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1174 struct btree_path *path;
1176 trans_for_each_path_with_node(trans, b, path)
1177 __btree_path_level_init(path, b->c.level);
1180 /* Btree path: traverse, set_pos: */
1182 static int lock_root_check_fn(struct six_lock *lock, void *p)
1184 struct btree *b = container_of(lock, struct btree, c.lock);
1185 struct btree **rootp = p;
1187 return b == *rootp ? 0 : -1;
1190 static inline int btree_path_lock_root(struct btree_trans *trans,
1191 struct btree_path *path,
1192 unsigned depth_want,
1193 unsigned long trace_ip)
1195 struct bch_fs *c = trans->c;
1196 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1197 enum six_lock_type lock_type;
1200 EBUG_ON(path->nodes_locked);
1203 b = READ_ONCE(*rootp);
1204 path->level = READ_ONCE(b->c.level);
1206 if (unlikely(path->level < depth_want)) {
1208 * the root is at a lower depth than the depth we want:
1209 * got to the end of the btree, or we're walking nodes
1210 * greater than some depth and there are no nodes >= that depth
1213 path->level = depth_want;
1214 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1215 path->l[i].b = NULL;
1219 lock_type = __btree_lock_want(path, path->level);
1220 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1221 path->level, lock_type,
1222 lock_root_check_fn, rootp,
1224 if (trans->restarted)
1229 if (likely(b == READ_ONCE(*rootp) &&
1230 b->c.level == path->level &&
1232 for (i = 0; i < path->level; i++)
1233 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1234 path->l[path->level].b = b;
1235 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1236 path->l[i].b = NULL;
1238 mark_btree_node_locked(path, path->level, lock_type);
1239 btree_path_level_init(trans, path, b);
1243 six_unlock_type(&b->c.lock, lock_type);
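/*
 * Locking the root has to loop: the root pointer is read without any locks
 * held, so by the time the lock is acquired the node may no longer be the
 * root. lock_root_check_fn() rejects the lock attempt if the root changes
 * while we're waiting, and the recheck against READ_ONCE(*rootp) above
 * catches a change after the lock was taken, in which case we unlock and
 * retry.
 */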
1248 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1250 struct bch_fs *c = trans->c;
1251 struct btree_path_level *l = path_l(path);
1252 struct btree_node_iter node_iter = l->iter;
1253 struct bkey_packed *k;
1254 struct bkey_buf tmp;
1255 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1256 ? (path->level > 1 ? 0 : 2)
1257 : (path->level > 1 ? 1 : 16);
1258 bool was_locked = btree_node_locked(path, path->level);
1261 bch2_bkey_buf_init(&tmp);
1263 while (nr && !ret) {
1264 if (!bch2_btree_node_relock(trans, path, path->level))
1267 bch2_btree_node_iter_advance(&node_iter, l->b);
1268 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1272 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1273 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1278 btree_node_unlock(path, path->level);
1280 bch2_bkey_buf_exit(&tmp, c);
1284 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1285 struct btree_path *path,
1286 unsigned plevel, struct btree *b)
1288 struct btree_path_level *l = &path->l[plevel];
1289 bool locked = btree_node_locked(path, plevel);
1290 struct bkey_packed *k;
1291 struct bch_btree_ptr_v2 *bp;
1293 if (!bch2_btree_node_relock(trans, path, plevel))
1296 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1297 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1299 bp = (void *) bkeyp_val(&l->b->format, k);
1300 bp->mem_ptr = (unsigned long)b;
1303 btree_node_unlock(path, plevel);
1306 static __always_inline int btree_path_down(struct btree_trans *trans,
1307 struct btree_path *path,
1309 unsigned long trace_ip)
1311 struct bch_fs *c = trans->c;
1312 struct btree_path_level *l = path_l(path);
1314 unsigned level = path->level - 1;
1315 enum six_lock_type lock_type = __btree_lock_want(path, level);
1316 struct bkey_buf tmp;
1319 EBUG_ON(!btree_node_locked(path, path->level));
1321 bch2_bkey_buf_init(&tmp);
1322 bch2_bkey_buf_unpack(&tmp, c, l->b,
1323 bch2_btree_node_iter_peek(&l->iter, l->b));
1325 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1326 ret = PTR_ERR_OR_ZERO(b);
1330 mark_btree_node_locked(path, level, lock_type);
1331 btree_path_level_init(trans, path, b);
1333 if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
1334 unlikely(b != btree_node_mem_ptr(tmp.k)))
1335 btree_node_mem_ptr_set(trans, path, level + 1, b);
1337 if (flags & BTREE_ITER_PREFETCH)
1338 ret = btree_path_prefetch(trans, path);
1340 if (btree_node_read_locked(path, level + 1))
1341 btree_node_unlock(path, level + 1);
1342 path->level = level;
1344 bch2_btree_path_verify_locks(path);
1346 bch2_bkey_buf_exit(&tmp, c);
1350 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1351 unsigned, unsigned long);
1353 static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
1354 unsigned long trace_ip)
1356 struct bch_fs *c = trans->c;
1357 struct btree_path *path;
1360 if (trans->in_traverse_all)
1363 trans->in_traverse_all = true;
1365 trans->restarted = false;
1367 trans_for_each_path(trans, path)
1368 path->should_be_locked = false;
1370 btree_trans_verify_sorted(trans);
1372 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1373 struct btree_path *path1 = trans->paths + trans->sorted[i];
1374 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1376 if (path1->btree_id == path2->btree_id &&
1377 path1->locks_want < path2->locks_want)
1378 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1379 else if (!path1->locks_want && path2->locks_want)
1380 __bch2_btree_path_upgrade(trans, path1, 1);
1383 bch2_trans_unlock(trans);
1386 if (unlikely(ret == -ENOMEM)) {
1389 closure_init_stack(&cl);
1392 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1397 if (unlikely(ret == -EIO))
1400 BUG_ON(ret && ret != -EINTR);
1402 /* Now, redo traversals in correct order: */
1404 while (i < trans->nr_sorted) {
1405 path = trans->paths + trans->sorted[i];
1407 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1409 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1413 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1415 if (path->nodes_locked ||
1416 !btree_path_node(path, path->level))
1421 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1422 * and relock(), relock() won't relock since path->should_be_locked
1423 * isn't set yet, which is all fine
1425 trans_for_each_path(trans, path)
1426 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1428 bch2_btree_cache_cannibalize_unlock(c);
1430 trans->in_traverse_all = false;
1432 trace_trans_traverse_all(trans->ip, trace_ip);
1436 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1438 return __btree_path_traverse_all(trans, 0, _RET_IP_);
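/*
 * __btree_path_traverse_all() is the "start over" path: every path's locks
 * have been dropped, locks_want is propagated between paths on the same
 * btree, and then every path is re-traversed in sorted order - the order in
 * which locks can be taken without violating the ordering rules checked in
 * __bch2_btree_node_lock().
 */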
1441 static inline bool btree_path_good_node(struct btree_trans *trans,
1442 struct btree_path *path,
1443 unsigned l, int check_pos)
1445 if (!is_btree_node(path, l) ||
1446 !bch2_btree_node_relock(trans, path, l))
1449 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1451 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1456 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1457 struct btree_path *path,
1460 unsigned i, l = path->level;
1462 while (btree_path_node(path, l) &&
1463 !btree_path_good_node(trans, path, l, check_pos)) {
1464 btree_node_unlock(path, l);
1465 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1469 /* If we need intent locks, take them too: */
1471 i < path->locks_want && btree_path_node(path, i);
1473 if (!bch2_btree_node_relock(trans, path, i))
1475 btree_node_unlock(path, l);
1476 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1484 * This is the main state machine for walking down the btree - walks down to a specified depth
1487 * Returns 0 on success, -EIO on error (error reading in a btree node).
1489 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1490 * stashed in the iterator and returned from bch2_trans_exit().
1492 static int btree_path_traverse_one(struct btree_trans *trans,
1493 struct btree_path *path,
1495 unsigned long trace_ip)
1497 unsigned depth_want = path->level;
1500 if (unlikely(trans->restarted)) {
1506 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1507 * and re-traverse the path without a transaction restart:
1509 if (path->should_be_locked) {
1510 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1515 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1519 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1522 path->level = btree_path_up_until_good_node(trans, path, 0);
1525 * Note: path->nodes[path->level] may be temporarily NULL here - that
1526 * would indicate to other code that we got to the end of the btree,
1527 * here it indicates that relocking the root failed - it's critical that
1528 * btree_path_lock_root() comes next and that it can't fail
1530 while (path->level > depth_want) {
1531 ret = btree_path_node(path, path->level)
1532 ? btree_path_down(trans, path, flags, trace_ip)
1533 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1534 if (unlikely(ret)) {
1537 * No nodes at this level - got to the end of the btree:
1544 __bch2_btree_path_unlock(path);
1545 path->level = depth_want;
1548 path->l[path->level].b =
1549 BTREE_ITER_NO_NODE_ERROR;
1551 path->l[path->level].b =
1552 BTREE_ITER_NO_NODE_DOWN;
1557 path->uptodate = BTREE_ITER_UPTODATE;
1559 BUG_ON((ret == -EINTR) != !!trans->restarted);
1560 bch2_btree_path_verify(trans, path);
1564 static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long);
1566 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1567 struct btree_path *path, unsigned flags)
1569 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1572 return bch2_trans_cond_resched(trans) ?:
1573 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1576 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1577 struct btree_path *src)
1581 memcpy(&dst->pos, &src->pos,
1582 sizeof(struct btree_path) - offsetof(struct btree_path, pos));
1584 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1585 if (btree_node_locked(dst, i))
1586 six_lock_increment(&dst->l[i].b->c.lock,
1587 __btree_lock_want(dst, i));
1589 btree_path_check_sort(trans, dst, 0);
1592 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1595 struct btree_path *new = btree_path_alloc(trans, src);
1597 btree_path_copy(trans, new, src);
1598 __btree_path_get(new, intent);
1602 inline struct btree_path * __must_check
1603 bch2_btree_path_make_mut(struct btree_trans *trans,
1604 struct btree_path *path, bool intent)
1606 if (path->ref > 1 || path->preserve) {
1607 __btree_path_put(path, intent);
1608 path = btree_path_clone(trans, path, intent);
1609 path->preserve = false;
1610 #ifdef CONFIG_BCACHEFS_DEBUG
1611 path->ip_allocated = _RET_IP_;
1613 btree_trans_verify_sorted(trans);
1619 static struct btree_path * __must_check
1620 btree_path_set_pos(struct btree_trans *trans,
1621 struct btree_path *path, struct bpos new_pos,
1624 int cmp = bpos_cmp(new_pos, path->pos);
1625 unsigned l = path->level;
1627 EBUG_ON(trans->restarted);
1628 EBUG_ON(!path->ref);
1633 path = bch2_btree_path_make_mut(trans, path, intent);
1635 path->pos = new_pos;
1636 path->should_be_locked = false;
1638 btree_path_check_sort(trans, path, cmp);
1640 if (unlikely(path->cached)) {
1641 btree_node_unlock(path, 0);
1642 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1643 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1647 l = btree_path_up_until_good_node(trans, path, cmp);
1649 if (btree_path_node(path, l)) {
1651 * We might have to skip over many keys, or just a few: try
1652 * advancing the node iterator, and if we have to skip over too
1653 * many keys just reinit it (or if we're rewinding, since that is expensive).
1657 !btree_path_advance_to_pos(path, &path->l[l], 8))
1658 __btree_path_level_init(path, l);
1661 if (l != path->level) {
1662 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1663 __bch2_btree_path_unlock(path);
1666 bch2_btree_path_verify(trans, path);
1670 /* Btree path: main interface: */
1672 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1674 struct btree_path *next;
1676 next = prev_btree_path(trans, path);
1677 if (next && !btree_path_cmp(next, path))
1680 next = next_btree_path(trans, path);
1681 if (next && !btree_path_cmp(next, path))
1687 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1689 struct btree_path *next;
1691 next = prev_btree_path(trans, path);
1692 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1695 next = next_btree_path(trans, path);
1696 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1702 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1704 __bch2_btree_path_unlock(path);
1705 btree_path_list_remove(trans, path);
1706 trans->paths_allocated &= ~(1ULL << path->idx);
1709 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1711 struct btree_path *dup;
1713 EBUG_ON(trans->paths + path->idx != path);
1714 EBUG_ON(!path->ref);
1716 if (!__btree_path_put(path, intent))
1720 * Perhaps instead we should check for duplicate paths in traverse_all:
1722 if (path->preserve &&
1723 (dup = have_path_at_pos(trans, path))) {
1724 dup->preserve = true;
1725 path->preserve = false;
1729 if (!path->preserve &&
1730 (dup = have_node_at_pos(trans, path)))
1734 if (path->should_be_locked &&
1735 !btree_node_locked(dup, path->level))
1738 dup->should_be_locked |= path->should_be_locked;
1739 __bch2_path_free(trans, path);
1743 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1745 struct btree_path *path;
1746 struct btree_insert_entry *i;
1748 char buf1[300], buf2[300];
1750 btree_trans_verify_sorted(trans);
1752 trans_for_each_path_inorder(trans, path, idx)
1753 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
1754 path->idx, path->ref, path->intent_ref,
1755 path->should_be_locked ? " S" : "",
1756 path->preserve ? " P" : "",
1757 bch2_btree_ids[path->btree_id],
1758 (bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1),
1760 #ifdef CONFIG_BCACHEFS_DEBUG
1761 (void *) path->ip_allocated
1767 trans_for_each_update(trans, i) {
1769 struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
1771 printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
1772 bch2_btree_ids[i->btree_id],
1773 (void *) i->ip_allocated,
1774 (bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1),
1775 (bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
1779 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1780 struct btree_path *pos)
1782 struct btree_path *path;
1785 if (unlikely(trans->paths_allocated ==
1786 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1787 bch2_dump_trans_paths_updates(trans);
1788 panic("trans path oveflow\n");
1791 idx = __ffs64(~trans->paths_allocated);
1792 trans->paths_allocated |= 1ULL << idx;
1794 path = &trans->paths[idx];
1798 path->intent_ref = 0;
1799 path->nodes_locked = 0;
1800 path->nodes_intent_locked = 0;
1802 btree_path_list_add(trans, pos, path);
1806 struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
1807 enum btree_id btree_id, struct bpos pos,
1808 unsigned locks_want, unsigned level,
1811 struct btree_path *path, *path_pos = NULL;
1814 BUG_ON(trans->restarted);
1816 trans_for_each_path_inorder(trans, path, i) {
1817 if (__btree_path_cmp(path,
1828 path_pos->cached == cached &&
1829 path_pos->btree_id == btree_id &&
1830 path_pos->level == level) {
1831 __btree_path_get(path_pos, intent);
1832 path = btree_path_set_pos(trans, path_pos, pos, intent);
1833 path->preserve = true;
1835 path = btree_path_alloc(trans, path_pos);
1838 __btree_path_get(path, intent);
1840 path->btree_id = btree_id;
1841 path->cached = cached;
1842 path->preserve = true;
1843 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1844 path->should_be_locked = false;
1845 path->level = level;
1846 path->locks_want = locks_want;
1847 path->nodes_locked = 0;
1848 path->nodes_intent_locked = 0;
1849 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1850 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1851 #ifdef CONFIG_BCACHEFS_DEBUG
1852 path->ip_allocated = _RET_IP_;
1854 btree_trans_verify_sorted(trans);
1857 if (path->intent_ref)
1858 locks_want = max(locks_want, level + 1);
1861 * If the path already has locks_want greater than requested, we don't
1862 * downgrade it here: on a transaction restart caused by a btree node split
1863 * needing to upgrade locks, we might be putting/getting this iterator again.
1864 * Downgrading iterators only happens via bch2_trans_downgrade(), after a
1865 * successful transaction commit.
1868 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1869 if (locks_want > path->locks_want) {
1870 path->locks_want = locks_want;
1871 btree_path_get_locks(trans, path, true, _THIS_IP_);
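/*
 * bch2_path_get() prefers reusing paths: the sorted list is searched for an
 * existing path on the same btree with matching cached flag and level; if one
 * is found it is referenced and repositioned with btree_path_set_pos() (which
 * clones it if it's shared), otherwise a fresh path is allocated.
 */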
1877 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1882 BUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1884 if (!path->cached) {
1885 struct btree_path_level *l = path_l(path);
1886 struct bkey_packed *_k =
1887 bch2_btree_node_iter_peek_all(&l->iter, l->b);
1889 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1891 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1893 if (!k.k || bpos_cmp(path->pos, k.k->p))
1896 struct bkey_cached *ck = (void *) path->l[0].b;
1898 EBUG_ON(path->btree_id != ck->key.btree_id ||
1899 bkey_cmp(path->pos, ck->key.pos));
1901 /* BTREE_ITER_CACHED_NOFILL? */
1902 if (unlikely(!ck->valid))
1905 k = bkey_i_to_s_c(ck->k);
1912 return (struct bkey_s_c) { u, NULL };
1915 /* Btree iterators: */
1918 __bch2_btree_iter_traverse(struct btree_iter *iter)
1920 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1924 bch2_btree_iter_traverse(struct btree_iter *iter)
1928 iter->path = btree_path_set_pos(iter->trans, iter->path,
1929 btree_iter_search_key(iter),
1930 iter->flags & BTREE_ITER_INTENT);
1932 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1936 iter->path->should_be_locked = true;
1940 /* Iterate across nodes (leaf and interior nodes) */
1942 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1944 struct btree_trans *trans = iter->trans;
1945 struct btree *b = NULL;
1948 EBUG_ON(iter->path->cached);
1949 bch2_btree_iter_verify(iter);
1951 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1955 b = btree_path_node(iter->path, iter->path->level);
1959 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1961 bkey_init(&iter->k);
1962 iter->k.p = iter->pos = b->key.k.p;
1964 iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
1965 iter->flags & BTREE_ITER_INTENT);
1966 iter->path->should_be_locked = true;
1967 BUG_ON(iter->path->uptodate);
1969 bch2_btree_iter_verify_entry_exit(iter);
1970 bch2_btree_iter_verify(iter);
1978 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1980 struct btree_trans *trans = iter->trans;
1981 struct btree_path *path = iter->path;
1982 struct btree *b = NULL;
1986 BUG_ON(trans->restarted);
1987 EBUG_ON(iter->path->cached);
1988 bch2_btree_iter_verify(iter);
1990 /* already at end? */
1991 if (!btree_path_node(path, path->level))
1995 if (!btree_path_node(path, path->level + 1)) {
1996 btree_node_unlock(path, path->level);
1997 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2002 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2003 __bch2_btree_path_unlock(path);
2004 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2005 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2006 btree_trans_restart(trans);
2011 b = btree_path_node(path, path->level + 1);
2013 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2014 btree_node_unlock(path, path->level);
2015 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2019 * Haven't gotten to the end of the parent node: go back down to
2020 * the next child node
2023 btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2024 iter->flags & BTREE_ITER_INTENT);
2026 path->level = iter->min_depth;
2028 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
2029 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
2030 btree_node_unlock(path, l);
2032 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2033 bch2_btree_iter_verify(iter);
2035 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2039 b = path->l[path->level].b;
2042 bkey_init(&iter->k);
2043 iter->k.p = iter->pos = b->key.k.p;
2045 iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
2046 iter->flags & BTREE_ITER_INTENT);
2047 iter->path->should_be_locked = true;
2048 BUG_ON(iter->path->uptodate);
2050 bch2_btree_iter_verify_entry_exit(iter);
2051 bch2_btree_iter_verify(iter);
2059 /* Iterate across keys (in leaf nodes only) */
2061 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2063 struct bpos pos = iter->k.p;
2064 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2065 ? bpos_cmp(pos, SPOS_MAX)
2066 : bkey_cmp(pos, SPOS_MAX)) != 0;
2068 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2069 pos = bkey_successor(iter, pos);
2070 bch2_btree_iter_set_pos(iter, pos);
2074 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2076 struct bpos pos = bkey_start_pos(&iter->k);
2077 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2078 ? bpos_cmp(pos, POS_MIN)
2079 : bkey_cmp(pos, POS_MIN)) != 0;
2081 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2082 pos = bkey_predecessor(iter, pos);
2083 bch2_btree_iter_set_pos(iter, pos);
2088 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
2091 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
2093 struct btree_trans *trans = iter->trans;
2094 struct bpos search_key = btree_iter_search_key(iter);
2095 struct bkey_i *next_update;
2099 EBUG_ON(iter->path->cached || iter->path->level);
2100 bch2_btree_iter_verify(iter);
2101 bch2_btree_iter_verify_entry_exit(iter);
2104 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2105 iter->flags & BTREE_ITER_INTENT);
2107 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2108 if (unlikely(ret)) {
2109 /* ensure that iter->k is consistent with iter->pos: */
2110 bch2_btree_iter_set_pos(iter, iter->pos);
2111 k = bkey_s_c_err(ret);
2115 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2116 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2118 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2120 /* In the btree, deleted keys sort before non-deleted: */
2121 if (k.k && bkey_deleted(k.k) &&
2123 bpos_cmp(k.k->p, next_update->k.p) <= 0)) {
2124 search_key = k.k->p;
2129 bpos_cmp(next_update->k.p,
2130 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2131 iter->k = next_update->k;
2132 k = bkey_i_to_s_c(next_update);
2137 * We can never have a key in a leaf node at POS_MAX, so
2138 * we don't have to check these successor() calls:
2140 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2141 !bch2_snapshot_is_ancestor(trans->c,
2144 search_key = bpos_successor(k.k->p);
2148 if (bkey_whiteout(k.k) &&
2149 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2150 search_key = bkey_successor(iter, k.k->p);
2155 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2156 /* Advance to next leaf node: */
2157 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2160 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2167 * iter->pos should be monotonically increasing, and always be equal to
2168 * the key we just returned - except extents can straddle iter->pos:
2170 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2171 iter->pos = k.k->p;
2172 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2173 iter->pos = bkey_start_pos(k.k);
2175 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2176 iter->pos.snapshot = iter->snapshot;
2178 cmp = bpos_cmp(k.k->p, iter->path->pos);
2180 iter->path = bch2_btree_path_make_mut(trans, iter->path,
2181 iter->flags & BTREE_ITER_INTENT);
2182 iter->path->pos = k.k->p;
2183 btree_path_check_sort(trans, iter->path, cmp);
2186 iter->path->should_be_locked = true;
2188 bch2_btree_iter_verify_entry_exit(iter);
2189 bch2_btree_iter_verify(iter);
2190 ret = bch2_btree_iter_verify_ret(iter, k);
2192 return bkey_s_c_err(ret);
2198 * bch2_btree_iter_next: returns first key greater than iterator's current position
2201 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2203 if (!bch2_btree_iter_advance(iter))
2204 return bkey_s_c_null;
2206 return bch2_btree_iter_peek(iter);
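/*
 * Typical use of the iterator interface, as a minimal sketch (not code from
 * this file; assumes the usual bch2_trans_init()/bch2_trans_exit() pairing
 * and elides restart handling - a real caller must restart the transaction
 * when it sees -EINTR):
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);
 *
 *	while ((k = bch2_btree_iter_peek(&iter)).k &&
 *	       !(ret = bkey_err(k))) {
 *		(use k here)
 *		bch2_btree_iter_advance(&iter);
 *	}
 *
 *	bch2_trans_iter_exit(&trans, &iter);
 *	bch2_trans_exit(&trans);
 */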
2210 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2211 * iterator's current position
2213 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2215 struct btree_trans *trans = iter->trans;
2216 struct bpos search_key = iter->pos;
2217 struct btree_path *saved_path = NULL;
2219 struct bkey saved_k;
2220 const struct bch_val *saved_v;
2223 EBUG_ON(iter->path->cached || iter->path->level);
2224 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2225 bch2_btree_iter_verify(iter);
2226 bch2_btree_iter_verify_entry_exit(iter);
2228 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2229 search_key.snapshot = U32_MAX;
2232 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2233 iter->flags & BTREE_ITER_INTENT);
2235 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2236 if (unlikely(ret)) {
2237 /* ensure that iter->k is consistent with iter->pos: */
2238 bch2_btree_iter_set_pos(iter, iter->pos);
2239 k = bkey_s_c_err(ret);
2243 k = btree_path_level_peek(trans->c, iter->path,
2244 &iter->path->l[0], &iter->k);
2246 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2247 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2248 : bpos_cmp(k.k->p, search_key) > 0))
2249 k = btree_path_level_prev(trans->c, iter->path,
2250 &iter->path->l[0], &iter->k);
2252 btree_path_check_sort(trans, iter->path, 0);
2255 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2256 if (k.k->p.snapshot == iter->snapshot)
2260 * If we have a saved candidate, and we're no
2261 * longer at the same _key_ (not pos), return the saved candidate:
2264 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2265 bch2_path_put(trans, iter->path,
2266 iter->flags & BTREE_ITER_INTENT);
2267 iter->path = saved_path;
2274 if (bch2_snapshot_is_ancestor(iter->trans->c,
2278 bch2_path_put(trans, saved_path,
2279 iter->flags & BTREE_ITER_INTENT);
2280 saved_path = btree_path_clone(trans, iter->path,
2281 iter->flags & BTREE_ITER_INTENT);
2286 search_key = bpos_predecessor(k.k->p);
2290 if (bkey_whiteout(k.k) &&
2291 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2292 search_key = bkey_predecessor(iter, k.k->p);
2293 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2294 search_key.snapshot = U32_MAX;
2299 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2300 /* Advance to previous leaf node: */
2301 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2303 /* Start of btree: */
2304 bch2_btree_iter_set_pos(iter, POS_MIN);
2310 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2312 /* Extents can straddle iter->pos: */
2313 if (bkey_cmp(k.k->p, iter->pos) < 0)
2316 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2317 iter->pos.snapshot = iter->snapshot;
2320 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2321 iter->path->should_be_locked = true;
2323 bch2_btree_iter_verify_entry_exit(iter);
2324 bch2_btree_iter_verify(iter);
2330 * bch2_btree_iter_prev: returns first key less than iterator's current position
2333 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2335 if (!bch2_btree_iter_rewind(iter))
2336 return bkey_s_c_null;
2338 return bch2_btree_iter_peek_prev(iter);
2341 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2343 struct btree_trans *trans = iter->trans;
2344 struct bpos search_key;
2348 EBUG_ON(iter->path->level);
2349 bch2_btree_iter_verify(iter);
2350 bch2_btree_iter_verify_entry_exit(iter);
2352 /* extents can't span inode numbers: */
2353 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2354 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2355 if (iter->pos.inode == KEY_INODE_MAX)
2356 return bkey_s_c_null;
2358 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2361 search_key = btree_iter_search_key(iter);
2362 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2363 iter->flags & BTREE_ITER_INTENT);
2365 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2367 return bkey_s_c_err(ret);
2369 if ((iter->flags & BTREE_ITER_CACHED) ||
2370 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2371 struct bkey_i *next_update;
2373 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2374 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2378 !bpos_cmp(next_update->k.p, iter->pos)) {
2379 iter->k = next_update->k;
2380 k = bkey_i_to_s_c(next_update);
2382 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2387 if (iter->flags & BTREE_ITER_INTENT) {
2388 struct btree_iter iter2;
2390 bch2_trans_copy_iter(&iter2, iter);
2391 k = bch2_btree_iter_peek(&iter2);
2393 if (k.k && !bkey_err(k)) {
2397 bch2_trans_iter_exit(trans, &iter2);
2399 struct bpos pos = iter->pos;
2401 k = bch2_btree_iter_peek(iter);
2405 if (unlikely(bkey_err(k)))
2408 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2410 if (bkey_cmp(iter->pos, next) < 0) {
2411 bkey_init(&iter->k);
2412 iter->k.p = iter->pos;
2413 bch2_key_resize(&iter->k,
2414 min_t(u64, KEY_SIZE_MAX,
2415 (next.inode == iter->pos.inode
2420 k = (struct bkey_s_c) { &iter->k, NULL };
2421 EBUG_ON(!k.k->size);
2425 iter->path->should_be_locked = true;
2427 bch2_btree_iter_verify_entry_exit(iter);
2428 bch2_btree_iter_verify(iter);
2429 ret = bch2_btree_iter_verify_ret(iter, k);
2431 return bkey_s_c_err(ret);
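/*
 * Note the extents case above: when nothing overlaps iter->pos, peek_slot()
 * synthesizes a hole - iter->k is initialized as an empty key at iter->pos
 * and resized to reach the next real key - so a slot iterator returns a key
 * for every position, whether or not one exists in the btree.
 */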
2436 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2438 if (!bch2_btree_iter_advance(iter))
2439 return bkey_s_c_null;
2441 return bch2_btree_iter_peek_slot(iter);
2444 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2446 if (!bch2_btree_iter_rewind(iter))
2447 return bkey_s_c_null;
2449 return bch2_btree_iter_peek_slot(iter);
2452 /* new transactional stuff: */
2454 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2455 struct btree_path *path)
2457 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2458 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2459 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2462 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2464 #ifdef CONFIG_BCACHEFS_DEBUG
2467 for (i = 0; i < trans->nr_sorted; i++)
2468 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2472 static void btree_trans_verify_sorted(struct btree_trans *trans)
2474 #ifdef CONFIG_BCACHEFS_DEBUG
2475 struct btree_path *path, *prev = NULL;
2478 trans_for_each_path_inorder(trans, path, i) {
2479 BUG_ON(prev && btree_path_cmp(prev, path) > 0);
static inline void btree_path_swap(struct btree_trans *trans,
				   struct btree_path *l, struct btree_path *r)
{
	swap(l->sorted_idx, r->sorted_idx);
	swap(trans->sorted[l->sorted_idx],
	     trans->sorted[r->sorted_idx]);

	btree_path_verify_sorted_ref(trans, l);
	btree_path_verify_sorted_ref(trans, r);
}

static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
				  int cmp)
{
	struct btree_path *n;

	if (cmp <= 0) {
		n = prev_btree_path(trans, path);
		if (n && btree_path_cmp(n, path) > 0) {
			do {
				btree_path_swap(trans, n, path);
				n = prev_btree_path(trans, path);
			} while (n && btree_path_cmp(n, path) > 0);

			goto out;
		}
	}

	if (cmp >= 0) {
		n = next_btree_path(trans, path);
		if (n && btree_path_cmp(path, n) > 0) {
			do {
				btree_path_swap(trans, path, n);
				n = next_btree_path(trans, path);
			} while (n && btree_path_cmp(path, n) > 0);
		}
	}
out:
	btree_trans_verify_sorted(trans);
}

static inline void btree_path_list_remove(struct btree_trans *trans,
					  struct btree_path *path)
{
	unsigned i;

	EBUG_ON(path->sorted_idx >= trans->nr_sorted);

	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	path->sorted_idx = U8_MAX;

	btree_trans_verify_sorted_refs(trans);
}

static inline void btree_path_list_add(struct btree_trans *trans,
				       struct btree_path *pos,
				       struct btree_path *path)
{
	unsigned i;

	btree_trans_verify_sorted_refs(trans);

	path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;

	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	btree_trans_verify_sorted_refs(trans);
}

void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
	if (iter->path)
		bch2_path_put(trans, iter->path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->path = NULL;
}

static void __bch2_trans_iter_init(struct btree_trans *trans,
				   struct btree_iter *iter,
				   unsigned btree_id, struct bpos pos,
				   unsigned locks_want,
				   unsigned depth,
				   unsigned flags)
{
	EBUG_ON(trans->restarted);

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	iter->trans	= trans;
	iter->path	= NULL;
	iter->btree_id	= btree_id;
	iter->min_depth	= depth;
	iter->flags	= flags;
	iter->snapshot	= pos.snapshot;
	iter->pos	= pos;
	iter->k.type	= KEY_TYPE_deleted;
	iter->k.p	= pos;
	iter->k.size	= 0;

	iter->path = bch2_path_get(trans,
				   flags & BTREE_ITER_CACHED,
				   btree_id,
				   btree_iter_search_key(iter),
				   locks_want,
				   depth,
				   flags & BTREE_ITER_INTENT);
}

void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos,
			       0, 0, flags);
}

void bch2_trans_node_iter_init(struct btree_trans *trans,
			       struct btree_iter *iter,
			       enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
			       BTREE_ITER_NOT_EXTENTS|
			       __BTREE_ITER_ALL_SNAPSHOTS|
			       BTREE_ITER_ALL_SNAPSHOTS|
			       flags);
	BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->path->level != depth);
	BUG_ON(iter->min_depth != depth);
}

void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
	*dst = *src;
	if (src->path)
		__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
}

void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size_t new_top = trans->mem_top + size;
	void *p;

	if (new_top > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;

		WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

		new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			memcpy(new_mem, trans->mem, trans->mem_top);
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
			btree_trans_restart(trans);
			return ERR_PTR(-EINTR);
		}
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}

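/*
 * Illustrative sketch, not part of the original file: allocating scratch
 * space from the transaction's memory buffer via bch2_trans_kmalloc().  The
 * allocation lives until the transaction is reset or exited; if the buffer
 * had to be reallocated, the transaction is restarted and ERR_PTR(-EINTR) is
 * returned, so callers must check for an error pointer and propagate it.
 */
static struct bkey_i * __maybe_unused example_trans_alloc_key(struct btree_trans *trans)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));

	if (IS_ERR(k))
		return k;

	bkey_init(&k->k);
	return k;
}
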
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return EINTR when the trylock fails. When this occurs
 * bch2_trans_begin() should be called and the transaction retried.
 */
void bch2_trans_begin(struct btree_trans *trans)
{
	struct btree_insert_entry *i;
	struct btree_path *path;

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);

	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->mem_top			= 0;

	trans->hooks			= NULL;
	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	trans_for_each_path(trans, path) {
		path->should_be_locked = false;

		/*
		 * XXX: we probably shouldn't be doing this if the transaction
		 * was restarted, but currently we still overflow transaction
		 * iterators if we do that
		 */
		if (!path->ref && !path->preserve)
			__bch2_path_free(trans, path);
		else if (!path->ref)
			path->preserve = false;
	}

	bch2_trans_cond_resched(trans);

	if (trans->restarted)
		bch2_btree_path_traverse_all(trans);

	trans->restarted = false;
}

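/*
 * Illustrative sketch, not part of the original file: the retry loop
 * described in the comment above.  The op() callback stands in for whatever
 * btree work a caller does in one transaction attempt, and is expected to
 * return -EINTR when a lock restart is required.
 */
static int __maybe_unused example_trans_do(struct btree_trans *trans,
					   int (*op)(struct btree_trans *))
{
	int ret;

	do {
		bch2_trans_begin(trans);
		ret = op(trans);
	} while (ret == -EINTR);

	return ret;
}
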
static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
	size_t paths_bytes	= sizeof(struct btree_path) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);

	trans->paths	= p; p += paths_bytes;
	trans->updates	= p; p += updates_bytes;
}

void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
	__acquires(&c->btree_trans_barrier)
{
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->ip	= _RET_IP_;

	bch2_trans_alloc_paths(trans, c);

	if (expected_mem_bytes) {
		trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
		trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		}
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}

static void check_btree_paths_leaked(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %pS!", (void *) trans->ip);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
			       bch2_btree_ids[path->btree_id],
			       (void *) path->ip_allocated);
	/* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}

void bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct btree_insert_entry *i;
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	mutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&c->btree_trans_lock);
#endif

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);

	trans->mem	= (void *) 0x1;
	trans->paths	= (void *) 0x1;
}

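/*
 * Illustrative sketch, not part of the original file: the lifecycle of an
 * on-stack transaction.  bch2_trans_init() allocates the path/update arrays
 * and takes the SRCU read lock that bch2_trans_exit() releases; real callers
 * would wrap op() in the bch2_trans_begin() retry loop sketched earlier.
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c,
						  int (*op)(struct btree_trans *))
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	ret = op(&trans);
	bch2_trans_exit(&trans);

	return ret;
}
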
static void __maybe_unused
bch2_btree_path_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     bool cached)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, cached));
}

#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}
#endif

void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_path *path;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_locks(trans))
			continue;

		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_path(trans, path) {
			if (!path->nodes_locked)
				continue;

			pr_buf(out, "  path %u %c l=%u %s:",
			       path->idx,
			       path->cached ? 'c' : 'b',
			       path->level,
			       bch2_btree_ids[path->btree_id]);
			bch2_bpos_to_text(out, path->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(path, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(path, l) ? "i" : "r", l);
					bch2_btree_path_node_to_text(out,
							(void *) path->l[l].b,
							path->cached);
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			path = &trans->paths[trans->locking_path_idx];
			pr_buf(out, "  locking path %u %c l=%u %s:",
			       trans->locking_path_idx,
			       path->cached ? 'c' : 'b',
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_path_node_to_text(out,
					(void *) b, path->cached);
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
	cleanup_srcu_struct(&c->btree_trans_barrier);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return  init_srcu_struct(&c->btree_trans_barrier) ?:
		mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
			sizeof(struct btree_path) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX);
}