1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
16 #include "subvolume.h"
18 #include <linux/prefetch.h>
19 #include <trace/events/bcachefs.h>
21 static void btree_trans_verify_sorted(struct btree_trans *);
22 static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
28 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
30 #ifdef CONFIG_BCACHEFS_DEBUG
31 return iter->ip_allocated;
37 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
40 * Unlocks before scheduling
41 * Note: does not revalidate iterator
43 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
45 if (need_resched() || race_fault()) {
46 bch2_trans_unlock(trans);
48 return bch2_trans_relock(trans) ? 0 : -EINTR;
54 static inline int __btree_path_cmp(const struct btree_path *l,
55 enum btree_id r_btree_id,
60 return cmp_int(l->btree_id, r_btree_id) ?:
61 cmp_int((int) l->cached, (int) r_cached) ?:
62 bpos_cmp(l->pos, r_pos) ?:
63 -cmp_int(l->level, r_level);
66 static inline int btree_path_cmp(const struct btree_path *l,
67 const struct btree_path *r)
69 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
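/*
 * Illustrative example (hypothetical values): this comparison defines the
 * total order used for trans->sorted and for path reuse in bch2_path_get() -
 * btree id first, then cached vs. non-cached, then position, and at equal
 * positions deeper (higher level) paths sort first because the level
 * comparison is negated.
 *
 * For two paths on the same btree at the same position,
 *
 *	l: { .level = 1, .pos = POS(1, 0) }	(interior node)
 *	r: { .level = 0, .pos = POS(1, 0) }	(leaf)
 *
 * __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level) < 0,
 * so the interior node path sorts before the leaf path.
 */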
72 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
74 /* Are we iterating over keys in all snapshots? */
75 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
76 p = bpos_successor(p);
78 p = bpos_nosnap_successor(p);
79 p.snapshot = iter->snapshot;
85 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
87 /* Are we iterating over keys in all snapshots? */
88 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
89 p = bpos_predecessor(p);
91 p = bpos_nosnap_predecessor(p);
92 p.snapshot = iter->snapshot;
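/*
 * Example (hypothetical position): with iter->snapshot == 4 and
 * BTREE_ITER_ALL_SNAPSHOTS not set, bkey_successor() takes SPOS(1, 10, 4)
 * to SPOS(1, 11, 4) - the offset advances and the snapshot field is pinned
 * back to the iterator's snapshot. With BTREE_ITER_ALL_SNAPSHOTS set the
 * snapshot field is the low order component, so the same position advances
 * to SPOS(1, 10, 5).
 */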
98 static inline bool is_btree_node(struct btree_path *path, unsigned l)
100 return l < BTREE_MAX_DEPTH &&
101 (unsigned long) path->l[l].b >= 128;
104 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
106 struct bpos pos = iter->pos;
108 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
109 bkey_cmp(pos, POS_MAX))
110 pos = bkey_successor(iter, pos);
114 static inline bool btree_path_pos_before_node(struct btree_path *path,
117 return bpos_cmp(path->pos, b->data->min_key) < 0;
120 static inline bool btree_path_pos_after_node(struct btree_path *path,
123 return bpos_cmp(b->key.k.p, path->pos) < 0;
126 static inline bool btree_path_pos_in_node(struct btree_path *path,
129 return path->btree_id == b->c.btree_id &&
130 !btree_path_pos_before_node(path, b) &&
131 !btree_path_pos_after_node(path, b);
134 /* Btree node locking: */
136 void bch2_btree_node_unlock_write(struct btree_trans *trans,
137 struct btree_path *path, struct btree *b)
139 bch2_btree_node_unlock_write_inlined(trans, path, b);
142 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
144 struct btree_path *linked;
145 unsigned readers = 0;
147 trans_for_each_path(trans, linked)
148 if (linked->l[b->c.level].b == b &&
149 btree_node_read_locked(linked, b->c.level))
153 * Must drop our read locks before calling six_lock_write() -
154 * six_unlock() won't do wakeups until the reader count
155 * goes to 0, and it's safe because we have the node intent locked:
158 if (!b->c.lock.readers)
159 atomic64_sub(__SIX_VAL(read_lock, readers),
160 &b->c.lock.state.counter);
162 this_cpu_sub(*b->c.lock.readers, readers);
164 btree_node_lock_type(trans->c, b, SIX_LOCK_write);
166 if (!b->c.lock.readers)
167 atomic64_add(__SIX_VAL(read_lock, readers),
168 &b->c.lock.state.counter);
170 this_cpu_add(*b->c.lock.readers, readers);
173 bool __bch2_btree_node_relock(struct btree_trans *trans,
174 struct btree_path *path, unsigned level)
176 struct btree *b = btree_path_node(path, level);
177 int want = __btree_lock_want(path, level);
179 if (!is_btree_node(path, level))
185 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
186 (btree_node_lock_seq_matches(path, b, level) &&
187 btree_node_lock_increment(trans, b, level, want))) {
188 mark_btree_node_locked(path, level, want);
195 bool bch2_btree_node_upgrade(struct btree_trans *trans,
196 struct btree_path *path, unsigned level)
198 struct btree *b = path->l[level].b;
200 if (!is_btree_node(path, level))
203 switch (btree_lock_want(path, level)) {
204 case BTREE_NODE_UNLOCKED:
205 BUG_ON(btree_node_locked(path, level));
207 case BTREE_NODE_READ_LOCKED:
208 BUG_ON(btree_node_intent_locked(path, level));
209 return bch2_btree_node_relock(trans, path, level);
210 case BTREE_NODE_INTENT_LOCKED:
214 if (btree_node_intent_locked(path, level))
220 if (btree_node_locked(path, level)
221 ? six_lock_tryupgrade(&b->c.lock)
222 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
225 if (btree_node_lock_seq_matches(path, b, level) &&
226 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
227 btree_node_unlock(path, level);
233 mark_btree_node_intent_locked(path, level);
237 static inline bool btree_path_get_locks(struct btree_trans *trans,
238 struct btree_path *path,
239 bool upgrade, unsigned long trace_ip)
241 unsigned l = path->level;
245 if (!btree_path_node(path, l))
249 ? bch2_btree_node_upgrade(trans, path, l)
250 : bch2_btree_node_relock(trans, path, l)))
254 } while (l < path->locks_want);
257 * When we fail to get a lock, we have to ensure that any child nodes
258 * can't be relocked so bch2_btree_path_traverse has to walk back up to
259 * the node that we failed to relock:
262 __bch2_btree_path_unlock(path);
263 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
266 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
268 } while (fail_idx >= 0);
271 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
272 path->uptodate = BTREE_ITER_UPTODATE;
274 bch2_trans_verify_locks(trans);
276 return path->uptodate < BTREE_ITER_NEED_RELOCK;
279 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
283 ? container_of(_b, struct btree, c)->key.k.p
284 : container_of(_b, struct bkey_cached, c)->key.pos;
288 bool __bch2_btree_node_lock(struct btree_trans *trans,
289 struct btree_path *path,
291 struct bpos pos, unsigned level,
292 enum six_lock_type type,
293 six_lock_should_sleep_fn should_sleep_fn, void *p,
296 struct btree_path *linked, *deadlock_path = NULL;
297 u64 start_time = local_clock();
301 /* Check if it's safe to block: */
302 trans_for_each_path(trans, linked) {
303 if (!linked->nodes_locked)
307 * Can't block taking an intent lock if we have _any_ nodes read locked:
310 * - Our read lock blocks another thread with an intent lock on
311 * the same node from getting a write lock, and thus from
312 * dropping its intent lock
314 * - And the other thread may have multiple nodes intent locked:
315 * both the node we want to intent lock, and the node we
316 * already have read locked - deadlock:
318 if (type == SIX_LOCK_intent &&
319 linked->nodes_locked != linked->nodes_intent_locked) {
320 deadlock_path = linked;
324 if (linked->btree_id != path->btree_id) {
325 if (linked->btree_id > path->btree_id) {
326 deadlock_path = linked;
333 * Within the same btree, cached paths come before non-cached paths:
336 if (linked->cached != path->cached) {
338 deadlock_path = linked;
345 * Interior nodes must be locked before their descendants: if
346 * another path holds locks on possible descendants of the node
347 * we're about to lock, it must have the ancestors locked too:
349 if (level > __fls(linked->nodes_locked)) {
350 deadlock_path = linked;
354 /* Must lock btree nodes in key order: */
355 if (btree_node_locked(linked, level) &&
356 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
357 linked->cached)) <= 0) {
358 deadlock_path = linked;
360 BUG_ON(trans->in_traverse_all);
364 if (unlikely(deadlock_path)) {
365 trace_trans_restart_would_deadlock(trans->ip, ip,
366 trans->in_traverse_all, reason,
367 deadlock_path->btree_id,
368 deadlock_path->cached,
373 btree_trans_restart(trans);
377 if (six_trylock_type(&b->c.lock, type))
380 trans->locking_path_idx = path->idx;
381 trans->locking_pos = pos;
382 trans->locking_btree_id = path->btree_id;
383 trans->locking_level = level;
386 ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
388 trans->locking = NULL;
391 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
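/*
 * The checks above encode the lock ordering this code relies on; as a
 * summary (same rules as the checks, no new behaviour):
 *
 *  - never block taking an intent lock while holding read locks
 *  - btrees are locked in increasing btree_id order
 *  - within a btree, cached and non-cached paths have a fixed relative order
 *  - interior nodes are locked before their descendants
 *  - within a level, nodes are locked in increasing key order
 *
 * E.g. a path already holding a lock on a node whose key is POS(1, 0) that
 * then asks for a lock at POS(0, 0) on the same level violates the last
 * rule, so the request is flagged as a potential deadlock and the
 * transaction restarts.
 */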
396 /* Btree iterator locking: */
398 #ifdef CONFIG_BCACHEFS_DEBUG
400 static void bch2_btree_path_verify_locks(struct btree_path *path)
404 if (!path->nodes_locked) {
405 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
406 btree_path_node(path, path->level));
410 for (l = 0; btree_path_node(path, l); l++)
411 BUG_ON(btree_lock_want(path, l) !=
412 btree_node_locked_type(path, l));
415 void bch2_trans_verify_locks(struct btree_trans *trans)
417 struct btree_path *path;
419 trans_for_each_path(trans, path)
420 bch2_btree_path_verify_locks(path);
423 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
426 /* Btree path locking: */
429 * Only for btree_cache.c - only relocks intent locks
431 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
432 struct btree_path *path)
436 for (l = path->level;
437 l < path->locks_want && btree_path_node(path, l);
439 if (!bch2_btree_node_relock(trans, path, l)) {
440 __bch2_btree_path_unlock(path);
441 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
442 btree_trans_restart(trans);
451 static bool bch2_btree_path_relock(struct btree_trans *trans,
452 struct btree_path *path, unsigned long trace_ip)
454 bool ret = btree_path_get_locks(trans, path, false, trace_ip);
457 btree_trans_restart(trans);
461 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
462 struct btree_path *path,
463 unsigned new_locks_want)
465 struct btree_path *linked;
467 EBUG_ON(path->locks_want >= new_locks_want);
469 path->locks_want = new_locks_want;
471 if (btree_path_get_locks(trans, path, true, _THIS_IP_))
475 * XXX: this is ugly - we'd prefer to not be mucking with other
476 * iterators in the btree_trans here.
478 * On failure to upgrade the iterator, setting iter->locks_want and
479 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
480 * get the locks we want on transaction restart.
482 * But if this iterator was a clone, on transaction restart what we did
483 * to this iterator isn't going to be preserved.
485 * Possibly we could add an iterator field for the parent iterator when
486 * an iterator is a copy - for now, we'll just upgrade any other
487 * iterators with the same btree id.
489 * The code below used to be needed to ensure ancestor nodes get locked
490 * before interior nodes - now that's handled by
491 * bch2_btree_path_traverse_all().
493 trans_for_each_path(trans, linked)
494 if (linked != path &&
495 linked->cached == path->cached &&
496 linked->btree_id == path->btree_id &&
497 linked->locks_want < new_locks_want) {
498 linked->locks_want = new_locks_want;
499 btree_path_get_locks(trans, linked, true, _THIS_IP_);
505 void __bch2_btree_path_downgrade(struct btree_path *path,
506 unsigned new_locks_want)
510 EBUG_ON(path->locks_want < new_locks_want);
512 path->locks_want = new_locks_want;
514 while (path->nodes_locked &&
515 (l = __fls(path->nodes_locked)) >= path->locks_want) {
516 if (l > path->level) {
517 btree_node_unlock(path, l);
519 if (btree_node_intent_locked(path, l)) {
520 six_lock_downgrade(&path->l[l].b->c.lock);
521 path->nodes_intent_locked ^= 1 << l;
527 bch2_btree_path_verify_locks(path);
530 void bch2_trans_downgrade(struct btree_trans *trans)
532 struct btree_path *path;
534 trans_for_each_path(trans, path)
535 bch2_btree_path_downgrade(path);
538 /* Btree transaction locking: */
540 bool bch2_trans_relock(struct btree_trans *trans)
542 struct btree_path *path;
544 if (unlikely(trans->restarted))
547 trans_for_each_path(trans, path)
548 if (path->should_be_locked &&
549 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
550 trace_trans_restart_relock(trans->ip, _RET_IP_,
551 path->btree_id, &path->pos);
552 BUG_ON(!trans->restarted);
558 void bch2_trans_unlock(struct btree_trans *trans)
560 struct btree_path *path;
562 trans_for_each_path(trans, path)
563 __bch2_btree_path_unlock(path);
565 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
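/*
 * Typical caller pattern (sketch only; do_work() stands in for the caller's
 * transaction body): -EINTR from a failed relock means the whole transaction
 * must be restarted from the top, e.g.
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_work(trans);
 *	} while (ret == -EINTR);
 */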
568 /* Btree iterator: */
570 #ifdef CONFIG_BCACHEFS_DEBUG
572 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
573 struct btree_path *path)
575 struct bkey_cached *ck;
576 bool locked = btree_node_locked(path, 0);
578 if (!bch2_btree_node_relock(trans, path, 0))
581 ck = (void *) path->l[0].b;
582 BUG_ON(ck->key.btree_id != path->btree_id ||
583 bkey_cmp(ck->key.pos, path->pos));
586 btree_node_unlock(path, 0);
589 static void bch2_btree_path_verify_level(struct btree_trans *trans,
590 struct btree_path *path, unsigned level)
592 struct btree_path_level *l;
593 struct btree_node_iter tmp;
595 struct bkey_packed *p, *k;
596 char buf1[100], buf2[100], buf3[100];
599 if (!bch2_debug_check_iterators)
604 locked = btree_node_locked(path, level);
608 bch2_btree_path_verify_cached(trans, path);
612 if (!btree_path_node(path, level))
615 if (!bch2_btree_node_relock(trans, path, level))
618 BUG_ON(!btree_path_pos_in_node(path, l->b));
620 bch2_btree_node_iter_verify(&l->iter, l->b);
623 * For interior nodes, the iterator will have skipped past deleted keys:
626 ? bch2_btree_node_iter_prev(&tmp, l->b)
627 : bch2_btree_node_iter_prev_all(&tmp, l->b);
628 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
630 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
635 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
641 btree_node_unlock(path, level);
644 strcpy(buf2, "(none)");
645 strcpy(buf3, "(none)");
647 bch2_bpos_to_text(&PBUF(buf1), path->pos);
650 struct bkey uk = bkey_unpack_key(l->b, p);
651 bch2_bkey_to_text(&PBUF(buf2), &uk);
655 struct bkey uk = bkey_unpack_key(l->b, k);
656 bch2_bkey_to_text(&PBUF(buf3), &uk);
659 panic("path should be %s key at level %u:\n"
663 msg, level, buf1, buf2, buf3);
666 static void bch2_btree_path_verify(struct btree_trans *trans,
667 struct btree_path *path)
669 struct bch_fs *c = trans->c;
672 EBUG_ON(path->btree_id >= BTREE_ID_NR);
674 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
676 BUG_ON(!path->cached &&
677 c->btree_roots[path->btree_id].b->c.level > i);
681 bch2_btree_path_verify_level(trans, path, i);
684 bch2_btree_path_verify_locks(path);
687 void bch2_trans_verify_paths(struct btree_trans *trans)
689 struct btree_path *path;
691 trans_for_each_path(trans, path)
692 bch2_btree_path_verify(trans, path);
695 static void bch2_btree_iter_verify(struct btree_iter *iter)
697 struct btree_trans *trans = iter->trans;
699 BUG_ON(iter->btree_id >= BTREE_ID_NR);
701 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
703 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
704 iter->pos.snapshot != iter->snapshot);
706 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
707 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
709 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
710 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
711 !btree_type_has_snapshots(iter->btree_id));
713 bch2_btree_path_verify(trans, iter->path);
716 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
718 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
719 !iter->pos.snapshot);
721 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
722 iter->pos.snapshot != iter->snapshot);
724 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
725 bkey_cmp(iter->pos, iter->k.p) > 0);
728 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
730 struct btree_trans *trans = iter->trans;
731 struct btree_iter copy;
732 struct bkey_s_c prev;
735 if (!bch2_debug_check_iterators)
738 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
741 if (bkey_err(k) || !k.k)
744 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
748 bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos,
749 BTREE_ITER_NOPRESERVE|
750 BTREE_ITER_ALL_SNAPSHOTS);
751 prev = bch2_btree_iter_prev(©);
755 ret = bkey_err(prev);
759 if (!bkey_cmp(prev.k->p, k.k->p) &&
760 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
761 prev.k->p.snapshot) > 0) {
762 char buf1[100], buf2[200];
764 bch2_bkey_to_text(&PBUF(buf1), k.k);
765 bch2_bkey_to_text(&PBUF(buf2), prev.k);
767 panic("iter snap %u\n"
774 bch2_trans_iter_exit(trans, ©);
778 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
779 struct bpos pos, bool key_cache)
781 struct btree_path *path;
785 trans_for_each_path_inorder(trans, path, idx) {
786 int cmp = cmp_int(path->btree_id, id) ?:
787 cmp_int(path->cached, key_cache);
794 if (!(path->nodes_locked & 1) ||
795 !path->should_be_locked)
799 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
800 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
803 if (!bkey_cmp(pos, path->pos))
808 bch2_dump_trans_paths_updates(trans);
809 panic("not locked: %s %s%s\n",
811 (bch2_bpos_to_text(&PBUF(buf), pos), buf),
812 key_cache ? " cached" : "");
817 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
818 struct btree_path *path, unsigned l) {}
819 static inline void bch2_btree_path_verify(struct btree_trans *trans,
820 struct btree_path *path) {}
821 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
822 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
823 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
827 /* Btree path: fixups after btree updates */
829 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
832 struct bkey_packed *k)
834 struct btree_node_iter_set *set;
836 btree_node_iter_for_each(iter, set)
837 if (set->end == t->end_offset) {
838 set->k = __btree_node_key_to_offset(b, k);
839 bch2_btree_node_iter_sort(iter, b);
843 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
846 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
848 struct bkey_packed *where)
850 struct btree_path_level *l = &path->l[b->c.level];
852 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
855 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
856 bch2_btree_node_iter_advance(&l->iter, l->b);
859 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
861 struct bkey_packed *where)
863 struct btree_path *path;
865 trans_for_each_path_with_node(trans, b, path) {
866 __bch2_btree_path_fix_key_modified(path, b, where);
867 bch2_btree_path_verify_level(trans, path, b->c.level);
871 static void __bch2_btree_node_iter_fix(struct btree_path *path,
873 struct btree_node_iter *node_iter,
875 struct bkey_packed *where,
876 unsigned clobber_u64s,
879 const struct bkey_packed *end = btree_bkey_last(b, t);
880 struct btree_node_iter_set *set;
881 unsigned offset = __btree_node_key_to_offset(b, where);
882 int shift = new_u64s - clobber_u64s;
883 unsigned old_end = t->end_offset - shift;
884 unsigned orig_iter_pos = node_iter->data[0].k;
885 bool iter_current_key_modified =
886 orig_iter_pos >= offset &&
887 orig_iter_pos <= offset + clobber_u64s;
889 btree_node_iter_for_each(node_iter, set)
890 if (set->end == old_end)
893 /* didn't find the bset in the iterator - might have to re-add it: */
895 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
896 bch2_btree_node_iter_push(node_iter, b, where, end);
899 /* Iterator is after key that changed */
903 set->end = t->end_offset;
905 /* Iterator hasn't gotten to the key that changed yet: */
910 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
912 } else if (set->k < offset + clobber_u64s) {
913 set->k = offset + new_u64s;
914 if (set->k == set->end)
915 bch2_btree_node_iter_set_drop(node_iter, set);
917 /* Iterator is after key that changed */
918 set->k = (int) set->k + shift;
922 bch2_btree_node_iter_sort(node_iter, b);
924 if (node_iter->data[0].k != orig_iter_pos)
925 iter_current_key_modified = true;
928 * When a new key is added, and the node iterator now points to that
929 * key, the iterator might have skipped past deleted keys that should
930 * come after the key the iterator now points to. We have to rewind to
931 * before those deleted keys - otherwise
932 * bch2_btree_node_iter_prev_all() breaks:
934 if (!bch2_btree_node_iter_end(node_iter) &&
935 iter_current_key_modified &&
938 struct bkey_packed *k, *k2, *p;
940 k = bch2_btree_node_iter_peek_all(node_iter, b);
942 for_each_bset(b, t) {
943 bool set_pos = false;
945 if (node_iter->data[0].end == t->end_offset)
948 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
950 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
951 bkey_iter_cmp(b, k, p) < 0) {
957 btree_node_iter_set_set_pos(node_iter,
963 void bch2_btree_node_iter_fix(struct btree_trans *trans,
964 struct btree_path *path,
966 struct btree_node_iter *node_iter,
967 struct bkey_packed *where,
968 unsigned clobber_u64s,
971 struct bset_tree *t = bch2_bkey_to_bset(b, where);
972 struct btree_path *linked;
974 if (node_iter != &path->l[b->c.level].iter) {
975 __bch2_btree_node_iter_fix(path, b, node_iter, t,
976 where, clobber_u64s, new_u64s);
978 if (bch2_debug_check_iterators)
979 bch2_btree_node_iter_verify(node_iter, b);
982 trans_for_each_path_with_node(trans, b, linked) {
983 __bch2_btree_node_iter_fix(linked, b,
984 &linked->l[b->c.level].iter, t,
985 where, clobber_u64s, new_u64s);
986 bch2_btree_path_verify_level(trans, linked, b->c.level);
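/*
 * Worked example (hypothetical offsets): if a key of 3 u64s is inserted at
 * offset 40, clobbering 0 u64s, then shift = 3: the iterator set pointing
 * into this bset gets its end updated to the new end_offset, sets at or
 * after the insert point move forward by 3, and any iterator whose current
 * key was at the insert point is re-sorted and, if needed, rewound past
 * deleted keys so it still points at the right position.
 */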
990 /* Btree path level: pointer to a particular btree node and node iter */
992 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
993 struct btree_path_level *l,
995 struct bkey_packed *k)
1001 * signal to bch2_btree_iter_peek_slot() that we're currently at a hole
1004 u->type = KEY_TYPE_deleted;
1005 return bkey_s_c_null;
1008 ret = bkey_disassemble(l->b, k, u);
1011 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
1012 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
1013 * being overwritten but doesn't change k->size. But this is ok, because
1014 * those keys are never written out, we just have to avoid a spurious assertion:
1017 if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
1018 bch2_bkey_debugcheck(c, l->b, ret);
1023 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1024 struct btree_path_level *l,
1027 return __btree_iter_unpack(c, l, u,
1028 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1031 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
1032 struct btree_path *path,
1033 struct btree_path_level *l,
1036 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1037 bch2_btree_node_iter_peek(&l->iter, l->b));
1039 path->pos = k.k ? k.k->p : l->b->key.k.p;
1043 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
1044 struct btree_path *path,
1045 struct btree_path_level *l,
1048 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1049 bch2_btree_node_iter_prev(&l->iter, l->b));
1051 path->pos = k.k ? k.k->p : l->b->data->min_key;
1055 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1056 struct btree_path_level *l,
1059 struct bkey_packed *k;
1060 int nr_advanced = 0;
1062 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1063 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1064 if (max_advance > 0 && nr_advanced >= max_advance)
1067 bch2_btree_node_iter_advance(&l->iter, l->b);
1075 * Verify that iterator for parent node points to child node:
1077 static void btree_path_verify_new_node(struct btree_trans *trans,
1078 struct btree_path *path, struct btree *b)
1080 struct btree_path_level *l;
1083 struct bkey_packed *k;
1085 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1088 plevel = b->c.level + 1;
1089 if (!btree_path_node(path, plevel))
1092 parent_locked = btree_node_locked(path, plevel);
1094 if (!bch2_btree_node_relock(trans, path, plevel))
1097 l = &path->l[plevel];
1098 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1101 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1106 struct bkey uk = bkey_unpack_key(b, k);
1108 bch2_dump_btree_node(trans->c, l->b);
1109 bch2_bpos_to_text(&PBUF(buf1), path->pos);
1110 bch2_bkey_to_text(&PBUF(buf2), &uk);
1111 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
1112 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
1113 panic("parent iter doesn't point to new node:\n"
1117 bch2_btree_ids[path->btree_id], buf1,
1122 btree_node_unlock(path, plevel);
1125 static inline void __btree_path_level_init(struct btree_path *path,
1128 struct btree_path_level *l = &path->l[level];
1130 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1133 * Iterators to interior nodes should always be pointed at the first non-whiteout:
1137 bch2_btree_node_iter_peek(&l->iter, l->b);
1140 static inline void btree_path_level_init(struct btree_trans *trans,
1141 struct btree_path *path,
1144 BUG_ON(path->cached);
1146 btree_path_verify_new_node(trans, path, b);
1148 EBUG_ON(!btree_path_pos_in_node(path, b));
1149 EBUG_ON(b->c.lock.state.seq & 1);
1151 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1152 path->l[b->c.level].b = b;
1153 __btree_path_level_init(path, b->c.level);
1156 /* Btree path: fixups after btree node updates: */
1159 * A btree node is being replaced - update the iterator to point to the new node:
1162 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1164 struct btree_path *path;
1166 trans_for_each_path(trans, path)
1167 if (!path->cached &&
1168 btree_path_pos_in_node(path, b)) {
1169 enum btree_node_locked_type t =
1170 btree_lock_want(path, b->c.level);
1172 if (path->nodes_locked &&
1173 t != BTREE_NODE_UNLOCKED) {
1174 btree_node_unlock(path, b->c.level);
1175 six_lock_increment(&b->c.lock, t);
1176 mark_btree_node_locked(path, b->c.level, t);
1179 btree_path_level_init(trans, path, b);
1184 * A btree node has been modified in such a way as to invalidate iterators - fix them:
1187 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1189 struct btree_path *path;
1191 trans_for_each_path_with_node(trans, b, path)
1192 __btree_path_level_init(path, b->c.level);
1195 /* Btree path: traverse, set_pos: */
1197 static int lock_root_check_fn(struct six_lock *lock, void *p)
1199 struct btree *b = container_of(lock, struct btree, c.lock);
1200 struct btree **rootp = p;
1202 return b == *rootp ? 0 : -1;
1205 static inline int btree_path_lock_root(struct btree_trans *trans,
1206 struct btree_path *path,
1207 unsigned depth_want,
1208 unsigned long trace_ip)
1210 struct bch_fs *c = trans->c;
1211 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1212 enum six_lock_type lock_type;
1215 EBUG_ON(path->nodes_locked);
1218 b = READ_ONCE(*rootp);
1219 path->level = READ_ONCE(b->c.level);
1221 if (unlikely(path->level < depth_want)) {
1223 * the root is at a lower depth than the depth we want:
1224 * got to the end of the btree, or we're walking nodes
1225 * greater than some depth and there are no nodes >= that depth
1228 path->level = depth_want;
1229 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1230 path->l[i].b = NULL;
1234 lock_type = __btree_lock_want(path, path->level);
1235 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1236 path->level, lock_type,
1237 lock_root_check_fn, rootp,
1239 if (trans->restarted)
1244 if (likely(b == READ_ONCE(*rootp) &&
1245 b->c.level == path->level &&
1247 for (i = 0; i < path->level; i++)
1248 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1249 path->l[path->level].b = b;
1250 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1251 path->l[i].b = NULL;
1253 mark_btree_node_locked(path, path->level, lock_type);
1254 btree_path_level_init(trans, path, b);
1258 six_unlock_type(&b->c.lock, lock_type);
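/*
 * Note on the retry above: the root pointer is read without any locks held,
 * so after taking the lock we recheck that the node is still the root at the
 * level we sampled; if the root changed underneath us the lock is dropped
 * and the whole sequence is retried.
 */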
1263 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1265 struct bch_fs *c = trans->c;
1266 struct btree_path_level *l = path_l(path);
1267 struct btree_node_iter node_iter = l->iter;
1268 struct bkey_packed *k;
1269 struct bkey_buf tmp;
1270 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1271 ? (path->level > 1 ? 0 : 2)
1272 : (path->level > 1 ? 1 : 16);
1273 bool was_locked = btree_node_locked(path, path->level);
1276 bch2_bkey_buf_init(&tmp);
1278 while (nr && !ret) {
1279 if (!bch2_btree_node_relock(trans, path, path->level))
1282 bch2_btree_node_iter_advance(&node_iter, l->b);
1283 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1287 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1288 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1293 btree_node_unlock(path, path->level);
1295 bch2_bkey_buf_exit(&tmp, c);
1299 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1300 struct btree_path *path,
1301 unsigned plevel, struct btree *b)
1303 struct btree_path_level *l = &path->l[plevel];
1304 bool locked = btree_node_locked(path, plevel);
1305 struct bkey_packed *k;
1306 struct bch_btree_ptr_v2 *bp;
1308 if (!bch2_btree_node_relock(trans, path, plevel))
1311 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1312 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1314 bp = (void *) bkeyp_val(&l->b->format, k);
1315 bp->mem_ptr = (unsigned long)b;
1318 btree_node_unlock(path, plevel);
1321 static __always_inline int btree_path_down(struct btree_trans *trans,
1322 struct btree_path *path,
1324 unsigned long trace_ip)
1326 struct bch_fs *c = trans->c;
1327 struct btree_path_level *l = path_l(path);
1329 unsigned level = path->level - 1;
1330 enum six_lock_type lock_type = __btree_lock_want(path, level);
1331 struct bkey_buf tmp;
1334 EBUG_ON(!btree_node_locked(path, path->level));
1336 bch2_bkey_buf_init(&tmp);
1337 bch2_bkey_buf_unpack(&tmp, c, l->b,
1338 bch2_btree_node_iter_peek(&l->iter, l->b));
1340 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1341 ret = PTR_ERR_OR_ZERO(b);
1345 mark_btree_node_locked(path, level, lock_type);
1346 btree_path_level_init(trans, path, b);
1348 if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
1349 unlikely(b != btree_node_mem_ptr(tmp.k)))
1350 btree_node_mem_ptr_set(trans, path, level + 1, b);
1352 if (flags & BTREE_ITER_PREFETCH)
1353 ret = btree_path_prefetch(trans, path);
1355 if (btree_node_read_locked(path, level + 1))
1356 btree_node_unlock(path, level + 1);
1357 path->level = level;
1359 bch2_btree_path_verify_locks(path);
1361 bch2_bkey_buf_exit(&tmp, c);
1365 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1366 unsigned, unsigned long);
1368 static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
1369 unsigned long trace_ip)
1371 struct bch_fs *c = trans->c;
1372 struct btree_path *path;
1375 if (trans->in_traverse_all)
1378 trans->in_traverse_all = true;
1380 trans->restarted = false;
1382 trans_for_each_path(trans, path)
1383 path->should_be_locked = false;
1385 btree_trans_verify_sorted(trans);
1387 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1388 struct btree_path *path1 = trans->paths + trans->sorted[i];
1389 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1391 if (path1->btree_id == path2->btree_id &&
1392 path1->locks_want < path2->locks_want)
1393 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1394 else if (!path1->locks_want && path2->locks_want)
1395 __bch2_btree_path_upgrade(trans, path1, 1);
1398 bch2_trans_unlock(trans);
1401 if (unlikely(ret == -ENOMEM)) {
1404 closure_init_stack(&cl);
1407 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1412 if (unlikely(ret == -EIO))
1415 BUG_ON(ret && ret != -EINTR);
1417 /* Now, redo traversals in correct order: */
1419 while (i < trans->nr_sorted) {
1420 path = trans->paths + trans->sorted[i];
1422 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1424 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1428 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1430 if (path->nodes_locked ||
1431 !btree_path_node(path, path->level))
1436 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1437 * and relock(), relock() won't relock since path->should_be_locked
1438 * isn't set yet, which is all fine
1440 trans_for_each_path(trans, path)
1441 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1443 bch2_btree_cache_cannibalize_unlock(c);
1445 trans->in_traverse_all = false;
1447 trace_trans_traverse_all(trans->ip, trace_ip);
1451 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1453 return __btree_path_traverse_all(trans, 0, _RET_IP_);
1456 static inline bool btree_path_good_node(struct btree_trans *trans,
1457 struct btree_path *path,
1458 unsigned l, int check_pos)
1460 if (!is_btree_node(path, l) ||
1461 !bch2_btree_node_relock(trans, path, l))
1464 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1466 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1471 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1472 struct btree_path *path,
1475 unsigned i, l = path->level;
1477 while (btree_path_node(path, l) &&
1478 !btree_path_good_node(trans, path, l, check_pos)) {
1479 btree_node_unlock(path, l);
1480 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1484 /* If we need intent locks, take them too: */
1486 i < path->locks_want && btree_path_node(path, i);
1488 if (!bch2_btree_node_relock(trans, path, i))
1490 btree_node_unlock(path, l);
1491 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1499 * This is the main state machine for walking down the btree - walks down to a specified depth
1502 * Returns 0 on success, -EIO on error (error reading in a btree node).
1504 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1505 * stashed in the iterator and returned from bch2_trans_exit().
1507 static int btree_path_traverse_one(struct btree_trans *trans,
1508 struct btree_path *path,
1510 unsigned long trace_ip)
1512 unsigned depth_want = path->level;
1515 if (unlikely(trans->restarted)) {
1521 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1522 * and re-traverse the path without a transaction restart:
1524 if (path->should_be_locked) {
1525 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1530 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1534 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1537 path->level = btree_path_up_until_good_node(trans, path, 0);
1540 * Note: path->nodes[path->level] may be temporarily NULL here - that
1541 * would indicate to other code that we got to the end of the btree,
1542 * here it indicates that relocking the root failed - it's critical that
1543 * btree_path_lock_root() comes next and that it can't fail
1545 while (path->level > depth_want) {
1546 ret = btree_path_node(path, path->level)
1547 ? btree_path_down(trans, path, flags, trace_ip)
1548 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1549 if (unlikely(ret)) {
1552 * No nodes at this level - got to the end of the btree:
1559 __bch2_btree_path_unlock(path);
1560 path->level = depth_want;
1563 path->l[path->level].b =
1564 BTREE_ITER_NO_NODE_ERROR;
1566 path->l[path->level].b =
1567 BTREE_ITER_NO_NODE_DOWN;
1572 path->uptodate = BTREE_ITER_UPTODATE;
1574 BUG_ON((ret == -EINTR) != !!trans->restarted);
1575 bch2_btree_path_verify(trans, path);
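/*
 * Traversal sketch (same states as used above): starting from a dirty path,
 *
 *   1. btree_path_up_until_good_node() walks path->level up past nodes that
 *      can't be relocked or that no longer cover the search position;
 *   2. if nothing is left locked, btree_path_lock_root() takes the root;
 *   3. btree_path_down() then descends one level per loop iteration until
 *      path->level == depth_want;
 *   4. on success path->uptodate becomes BTREE_ITER_UPTODATE; any locking
 *      failure on the way surfaces as -EINTR with trans->restarted set.
 */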
1579 static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long);
1581 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1582 struct btree_path *path, unsigned flags)
1584 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1587 return bch2_trans_cond_resched(trans) ?:
1588 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1591 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1592 struct btree_path *src)
1596 memcpy(&dst->pos, &src->pos,
1597 sizeof(struct btree_path) - offsetof(struct btree_path, pos));
1599 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1600 if (btree_node_locked(dst, i))
1601 six_lock_increment(&dst->l[i].b->c.lock,
1602 __btree_lock_want(dst, i));
1604 btree_path_check_sort(trans, dst, 0);
1607 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1610 struct btree_path *new = btree_path_alloc(trans, src);
1612 btree_path_copy(trans, new, src);
1613 __btree_path_get(new, intent);
1617 inline struct btree_path * __must_check
1618 bch2_btree_path_make_mut(struct btree_trans *trans,
1619 struct btree_path *path, bool intent,
1622 if (path->ref > 1 || path->preserve) {
1623 __btree_path_put(path, intent);
1624 path = btree_path_clone(trans, path, intent);
1625 path->preserve = false;
1626 #ifdef CONFIG_BCACHEFS_DEBUG
1627 path->ip_allocated = ip;
1629 btree_trans_verify_sorted(trans);
1635 static struct btree_path * __must_check
1636 btree_path_set_pos(struct btree_trans *trans,
1637 struct btree_path *path, struct bpos new_pos,
1638 bool intent, unsigned long ip)
1640 int cmp = bpos_cmp(new_pos, path->pos);
1641 unsigned l = path->level;
1643 EBUG_ON(trans->restarted);
1644 EBUG_ON(!path->ref);
1649 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1651 path->pos = new_pos;
1652 path->should_be_locked = false;
1654 btree_path_check_sort(trans, path, cmp);
1656 if (unlikely(path->cached)) {
1657 btree_node_unlock(path, 0);
1658 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1659 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1663 l = btree_path_up_until_good_node(trans, path, cmp);
1665 if (btree_path_node(path, l)) {
1667 * We might have to skip over many keys, or just a few: try
1668 * advancing the node iterator, and if we have to skip over too
1669 * many keys just reinit it (or if we're rewinding, since that
1673 !btree_path_advance_to_pos(path, &path->l[l], 8))
1674 __btree_path_level_init(path, l);
1677 if (l != path->level) {
1678 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1679 __bch2_btree_path_unlock(path);
1682 bch2_btree_path_verify(trans, path);
1686 /* Btree path: main interface: */
1688 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1690 struct btree_path *next;
1692 next = prev_btree_path(trans, path);
1693 if (next && !btree_path_cmp(next, path))
1696 next = next_btree_path(trans, path);
1697 if (next && !btree_path_cmp(next, path))
1703 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1705 struct btree_path *next;
1707 next = prev_btree_path(trans, path);
1708 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1711 next = next_btree_path(trans, path);
1712 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1718 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1720 __bch2_btree_path_unlock(path);
1721 btree_path_list_remove(trans, path);
1722 trans->paths_allocated &= ~(1ULL << path->idx);
1725 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1727 struct btree_path *dup;
1729 EBUG_ON(trans->paths + path->idx != path);
1730 EBUG_ON(!path->ref);
1732 if (!__btree_path_put(path, intent))
1736 * Perhaps instead we should check for duplicate paths in traverse_all:
1738 if (path->preserve &&
1739 (dup = have_path_at_pos(trans, path))) {
1740 dup->preserve = true;
1741 path->preserve = false;
1745 if (!path->preserve &&
1746 (dup = have_node_at_pos(trans, path)))
1750 if (path->should_be_locked &&
1751 !btree_node_locked(dup, path->level))
1754 dup->should_be_locked |= path->should_be_locked;
1755 __bch2_path_free(trans, path);
1759 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1761 struct btree_path *path;
1762 struct btree_insert_entry *i;
1764 char buf1[300], buf2[300];
1766 btree_trans_verify_sorted(trans);
1768 trans_for_each_path_inorder(trans, path, idx)
1769 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree %s pos %s locks %u %pS\n",
1770 path->idx, path->ref, path->intent_ref,
1771 path->should_be_locked ? " S" : "",
1772 path->preserve ? " P" : "",
1773 bch2_btree_ids[path->btree_id],
1774 (bch2_bpos_to_text(&PBUF(buf1), path->pos), buf1),
1776 #ifdef CONFIG_BCACHEFS_DEBUG
1777 (void *) path->ip_allocated
1783 trans_for_each_update(trans, i) {
1785 struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
1787 printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
1788 bch2_btree_ids[i->btree_id],
1789 (void *) i->ip_allocated,
1790 (bch2_bkey_val_to_text(&PBUF(buf1), trans->c, old), buf1),
1791 (bch2_bkey_val_to_text(&PBUF(buf2), trans->c, bkey_i_to_s_c(i->k)), buf2));
1795 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1796 struct btree_path *pos)
1798 struct btree_path *path;
1801 if (unlikely(trans->paths_allocated ==
1802 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1803 bch2_dump_trans_paths_updates(trans);
1804 panic("trans path overflow\n");
1807 idx = __ffs64(~trans->paths_allocated);
1808 trans->paths_allocated |= 1ULL << idx;
1810 path = &trans->paths[idx];
1814 path->intent_ref = 0;
1815 path->nodes_locked = 0;
1816 path->nodes_intent_locked = 0;
1818 btree_path_list_add(trans, pos, path);
1822 struct btree_path *bch2_path_get(struct btree_trans *trans,
1823 enum btree_id btree_id, struct bpos pos,
1824 unsigned locks_want, unsigned level,
1825 unsigned flags, unsigned long ip)
1827 struct btree_path *path, *path_pos = NULL;
1828 bool cached = flags & BTREE_ITER_CACHED;
1829 bool intent = flags & BTREE_ITER_INTENT;
1832 BUG_ON(trans->restarted);
1834 trans_for_each_path_inorder(trans, path, i) {
1835 if (__btree_path_cmp(path,
1846 path_pos->cached == cached &&
1847 path_pos->btree_id == btree_id &&
1848 path_pos->level == level) {
1849 __btree_path_get(path_pos, intent);
1850 path = btree_path_set_pos(trans, path_pos, pos, intent, ip);
1852 path = btree_path_alloc(trans, path_pos);
1855 __btree_path_get(path, intent);
1857 path->btree_id = btree_id;
1858 path->cached = cached;
1859 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1860 path->should_be_locked = false;
1861 path->level = level;
1862 path->locks_want = locks_want;
1863 path->nodes_locked = 0;
1864 path->nodes_intent_locked = 0;
1865 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1866 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1867 #ifdef CONFIG_BCACHEFS_DEBUG
1868 path->ip_allocated = ip;
1870 btree_trans_verify_sorted(trans);
1873 if (!(flags & BTREE_ITER_NOPRESERVE))
1874 path->preserve = true;
1876 if (path->intent_ref)
1877 locks_want = max(locks_want, level + 1);
1880 * If the path has locks_want greater than requested, we don't downgrade
1881 * it here - on transaction restart because btree node split needs to
1882 * upgrade locks, we might be putting/getting the iterator again.
1883 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1884 * a successful transaction commit.
1887 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1888 if (locks_want > path->locks_want) {
1889 path->locks_want = locks_want;
1890 btree_path_get_locks(trans, path, true, _THIS_IP_);
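/*
 * In short: bch2_path_get() prefers to reuse an existing path on the same
 * btree with the same cached flag and level, repositioning it with
 * btree_path_set_pos(); only when no such path exists is a new slot
 * allocated. Callers that don't need the position preserved across a
 * transaction restart pass BTREE_ITER_NOPRESERVE, leaving path->preserve
 * unset so bch2_path_put() is free to drop the path.
 */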
1896 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1901 BUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1903 if (!path->cached) {
1904 struct btree_path_level *l = path_l(path);
1905 struct bkey_packed *_k =
1906 bch2_btree_node_iter_peek_all(&l->iter, l->b);
1908 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1910 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1912 if (!k.k || bpos_cmp(path->pos, k.k->p))
1915 struct bkey_cached *ck = (void *) path->l[0].b;
1917 EBUG_ON(path->btree_id != ck->key.btree_id ||
1918 bkey_cmp(path->pos, ck->key.pos));
1920 /* BTREE_ITER_CACHED_NOFILL? */
1921 if (unlikely(!ck->valid))
1924 k = bkey_i_to_s_c(ck->k);
1931 return (struct bkey_s_c) { u, NULL };
1934 /* Btree iterators: */
1937 __bch2_btree_iter_traverse(struct btree_iter *iter)
1939 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1943 bch2_btree_iter_traverse(struct btree_iter *iter)
1947 iter->path = btree_path_set_pos(iter->trans, iter->path,
1948 btree_iter_search_key(iter),
1949 iter->flags & BTREE_ITER_INTENT,
1950 btree_iter_ip_allocated(iter));
1952 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1956 iter->path->should_be_locked = true;
1960 /* Iterate across nodes (leaf and interior nodes) */
1962 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1964 struct btree_trans *trans = iter->trans;
1965 struct btree *b = NULL;
1968 EBUG_ON(iter->path->cached);
1969 bch2_btree_iter_verify(iter);
1971 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1975 b = btree_path_node(iter->path, iter->path->level);
1979 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1981 bkey_init(&iter->k);
1982 iter->k.p = iter->pos = b->key.k.p;
1984 iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
1985 iter->flags & BTREE_ITER_INTENT,
1986 btree_iter_ip_allocated(iter));
1987 iter->path->should_be_locked = true;
1988 BUG_ON(iter->path->uptodate);
1990 bch2_btree_iter_verify_entry_exit(iter);
1991 bch2_btree_iter_verify(iter);
1999 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2001 struct btree_trans *trans = iter->trans;
2002 struct btree_path *path = iter->path;
2003 struct btree *b = NULL;
2007 BUG_ON(trans->restarted);
2008 EBUG_ON(iter->path->cached);
2009 bch2_btree_iter_verify(iter);
2011 /* already at end? */
2012 if (!btree_path_node(path, path->level))
2016 if (!btree_path_node(path, path->level + 1)) {
2017 btree_node_unlock(path, path->level);
2018 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2023 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2024 __bch2_btree_path_unlock(path);
2025 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2026 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2027 btree_trans_restart(trans);
2032 b = btree_path_node(path, path->level + 1);
2034 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2035 btree_node_unlock(path, path->level);
2036 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2040 * Haven't gotten to the end of the parent node: go back down to
2041 * the next child node
2044 btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2045 iter->flags & BTREE_ITER_INTENT,
2046 btree_iter_ip_allocated(iter));
2048 path->level = iter->min_depth;
2050 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
2051 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
2052 btree_node_unlock(path, l);
2054 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2055 bch2_btree_iter_verify(iter);
2057 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2061 b = path->l[path->level].b;
2064 bkey_init(&iter->k);
2065 iter->k.p = iter->pos = b->key.k.p;
2067 iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
2068 iter->flags & BTREE_ITER_INTENT,
2069 btree_iter_ip_allocated(iter));
2070 iter->path->should_be_locked = true;
2071 BUG_ON(iter->path->uptodate);
2073 bch2_btree_iter_verify_entry_exit(iter);
2074 bch2_btree_iter_verify(iter);
2082 /* Iterate across keys (in leaf nodes only) */
2084 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2086 struct bpos pos = iter->k.p;
2087 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2088 ? bpos_cmp(pos, SPOS_MAX)
2089 : bkey_cmp(pos, SPOS_MAX)) != 0;
2091 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2092 pos = bkey_successor(iter, pos);
2093 bch2_btree_iter_set_pos(iter, pos);
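/*
 * Example (hypothetical extent): after returning an extent ending at
 * iter->k.p == POS(1, 100), bch2_btree_iter_advance() sets iter->pos to
 * POS(1, 100) for extent iterators (the next peek starts at the old key's
 * end) or to its successor otherwise, and returns false only once the
 * position has reached the maximum key.
 */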
2097 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2099 struct bpos pos = bkey_start_pos(&iter->k);
2100 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2101 ? bpos_cmp(pos, POS_MIN)
2102 : bkey_cmp(pos, POS_MIN)) != 0;
2104 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2105 pos = bkey_predecessor(iter, pos);
2106 bch2_btree_iter_set_pos(iter, pos);
2111 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
2114 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
2116 struct btree_trans *trans = iter->trans;
2117 struct bpos search_key = btree_iter_search_key(iter);
2118 struct bkey_i *next_update;
2122 EBUG_ON(iter->path->cached || iter->path->level);
2123 bch2_btree_iter_verify(iter);
2124 bch2_btree_iter_verify_entry_exit(iter);
2127 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2128 iter->flags & BTREE_ITER_INTENT,
2129 btree_iter_ip_allocated(iter));
2131 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2132 if (unlikely(ret)) {
2133 /* ensure that iter->k is consistent with iter->pos: */
2134 bch2_btree_iter_set_pos(iter, iter->pos);
2135 k = bkey_s_c_err(ret);
2139 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2140 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2142 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2144 /* In the btree, deleted keys sort before non-deleted: */
2145 if (k.k && bkey_deleted(k.k) &&
2147 bpos_cmp(k.k->p, next_update->k.p) <= 0)) {
2148 search_key = k.k->p;
2153 bpos_cmp(next_update->k.p,
2154 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2155 iter->k = next_update->k;
2156 k = bkey_i_to_s_c(next_update);
2161 * We can never have a key in a leaf node at POS_MAX, so
2162 * we don't have to check these successor() calls:
2164 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2165 !bch2_snapshot_is_ancestor(trans->c,
2168 search_key = bpos_successor(k.k->p);
2172 if (bkey_whiteout(k.k) &&
2173 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2174 search_key = bkey_successor(iter, k.k->p);
2179 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2180 /* Advance to next leaf node: */
2181 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2184 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2191 * iter->pos should be monotonically increasing, and always be equal to
2192 * the key we just returned - except extents can straddle iter->pos:
2194 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2196 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2197 iter->pos = bkey_start_pos(k.k);
2199 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2200 iter->pos.snapshot = iter->snapshot;
2202 cmp = bpos_cmp(k.k->p, iter->path->pos);
2204 iter->path = bch2_btree_path_make_mut(trans, iter->path,
2205 iter->flags & BTREE_ITER_INTENT,
2206 btree_iter_ip_allocated(iter));
2207 iter->path->pos = k.k->p;
2208 btree_path_check_sort(trans, iter->path, cmp);
2211 iter->path->should_be_locked = true;
2213 bch2_btree_iter_verify_entry_exit(iter);
2214 bch2_btree_iter_verify(iter);
2215 ret = bch2_btree_iter_verify_ret(iter, k);
2217 return bkey_s_c_err(ret);
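/*
 * Typical caller (sketch; process() is a stand-in, and for_each_btree_key()
 * in btree_iter.h wraps essentially this loop):
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS_MIN, 0);
 *	for (k = bch2_btree_iter_peek(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(&iter))
 *		process(k);
 *	bch2_trans_iter_exit(trans, &iter);
 */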
2223 * bch2_btree_iter_next: returns first key greater than iterator's current position
2226 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2228 if (!bch2_btree_iter_advance(iter))
2229 return bkey_s_c_null;
2231 return bch2_btree_iter_peek(iter);
2235 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2236 * iterator's current position
2238 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2240 struct btree_trans *trans = iter->trans;
2241 struct bpos search_key = iter->pos;
2242 struct btree_path *saved_path = NULL;
2244 struct bkey saved_k;
2245 const struct bch_val *saved_v;
2248 EBUG_ON(iter->path->cached || iter->path->level);
2249 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2250 bch2_btree_iter_verify(iter);
2251 bch2_btree_iter_verify_entry_exit(iter);
2253 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2254 search_key.snapshot = U32_MAX;
2257 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2258 iter->flags & BTREE_ITER_INTENT,
2259 btree_iter_ip_allocated(iter));
2261 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2262 if (unlikely(ret)) {
2263 /* ensure that iter->k is consistent with iter->pos: */
2264 bch2_btree_iter_set_pos(iter, iter->pos);
2265 k = bkey_s_c_err(ret);
2269 k = btree_path_level_peek(trans->c, iter->path,
2270 &iter->path->l[0], &iter->k);
2272 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2273 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2274 : bpos_cmp(k.k->p, search_key) > 0))
2275 k = btree_path_level_prev(trans->c, iter->path,
2276 &iter->path->l[0], &iter->k);
2278 btree_path_check_sort(trans, iter->path, 0);
2281 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2282 if (k.k->p.snapshot == iter->snapshot)
2286 * If we have a saved candidate, and we're no
2287 * longer at the same _key_ (not pos), return that candidate
2290 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2291 bch2_path_put(trans, iter->path,
2292 iter->flags & BTREE_ITER_INTENT);
2293 iter->path = saved_path;
2300 if (bch2_snapshot_is_ancestor(iter->trans->c,
2304 bch2_path_put(trans, saved_path,
2305 iter->flags & BTREE_ITER_INTENT);
2306 saved_path = btree_path_clone(trans, iter->path,
2307 iter->flags & BTREE_ITER_INTENT);
2312 search_key = bpos_predecessor(k.k->p);
2316 if (bkey_whiteout(k.k) &&
2317 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2318 search_key = bkey_predecessor(iter, k.k->p);
2319 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2320 search_key.snapshot = U32_MAX;
2325 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2326 /* Advance to previous leaf node: */
2327 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2329 /* Start of btree: */
2330 bch2_btree_iter_set_pos(iter, POS_MIN);
2336 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2338 /* Extents can straddle iter->pos: */
2339 if (bkey_cmp(k.k->p, iter->pos) < 0)
2342 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2343 iter->pos.snapshot = iter->snapshot;
2346 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2347 iter->path->should_be_locked = true;
2349 bch2_btree_iter_verify_entry_exit(iter);
2350 bch2_btree_iter_verify(iter);
2356 * bch2_btree_iter_prev: returns first key less than iterator's current position
2359 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2361 if (!bch2_btree_iter_rewind(iter))
2362 return bkey_s_c_null;
2364 return bch2_btree_iter_peek_prev(iter);
2367 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2369 struct btree_trans *trans = iter->trans;
2370 struct bpos search_key;
2374 EBUG_ON(iter->path->level);
2375 bch2_btree_iter_verify(iter);
2376 bch2_btree_iter_verify_entry_exit(iter);
2378 /* extents can't span inode numbers: */
2379 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2380 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2381 if (iter->pos.inode == KEY_INODE_MAX)
2382 return bkey_s_c_null;
2384 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2387 search_key = btree_iter_search_key(iter);
2388 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2389 iter->flags & BTREE_ITER_INTENT,
2390 btree_iter_ip_allocated(iter));
2392 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2394 return bkey_s_c_err(ret);
2396 if ((iter->flags & BTREE_ITER_CACHED) ||
2397 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2398 struct bkey_i *next_update;
2400 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2401 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2405 !bpos_cmp(next_update->k.p, iter->pos)) {
2406 iter->k = next_update->k;
2407 k = bkey_i_to_s_c(next_update);
2409 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2414 if (iter->flags & BTREE_ITER_INTENT) {
2415 struct btree_iter iter2;
2417 bch2_trans_copy_iter(&iter2, iter);
2418 k = bch2_btree_iter_peek(&iter2);
2420 if (k.k && !bkey_err(k)) {
2424 bch2_trans_iter_exit(trans, &iter2);
2426 struct bpos pos = iter->pos;
2428 k = bch2_btree_iter_peek(iter);
2432 if (unlikely(bkey_err(k)))
2435 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2437 if (bkey_cmp(iter->pos, next) < 0) {
2438 bkey_init(&iter->k);
2439 iter->k.p = iter->pos;
2440 bch2_key_resize(&iter->k,
2441 min_t(u64, KEY_SIZE_MAX,
2442 (next.inode == iter->pos.inode
2447 k = (struct bkey_s_c) { &iter->k, NULL };
2448 EBUG_ON(!k.k->size);
2452 iter->path->should_be_locked = true;
2454 bch2_btree_iter_verify_entry_exit(iter);
2455 bch2_btree_iter_verify(iter);
2456 ret = bch2_btree_iter_verify_ret(iter, k);
2458 return bkey_s_c_err(ret);
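/*
 * Worked example (hypothetical positions): for an extent iterator at
 * POS(1, 40) with the next real extent starting at offset 100 in the same
 * inode, the code above synthesizes a hole: iter->k is initialized at the
 * iterator position and resized so that it spans offsets [40, 100), and the
 * returned key has a NULL value.
 */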
2463 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2465 if (!bch2_btree_iter_advance(iter))
2466 return bkey_s_c_null;
2468 return bch2_btree_iter_peek_slot(iter);
2471 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2473 if (!bch2_btree_iter_rewind(iter))
2474 return bkey_s_c_null;
2476 return bch2_btree_iter_peek_slot(iter);
2479 /* new transactional stuff: */
2481 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2482 struct btree_path *path)
2484 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2485 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2486 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2489 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2491 #ifdef CONFIG_BCACHEFS_DEBUG
2494 for (i = 0; i < trans->nr_sorted; i++)
2495 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2499 static void btree_trans_verify_sorted(struct btree_trans *trans)
2501 #ifdef CONFIG_BCACHEFS_DEBUG
2502 struct btree_path *path, *prev = NULL;
2505 trans_for_each_path_inorder(trans, path, i) {
2506 BUG_ON(prev && btree_path_cmp(prev, path) > 0);
2507 prev = path;
2512 static inline void btree_path_swap(struct btree_trans *trans,
2513 struct btree_path *l, struct btree_path *r)
2515 swap(l->sorted_idx, r->sorted_idx);
2516 swap(trans->sorted[l->sorted_idx],
2517 trans->sorted[r->sorted_idx]);
2519 btree_path_verify_sorted_ref(trans, l);
2520 btree_path_verify_sorted_ref(trans, r);
2523 static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2526 struct btree_path *n;
2529 n = prev_btree_path(trans, path);
2530 if (n && btree_path_cmp(n, path) > 0) {
2531 do {
2532 btree_path_swap(trans, n, path);
2533 n = prev_btree_path(trans, path);
2534 } while (n && btree_path_cmp(n, path) > 0);
2541 n = next_btree_path(trans, path);
2542 if (n && btree_path_cmp(path, n) > 0) {
2543 do {
2544 btree_path_swap(trans, path, n);
2545 n = next_btree_path(trans, path);
2546 } while (n && btree_path_cmp(path, n) > 0);
2550 btree_trans_verify_sorted(trans);
2553 static inline void btree_path_list_remove(struct btree_trans *trans,
2554 struct btree_path *path)
2558 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2560 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2562 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2563 trans->paths[trans->sorted[i]].sorted_idx = i;
2565 path->sorted_idx = U8_MAX;
2567 btree_trans_verify_sorted_refs(trans);
2570 static inline void btree_path_list_add(struct btree_trans *trans,
2571 struct btree_path *pos,
2572 struct btree_path *path)
2576 btree_trans_verify_sorted_refs(trans);
2578 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
2580 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2582 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2583 trans->paths[trans->sorted[i]].sorted_idx = i;
2585 btree_trans_verify_sorted_refs(trans);
2588 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2590 if (iter->path)
2591 bch2_path_put(trans, iter->path,
2592 iter->flags & BTREE_ITER_INTENT);
2593 iter->path = NULL;
2594 }
2596 static void __bch2_trans_iter_init(struct btree_trans *trans,
2597 struct btree_iter *iter,
2598 unsigned btree_id, struct bpos pos,
2599 unsigned locks_want,
2600 unsigned depth,
2601 unsigned flags,
2602 unsigned long ip)
2604 EBUG_ON(trans->restarted);
2606 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
2607 btree_node_type_is_extents(btree_id))
2608 flags |= BTREE_ITER_IS_EXTENTS;
2610 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
2611 !btree_type_has_snapshots(btree_id))
2612 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2614 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
2615 btree_type_has_snapshots(btree_id))
2616 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
2618 iter->trans = trans;
2620 iter->btree_id = btree_id;
2621 iter->min_depth = depth;
2622 iter->flags = flags;
2623 iter->snapshot = pos.snapshot;
2624 iter->pos = pos;
2625 iter->k.type = KEY_TYPE_deleted;
2626 iter->k.p = pos;
2627 iter->k.size = 0;
2628 #ifdef CONFIG_BCACHEFS_DEBUG
2629 iter->ip_allocated = ip;
2632 iter->path = bch2_path_get(trans, btree_id, iter->pos,
2633 locks_want, depth, flags, ip);
2636 void bch2_trans_iter_init(struct btree_trans *trans,
2637 struct btree_iter *iter,
2638 unsigned btree_id, struct bpos pos,
2639 unsigned flags)
2641 __bch2_trans_iter_init(trans, iter, btree_id, pos,
2642 0, 0, flags, _RET_IP_);
2645 void bch2_trans_node_iter_init(struct btree_trans *trans,
2646 struct btree_iter *iter,
2647 enum btree_id btree_id,
2648 struct bpos pos,
2649 unsigned locks_want,
2650 unsigned depth,
2651 unsigned flags)
2653 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
2654 BTREE_ITER_NOT_EXTENTS|
2655 __BTREE_ITER_ALL_SNAPSHOTS|
2656 BTREE_ITER_ALL_SNAPSHOTS|
2657 flags, _RET_IP_);
2658 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2659 BUG_ON(iter->path->level != depth);
2660 BUG_ON(iter->min_depth != depth);
2663 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2665 *dst = *src;
2667 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
2668 }
2670 void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2672 size_t new_top = trans->mem_top + size;
2675 if (new_top > trans->mem_bytes) {
2676 size_t old_bytes = trans->mem_bytes;
2677 size_t new_bytes = roundup_pow_of_two(new_top);
2680 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2682 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2683 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2684 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2685 new_bytes = BTREE_TRANS_MEM_MAX;
2686 }
2689 if (!new_mem)
2690 return ERR_PTR(-ENOMEM);
2692 trans->mem = new_mem;
2693 trans->mem_bytes = new_bytes;
2695 if (old_bytes) {
2696 trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
2697 btree_trans_restart(trans);
2698 return ERR_PTR(-EINTR);
2702 p = trans->mem + trans->mem_top;
2703 trans->mem_top += size;
2705 return p;
2706 }
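/*
 * Usage sketch (illustrative only): allocating a key out of the transaction's
 * bump allocator. Memory from bch2_trans_kmalloc() lives until the
 * transaction is reset or torn down, so it is suitable for update keys; the
 * ERR_PTR(-EINTR) case above is handled like any other transaction restart.
 */
static struct bkey_i * __maybe_unused example_new_key(struct btree_trans *trans,
						      struct bpos pos)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));

	if (IS_ERR(k))
		return k;

	bkey_init(&k->k);
	k->k.p = pos;
	return k;
}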
2708 /**
2709  * bch2_trans_begin() - reset a transaction after an interrupted attempt
2710  * @trans: transaction to reset
2712  * While iterating over nodes or updating nodes an attempt to lock a btree
2713  * node may return EINTR when the trylock fails. When this occurs
2714  * bch2_trans_begin() should be called and the transaction retried.
2715  */
2716 void bch2_trans_begin(struct btree_trans *trans)
2718 struct btree_insert_entry *i;
2719 struct btree_path *path;
2721 trans_for_each_update(trans, i)
2722 __btree_path_put(i->path, true);
2724 memset(&trans->journal_res, 0, sizeof(trans->journal_res));
2725 trans->extra_journal_res = 0;
2726 trans->nr_updates = 0;
2727 trans->mem_top = 0;
2729 trans->hooks = NULL;
2730 trans->extra_journal_entries = NULL;
2731 trans->extra_journal_entry_u64s = 0;
2733 if (trans->fs_usage_deltas) {
2734 trans->fs_usage_deltas->used = 0;
2735 memset(&trans->fs_usage_deltas->memset_start, 0,
2736 (void *) &trans->fs_usage_deltas->memset_end -
2737 (void *) &trans->fs_usage_deltas->memset_start);
2740 trans_for_each_path(trans, path) {
2741 path->should_be_locked = false;
2744 * XXX: we probably shouldn't be doing this if the transaction
2745 * was restarted, but currently we still overflow transaction
2746 * iterators if we do that
2748 if (!path->ref && !path->preserve)
2749 __bch2_path_free(trans, path);
2750 else if (!path->ref)
2751 path->preserve = false;
2754 bch2_trans_cond_resched(trans);
2756 if (trans->restarted)
2757 bch2_btree_path_traverse_all(trans);
2759 trans->restarted = false;
2760 }
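/*
 * Usage sketch (illustrative only): the retry pattern described above.
 * do_stuff() is a hypothetical callback that runs one attempt of the
 * transaction and returns -EINTR when a failed lock attempt forces a restart.
 */
static int __maybe_unused example_do_in_trans(struct btree_trans *trans,
					      int (*do_stuff)(struct btree_trans *))
{
	int ret;

	do {
		bch2_trans_begin(trans);
		ret = do_stuff(trans);
	} while (ret == -EINTR);

	return ret;
}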
2762 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
2764 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
2765 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2768 BUG_ON(trans->used_mempool);
2771 p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
2773 if (!p)
2774 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
2776 trans->paths = p; p += paths_bytes;
2777 trans->updates = p; p += updates_bytes;
2780 void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
2781 unsigned expected_nr_iters,
2782 size_t expected_mem_bytes)
2783 __acquires(&c->btree_trans_barrier)
2785 memset(trans, 0, sizeof(*trans));
2786 trans->c = c;
2787 trans->ip = _RET_IP_;
2789 bch2_trans_alloc_paths(trans, c);
2791 if (expected_mem_bytes) {
2792 trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
2793 trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);
2795 if (unlikely(!trans->mem)) {
2796 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2797 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2801 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2803 trans->pid = current->pid;
2804 mutex_lock(&c->btree_trans_lock);
2805 list_add(&trans->list, &c->btree_trans_list);
2806 mutex_unlock(&c->btree_trans_lock);
2809 static void check_btree_paths_leaked(struct btree_trans *trans)
2811 #ifdef CONFIG_BCACHEFS_DEBUG
2812 struct bch_fs *c = trans->c;
2813 struct btree_path *path;
2815 trans_for_each_path(trans, path)
2816 if (path->ref)
2817 goto leaked;
2818 return;
2819 leaked:
2820 bch_err(c, "btree paths leaked from %pS!", (void *) trans->ip);
2821 trans_for_each_path(trans, path)
2822 if (path->ref)
2823 printk(KERN_ERR " btree %s %pS\n",
2824 bch2_btree_ids[path->btree_id],
2825 (void *) path->ip_allocated);
2826 /* Be noisy about this: */
2827 bch2_fatal_error(c);
2831 void bch2_trans_exit(struct btree_trans *trans)
2832 __releases(&c->btree_trans_barrier)
2834 struct btree_insert_entry *i;
2835 struct bch_fs *c = trans->c;
2837 bch2_trans_unlock(trans);
2839 trans_for_each_update(trans, i)
2840 __btree_path_put(i->path, true);
2841 trans->nr_updates = 0;
2843 check_btree_paths_leaked(trans);
2845 mutex_lock(&c->btree_trans_lock);
2846 list_del(&trans->list);
2847 mutex_unlock(&c->btree_trans_lock);
2849 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2851 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
2853 if (trans->fs_usage_deltas) {
2854 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
2855 REPLICAS_DELTA_LIST_MAX)
2856 mempool_free(trans->fs_usage_deltas,
2857 &c->replicas_delta_pool);
2858 else
2859 kfree(trans->fs_usage_deltas);
2862 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
2863 mempool_free(trans->mem, &c->btree_trans_mem_pool);
2864 else
2865 kfree(trans->mem);
2869 * Userspace doesn't have a real percpu implementation:
2871 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
2874 if (trans->paths)
2875 mempool_free(trans->paths, &c->btree_paths_pool);
2877 trans->mem = (void *) 0x1;
2878 trans->paths = (void *) 0x1;
2879 }
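/*
 * Usage sketch (illustrative only): full transaction lifecycle. A transaction
 * is initialized against the filesystem, used for one or more (possibly
 * restarted) attempts, and exited exactly once - after which its pointers are
 * poisoned and it must not be reused without another bch2_trans_init().
 * example_do_in_trans() is the hypothetical retry helper sketched earlier.
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c,
						  int (*do_stuff)(struct btree_trans *))
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	ret = example_do_in_trans(&trans, do_stuff);
	bch2_trans_exit(&trans);

	return ret;
}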
2881 static void __maybe_unused
2882 bch2_btree_path_node_to_text(struct printbuf *out,
2883 struct btree_bkey_cached_common *_b,
2886 pr_buf(out, " l=%u %s:",
2887 _b->level, bch2_btree_ids[_b->btree_id]);
2888 bch2_bpos_to_text(out, btree_node_pos(_b, cached));
2891 static bool trans_has_locks(struct btree_trans *trans)
2893 struct btree_path *path;
2895 trans_for_each_path(trans, path)
2896 if (path->nodes_locked)
2901 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
2903 struct btree_trans *trans;
2904 struct btree_path *path;
2908 mutex_lock(&c->btree_trans_lock);
2909 list_for_each_entry(trans, &c->btree_trans_list, list) {
2910 if (!trans_has_locks(trans))
2913 pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);
2915 trans_for_each_path(trans, path) {
2916 if (!path->nodes_locked)
2919 pr_buf(out, " path %u %c l=%u %s:",
2920 path->idx,
2921 path->cached ? 'c' : 'b',
2922 path->level,
2923 bch2_btree_ids[path->btree_id]);
2924 bch2_bpos_to_text(out, path->pos);
2927 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
2928 if (btree_node_locked(path, l)) {
2929 pr_buf(out, " %s l=%u ",
2930 btree_node_intent_locked(path, l) ? "i" : "r", l);
2931 bch2_btree_path_node_to_text(out,
2932 (void *) path->l[l].b,
2933 path->cached);
2939 b = READ_ONCE(trans->locking);
2940 if (b) {
2941 path = &trans->paths[trans->locking_path_idx];
2942 pr_buf(out, " locking path %u %c l=%u %s:",
2943 trans->locking_path_idx,
2944 path->cached ? 'c' : 'b',
2945 trans->locking_level,
2946 bch2_btree_ids[trans->locking_btree_id]);
2947 bch2_bpos_to_text(out, trans->locking_pos);
2949 pr_buf(out, " node ");
2950 bch2_btree_path_node_to_text(out,
2951 (void *) b, path->cached);
2955 mutex_unlock(&c->btree_trans_lock);
2958 void bch2_fs_btree_iter_exit(struct bch_fs *c)
2960 if (c->btree_trans_barrier_initialized)
2961 cleanup_srcu_struct(&c->btree_trans_barrier);
2962 mempool_exit(&c->btree_trans_mem_pool);
2963 mempool_exit(&c->btree_paths_pool);
2966 int bch2_fs_btree_iter_init(struct bch_fs *c)
2968 unsigned nr = BTREE_ITER_MAX;
2971 INIT_LIST_HEAD(&c->btree_trans_list);
2972 mutex_init(&c->btree_trans_lock);
2974 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
2975 sizeof(struct btree_path) * nr +
2976 sizeof(struct btree_insert_entry) * nr) ?:
2977 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
2978 BTREE_TRANS_MEM_MAX) ?:
2979 init_srcu_struct(&c->btree_trans_barrier);
2980 if (!ret)
2981 c->btree_trans_barrier_initialized = true;
2982 return ret;
2983 }