1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
17 #include "subvolume.h"
19 #include <linux/prefetch.h>
20 #include <trace/events/bcachefs.h>
22 static void btree_trans_verify_sorted(struct btree_trans *);
23 inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
25 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
26 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
29 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
31 #ifdef CONFIG_BCACHEFS_DEBUG
32 return iter->ip_allocated;
38 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
41 * Unlocks before scheduling
42 * Note: does not revalidate iterator
44 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
46 if (need_resched() || race_fault()) {
47 bch2_trans_unlock(trans);
49 return bch2_trans_relock(trans) ? 0 : -EINTR;
55 static inline int __btree_path_cmp(const struct btree_path *l,
56 enum btree_id r_btree_id,
62 * Must match lock ordering as defined by __bch2_btree_node_lock:
64 return cmp_int(l->btree_id, r_btree_id) ?:
65 cmp_int((int) l->cached, (int) r_cached) ?:
66 bpos_cmp(l->pos, r_pos) ?:
67 -cmp_int(l->level, r_level);
70 static inline int btree_path_cmp(const struct btree_path *l,
71 const struct btree_path *r)
73 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
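/*
 * Illustrative example (not in the original source): two uncached paths in
 * the same btree at the same pos sort by descending level, e.g.
 *
 *	l = (BTREE_ID_extents, cached=0, POS(1, 8), level=1)
 *	r = (BTREE_ID_extents, cached=0, POS(1, 8), level=0)
 *
 * compares as negative, so the interior-node path sorts (and gets locked)
 * before the leaf path underneath it, matching the ordering enforced by
 * __bch2_btree_node_lock().
 */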
76 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
78 /* Are we iterating over keys in all snapshots? */
79 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
80 p = bpos_successor(p);
82 p = bpos_nosnap_successor(p);
83 p.snapshot = iter->snapshot;
89 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
91 /* Are we iterating over keys in all snapshots? */
92 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
93 p = bpos_predecessor(p);
95 p = bpos_nosnap_predecessor(p);
96 p.snapshot = iter->snapshot;
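/*
 * Illustrative example (not in the original source): without
 * BTREE_ITER_ALL_SNAPSHOTS, bkey_successor() skips over every snapshot of the
 * current position - the successor of SPOS(1, 8, 2) is SPOS(1, 9, x) with x
 * forced to iter->snapshot - whereas in all-snapshots mode bpos_successor()
 * steps to SPOS(1, 8, 3) first, since the snapshot field is the
 * least-significant part of the key.
 */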
102 static inline bool is_btree_node(struct btree_path *path, unsigned l)
104 return l < BTREE_MAX_DEPTH &&
105 (unsigned long) path->l[l].b >= 128;
108 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
110 struct bpos pos = iter->pos;
112 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
113 bkey_cmp(pos, POS_MAX))
114 pos = bkey_successor(iter, pos);
118 static inline bool btree_path_pos_before_node(struct btree_path *path,
121 return bpos_cmp(path->pos, b->data->min_key) < 0;
124 static inline bool btree_path_pos_after_node(struct btree_path *path,
127 return bpos_cmp(b->key.k.p, path->pos) < 0;
130 static inline bool btree_path_pos_in_node(struct btree_path *path,
133 return path->btree_id == b->c.btree_id &&
134 !btree_path_pos_before_node(path, b) &&
135 !btree_path_pos_after_node(path, b);
138 /* Btree node locking: */
140 void bch2_btree_node_unlock_write(struct btree_trans *trans,
141 struct btree_path *path, struct btree *b)
143 bch2_btree_node_unlock_write_inlined(trans, path, b);
146 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
148 struct btree_path *linked;
149 unsigned readers = 0;
151 trans_for_each_path(trans, linked)
152 if (linked->l[b->c.level].b == b &&
153 btree_node_read_locked(linked, b->c.level))
157 * Must drop our read locks before calling six_lock_write() -
158 * six_unlock() won't do wakeups until the reader count
159 * goes to 0, and it's safe because we have the node intent locked:
162 if (!b->c.lock.readers)
163 atomic64_sub(__SIX_VAL(read_lock, readers),
164 &b->c.lock.state.counter);
166 this_cpu_sub(*b->c.lock.readers, readers);
168 six_lock_write(&b->c.lock, NULL, NULL);
170 if (!b->c.lock.readers)
171 atomic64_add(__SIX_VAL(read_lock, readers),
172 &b->c.lock.state.counter);
174 this_cpu_add(*b->c.lock.readers, readers);
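/*
 * Illustrative note (not in the original source): if two of our own paths
 * hold read locks on b, the sequence above subtracts those two readers from
 * the lock's count, takes the write lock (waiting only for readers owned by
 * other threads to drain), then adds our two readers back. This is safe
 * because we already hold the intent lock on b, so no other thread can take
 * the write lock in the meantime.
 */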
177 bool __bch2_btree_node_relock(struct btree_trans *trans,
178 struct btree_path *path, unsigned level)
180 struct btree *b = btree_path_node(path, level);
181 int want = __btree_lock_want(path, level);
183 if (!is_btree_node(path, level))
189 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
190 (btree_node_lock_seq_matches(path, b, level) &&
191 btree_node_lock_increment(trans, b, level, want))) {
192 mark_btree_node_locked(path, level, want);
196 trace_btree_node_relock_fail(trans->fn, _RET_IP_,
200 path->l[level].lock_seq,
201 is_btree_node(path, level) ? b->c.lock.state.seq : 0);
205 bool bch2_btree_node_upgrade(struct btree_trans *trans,
206 struct btree_path *path, unsigned level)
208 struct btree *b = path->l[level].b;
210 if (!is_btree_node(path, level))
213 switch (btree_lock_want(path, level)) {
214 case BTREE_NODE_UNLOCKED:
215 BUG_ON(btree_node_locked(path, level));
217 case BTREE_NODE_READ_LOCKED:
218 BUG_ON(btree_node_intent_locked(path, level));
219 return bch2_btree_node_relock(trans, path, level);
220 case BTREE_NODE_INTENT_LOCKED:
224 if (btree_node_intent_locked(path, level))
230 if (btree_node_locked(path, level)
231 ? six_lock_tryupgrade(&b->c.lock)
232 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
235 if (btree_node_lock_seq_matches(path, b, level) &&
236 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
237 btree_node_unlock(path, level);
243 mark_btree_node_intent_locked(path, level);
247 static inline bool btree_path_get_locks(struct btree_trans *trans,
248 struct btree_path *path,
251 unsigned l = path->level;
255 if (!btree_path_node(path, l))
259 ? bch2_btree_node_upgrade(trans, path, l)
260 : bch2_btree_node_relock(trans, path, l)))
264 } while (l < path->locks_want);
267 * When we fail to get a lock, we have to ensure that any child nodes
268 * can't be relocked so bch2_btree_path_traverse has to walk back up to
269 * the node that we failed to relock:
272 __bch2_btree_path_unlock(path);
273 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
276 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
278 } while (fail_idx >= 0);
281 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
282 path->uptodate = BTREE_ITER_UPTODATE;
284 bch2_trans_verify_locks(trans);
286 return path->uptodate < BTREE_ITER_NEED_RELOCK;
289 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
293 ? container_of(_b, struct btree, c)->key.k.p
294 : container_of(_b, struct bkey_cached, c)->key.pos;
298 bool __bch2_btree_node_lock(struct btree_trans *trans,
299 struct btree_path *path,
301 struct bpos pos, unsigned level,
302 enum six_lock_type type,
303 six_lock_should_sleep_fn should_sleep_fn, void *p,
306 struct btree_path *linked;
309 /* Check if it's safe to block: */
310 trans_for_each_path(trans, linked) {
311 if (!linked->nodes_locked)
315 * Can't block taking an intent lock if we have _any_ nodes read locked:
318 * - Our read lock blocks another thread with an intent lock on
319 * the same node from getting a write lock, and thus from
320 * dropping its intent lock
322 * - And the other thread may have multiple nodes intent locked:
323 * both the node we want to intent lock, and the node we
324 * already have read locked - deadlock:
326 if (type == SIX_LOCK_intent &&
327 linked->nodes_locked != linked->nodes_intent_locked) {
332 if (linked->btree_id != path->btree_id) {
333 if (linked->btree_id < path->btree_id)
341 * Within the same btree, non-cached paths come before cached paths:
344 if (linked->cached != path->cached) {
353 * Interior nodes must be locked before their descendants: if
354 * another path has possible descendants locked of the node
355 * we're about to lock, it must have the ancestors locked too:
357 if (level > __fls(linked->nodes_locked)) {
362 /* Must lock btree nodes in key order: */
363 if (btree_node_locked(linked, level) &&
364 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
365 linked->cached)) <= 0) {
366 BUG_ON(trans->in_traverse_all);
372 return btree_node_lock_type(trans, path, b, pos, level,
373 type, should_sleep_fn, p);
375 trace_trans_restart_would_deadlock(trans->fn, ip,
376 trans->in_traverse_all, reason,
383 btree_trans_restart(trans);
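/*
 * Illustrative scenario (not in the original source) for the read vs. intent
 * deadlock checked for above: we hold a read lock on node X and ask for an
 * intent lock on node Y, while another thread holds intent locks on both X
 * and Y and is waiting for a write lock on X. Our read lock blocks its write
 * lock, and its intent lock on Y blocks us - so instead of blocking we
 * restart the transaction.
 */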
387 /* Btree iterator locking: */
389 #ifdef CONFIG_BCACHEFS_DEBUG
391 static void bch2_btree_path_verify_locks(struct btree_path *path)
395 if (!path->nodes_locked) {
396 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
397 btree_path_node(path, path->level));
401 for (l = 0; btree_path_node(path, l); l++)
402 BUG_ON(btree_lock_want(path, l) !=
403 btree_node_locked_type(path, l));
406 void bch2_trans_verify_locks(struct btree_trans *trans)
408 struct btree_path *path;
410 trans_for_each_path(trans, path)
411 bch2_btree_path_verify_locks(path);
414 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
417 /* Btree path locking: */
420 * Only for btree_cache.c - only relocks intent locks
422 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
423 struct btree_path *path)
427 for (l = path->level;
428 l < path->locks_want && btree_path_node(path, l);
430 if (!bch2_btree_node_relock(trans, path, l)) {
431 __bch2_btree_path_unlock(path);
432 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
433 trace_trans_restart_relock_path_intent(trans->fn, _RET_IP_,
434 path->btree_id, &path->pos);
435 btree_trans_restart(trans);
444 static bool bch2_btree_path_relock(struct btree_trans *trans,
445 struct btree_path *path, unsigned long trace_ip)
447 bool ret = btree_path_get_locks(trans, path, false);
450 trace_trans_restart_relock_path(trans->fn, trace_ip,
451 path->btree_id, &path->pos);
452 btree_trans_restart(trans);
457 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
458 struct btree_path *path,
459 unsigned new_locks_want)
461 struct btree_path *linked;
463 EBUG_ON(path->locks_want >= new_locks_want);
465 path->locks_want = new_locks_want;
467 if (btree_path_get_locks(trans, path, true))
471 * XXX: this is ugly - we'd prefer to not be mucking with other
472 * iterators in the btree_trans here.
474 * On failure to upgrade the iterator, setting iter->locks_want and
475 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
476 * get the locks we want on transaction restart.
478 * But if this iterator was a clone, on transaction restart what we did
479 * to this iterator isn't going to be preserved.
481 * Possibly we could add an iterator field for the parent iterator when
482 * an iterator is a copy - for now, we'll just upgrade any other
483 * iterators with the same btree id.
485 * The code below used to be needed to ensure ancestor nodes get locked
486 * before interior nodes - now that's handled by
487 * bch2_btree_path_traverse_all().
489 trans_for_each_path(trans, linked)
490 if (linked != path &&
491 linked->cached == path->cached &&
492 linked->btree_id == path->btree_id &&
493 linked->locks_want < new_locks_want) {
494 linked->locks_want = new_locks_want;
495 btree_path_get_locks(trans, linked, true);
501 void __bch2_btree_path_downgrade(struct btree_path *path,
502 unsigned new_locks_want)
506 EBUG_ON(path->locks_want < new_locks_want);
508 path->locks_want = new_locks_want;
510 while (path->nodes_locked &&
511 (l = __fls(path->nodes_locked)) >= path->locks_want) {
512 if (l > path->level) {
513 btree_node_unlock(path, l);
515 if (btree_node_intent_locked(path, l)) {
516 six_lock_downgrade(&path->l[l].b->c.lock);
517 path->nodes_intent_locked ^= 1 << l;
523 bch2_btree_path_verify_locks(path);
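/*
 * Illustrative example (not in the original source): a path at level 0 with
 * locks_want 3 and intent locks held on levels 0-2, downgraded to
 * locks_want 1, unlocks levels 2 and 1 and keeps its intent lock on level 0.
 * If the path were instead positioned at level 1, the same downgrade would
 * convert the level 1 intent lock to a read lock via six_lock_downgrade()
 * rather than drop it.
 */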
526 void bch2_trans_downgrade(struct btree_trans *trans)
528 struct btree_path *path;
530 trans_for_each_path(trans, path)
531 bch2_btree_path_downgrade(path);
534 /* Btree transaction locking: */
536 bool bch2_trans_relock(struct btree_trans *trans)
538 struct btree_path *path;
540 if (unlikely(trans->restarted))
543 trans_for_each_path(trans, path)
544 if (path->should_be_locked &&
545 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
546 trace_trans_restart_relock(trans->fn, _RET_IP_,
547 path->btree_id, &path->pos);
548 BUG_ON(!trans->restarted);
554 void bch2_trans_unlock(struct btree_trans *trans)
556 struct btree_path *path;
558 trans_for_each_path(trans, path)
559 __bch2_btree_path_unlock(path);
562 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
563 * btree nodes, it implements its own walking:
565 BUG_ON(!trans->is_initial_gc &&
566 lock_class_is_held(&bch2_btree_node_lock_key));
569 /* Btree iterator: */
571 #ifdef CONFIG_BCACHEFS_DEBUG
573 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
574 struct btree_path *path)
576 struct bkey_cached *ck;
577 bool locked = btree_node_locked(path, 0);
579 if (!bch2_btree_node_relock(trans, path, 0))
582 ck = (void *) path->l[0].b;
583 BUG_ON(ck->key.btree_id != path->btree_id ||
584 bkey_cmp(ck->key.pos, path->pos));
587 btree_node_unlock(path, 0);
590 static void bch2_btree_path_verify_level(struct btree_trans *trans,
591 struct btree_path *path, unsigned level)
593 struct btree_path_level *l;
594 struct btree_node_iter tmp;
596 struct bkey_packed *p, *k;
597 struct printbuf buf1 = PRINTBUF;
598 struct printbuf buf2 = PRINTBUF;
599 struct printbuf buf3 = PRINTBUF;
602 if (!bch2_debug_check_iterators)
607 locked = btree_node_locked(path, level);
611 bch2_btree_path_verify_cached(trans, path);
615 if (!btree_path_node(path, level))
618 if (!bch2_btree_node_relock(trans, path, level))
621 BUG_ON(!btree_path_pos_in_node(path, l->b));
623 bch2_btree_node_iter_verify(&l->iter, l->b);
626 * For interior nodes, the iterator will have skipped past deleted keys:
629 ? bch2_btree_node_iter_prev(&tmp, l->b)
630 : bch2_btree_node_iter_prev_all(&tmp, l->b);
631 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
633 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
638 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
644 btree_node_unlock(path, level);
647 bch2_bpos_to_text(&buf1, path->pos);
650 struct bkey uk = bkey_unpack_key(l->b, p);
651 bch2_bkey_to_text(&buf2, &uk);
653 pr_buf(&buf2, "(none)");
657 struct bkey uk = bkey_unpack_key(l->b, k);
658 bch2_bkey_to_text(&buf3, &uk);
660 pr_buf(&buf3, "(none)");
663 panic("path should be %s key at level %u:\n"
667 msg, level, buf1.buf, buf2.buf, buf3.buf);
670 static void bch2_btree_path_verify(struct btree_trans *trans,
671 struct btree_path *path)
673 struct bch_fs *c = trans->c;
676 EBUG_ON(path->btree_id >= BTREE_ID_NR);
678 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
680 BUG_ON(!path->cached &&
681 c->btree_roots[path->btree_id].b->c.level > i);
685 bch2_btree_path_verify_level(trans, path, i);
688 bch2_btree_path_verify_locks(path);
691 void bch2_trans_verify_paths(struct btree_trans *trans)
693 struct btree_path *path;
695 trans_for_each_path(trans, path)
696 bch2_btree_path_verify(trans, path);
699 static void bch2_btree_iter_verify(struct btree_iter *iter)
701 struct btree_trans *trans = iter->trans;
703 BUG_ON(iter->btree_id >= BTREE_ID_NR);
705 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
707 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
708 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
710 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
711 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
712 !btree_type_has_snapshots(iter->btree_id));
714 if (iter->update_path)
715 bch2_btree_path_verify(trans, iter->update_path);
716 bch2_btree_path_verify(trans, iter->path);
719 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
721 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
722 !iter->pos.snapshot);
724 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
725 iter->pos.snapshot != iter->snapshot);
727 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
728 bkey_cmp(iter->pos, iter->k.p) > 0);
731 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
733 struct btree_trans *trans = iter->trans;
734 struct btree_iter copy;
735 struct bkey_s_c prev;
738 if (!bch2_debug_check_iterators)
741 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
744 if (bkey_err(k) || !k.k)
747 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
751 bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos,
752 BTREE_ITER_NOPRESERVE|
753 BTREE_ITER_ALL_SNAPSHOTS);
754 prev = bch2_btree_iter_prev(©);
758 ret = bkey_err(prev);
762 if (!bkey_cmp(prev.k->p, k.k->p) &&
763 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
764 prev.k->p.snapshot) > 0) {
765 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
767 bch2_bkey_to_text(&buf1, k.k);
768 bch2_bkey_to_text(&buf2, prev.k);
770 panic("iter snap %u\n"
777 bch2_trans_iter_exit(trans, ©);
781 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
782 struct bpos pos, bool key_cache)
784 struct btree_path *path;
786 struct printbuf buf = PRINTBUF;
788 trans_for_each_path_inorder(trans, path, idx) {
789 int cmp = cmp_int(path->btree_id, id) ?:
790 cmp_int(path->cached, key_cache);
797 if (!(path->nodes_locked & 1) ||
798 !path->should_be_locked)
802 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
803 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
806 if (!bkey_cmp(pos, path->pos))
811 bch2_dump_trans_paths_updates(trans);
812 bch2_bpos_to_text(&buf, pos);
814 panic("not locked: %s %s%s\n",
815 bch2_btree_ids[id], buf.buf,
816 key_cache ? " cached" : "");
821 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
822 struct btree_path *path, unsigned l) {}
823 static inline void bch2_btree_path_verify(struct btree_trans *trans,
824 struct btree_path *path) {}
825 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
826 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
827 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
831 /* Btree path: fixups after btree updates */
833 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
836 struct bkey_packed *k)
838 struct btree_node_iter_set *set;
840 btree_node_iter_for_each(iter, set)
841 if (set->end == t->end_offset) {
842 set->k = __btree_node_key_to_offset(b, k);
843 bch2_btree_node_iter_sort(iter, b);
847 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
850 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
852 struct bkey_packed *where)
854 struct btree_path_level *l = &path->l[b->c.level];
856 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
859 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
860 bch2_btree_node_iter_advance(&l->iter, l->b);
863 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
865 struct bkey_packed *where)
867 struct btree_path *path;
869 trans_for_each_path_with_node(trans, b, path) {
870 __bch2_btree_path_fix_key_modified(path, b, where);
871 bch2_btree_path_verify_level(trans, path, b->c.level);
875 static void __bch2_btree_node_iter_fix(struct btree_path *path,
877 struct btree_node_iter *node_iter,
879 struct bkey_packed *where,
880 unsigned clobber_u64s,
883 const struct bkey_packed *end = btree_bkey_last(b, t);
884 struct btree_node_iter_set *set;
885 unsigned offset = __btree_node_key_to_offset(b, where);
886 int shift = new_u64s - clobber_u64s;
887 unsigned old_end = t->end_offset - shift;
888 unsigned orig_iter_pos = node_iter->data[0].k;
889 bool iter_current_key_modified =
890 orig_iter_pos >= offset &&
891 orig_iter_pos <= offset + clobber_u64s;
893 btree_node_iter_for_each(node_iter, set)
894 if (set->end == old_end)
897 /* didn't find the bset in the iterator - might have to re-add it: */
899 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
900 bch2_btree_node_iter_push(node_iter, b, where, end);
903 /* Iterator is after key that changed */
907 set->end = t->end_offset;
909 /* Iterator hasn't gotten to the key that changed yet: */
914 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
916 } else if (set->k < offset + clobber_u64s) {
917 set->k = offset + new_u64s;
918 if (set->k == set->end)
919 bch2_btree_node_iter_set_drop(node_iter, set);
921 /* Iterator is after key that changed */
922 set->k = (int) set->k + shift;
926 bch2_btree_node_iter_sort(node_iter, b);
928 if (node_iter->data[0].k != orig_iter_pos)
929 iter_current_key_modified = true;
932 * When a new key is added, and the node iterator now points to that
933 * key, the iterator might have skipped past deleted keys that should
934 * come after the key the iterator now points to. We have to rewind to
935 * before those deleted keys - otherwise
936 * bch2_btree_node_iter_prev_all() breaks:
938 if (!bch2_btree_node_iter_end(node_iter) &&
939 iter_current_key_modified &&
942 struct bkey_packed *k, *k2, *p;
944 k = bch2_btree_node_iter_peek_all(node_iter, b);
946 for_each_bset(b, t) {
947 bool set_pos = false;
949 if (node_iter->data[0].end == t->end_offset)
952 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
954 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
955 bkey_iter_cmp(b, k, p) < 0) {
961 btree_node_iter_set_set_pos(node_iter,
967 void bch2_btree_node_iter_fix(struct btree_trans *trans,
968 struct btree_path *path,
970 struct btree_node_iter *node_iter,
971 struct bkey_packed *where,
972 unsigned clobber_u64s,
975 struct bset_tree *t = bch2_bkey_to_bset(b, where);
976 struct btree_path *linked;
978 if (node_iter != &path->l[b->c.level].iter) {
979 __bch2_btree_node_iter_fix(path, b, node_iter, t,
980 where, clobber_u64s, new_u64s);
982 if (bch2_debug_check_iterators)
983 bch2_btree_node_iter_verify(node_iter, b);
986 trans_for_each_path_with_node(trans, b, linked) {
987 __bch2_btree_node_iter_fix(linked, b,
988 &linked->l[b->c.level].iter, t,
989 where, clobber_u64s, new_u64s);
990 bch2_btree_path_verify_level(trans, linked, b->c.level);
994 /* Btree path level: pointer to a particular btree node and node iter */
996 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
997 struct btree_path_level *l,
999 struct bkey_packed *k)
1003 * signal to bch2_btree_iter_peek_slot() that we're currently at a hole
1006 u->type = KEY_TYPE_deleted;
1007 return bkey_s_c_null;
1010 return bkey_disassemble(l->b, k, u);
1013 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
1014 struct btree_path_level *l,
1017 return __btree_iter_unpack(c, l, u,
1018 bch2_btree_node_iter_peek_all(&l->iter, l->b));
1021 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
1022 struct btree_path *path,
1023 struct btree_path_level *l,
1026 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1027 bch2_btree_node_iter_peek(&l->iter, l->b));
1029 path->pos = k.k ? k.k->p : l->b->key.k.p;
1033 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
1034 struct btree_path *path,
1035 struct btree_path_level *l,
1038 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
1039 bch2_btree_node_iter_prev(&l->iter, l->b));
1041 path->pos = k.k ? k.k->p : l->b->data->min_key;
1045 static inline bool btree_path_advance_to_pos(struct btree_path *path,
1046 struct btree_path_level *l,
1049 struct bkey_packed *k;
1050 int nr_advanced = 0;
1052 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
1053 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
1054 if (max_advance > 0 && nr_advanced >= max_advance)
1057 bch2_btree_node_iter_advance(&l->iter, l->b);
1065 * Verify that iterator for parent node points to child node:
1067 static void btree_path_verify_new_node(struct btree_trans *trans,
1068 struct btree_path *path, struct btree *b)
1070 struct bch_fs *c = trans->c;
1071 struct btree_path_level *l;
1074 struct bkey_packed *k;
1076 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1079 if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))
1082 plevel = b->c.level + 1;
1083 if (!btree_path_node(path, plevel))
1086 parent_locked = btree_node_locked(path, plevel);
1088 if (!bch2_btree_node_relock(trans, path, plevel))
1091 l = &path->l[plevel];
1092 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1095 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1096 struct printbuf buf1 = PRINTBUF;
1097 struct printbuf buf2 = PRINTBUF;
1098 struct printbuf buf3 = PRINTBUF;
1099 struct printbuf buf4 = PRINTBUF;
1100 struct bkey uk = bkey_unpack_key(b, k);
1102 bch2_dump_btree_node(c, l->b);
1103 bch2_bpos_to_text(&buf1, path->pos);
1104 bch2_bkey_to_text(&buf2, &uk);
1105 bch2_bpos_to_text(&buf3, b->data->min_key);
1106 bch2_bpos_to_text(&buf4, b->data->max_key);
1107 panic("parent iter doesn't point to new node:\n"
1111 bch2_btree_ids[path->btree_id],
1112 buf1.buf, buf2.buf, buf3.buf, buf4.buf);
1116 btree_node_unlock(path, plevel);
1119 static inline void __btree_path_level_init(struct btree_path *path,
1122 struct btree_path_level *l = &path->l[level];
1124 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1127 * Iterators to interior nodes should always be pointed at the first non whiteout:
1131 bch2_btree_node_iter_peek(&l->iter, l->b);
1134 static inline void btree_path_level_init(struct btree_trans *trans,
1135 struct btree_path *path,
1138 BUG_ON(path->cached);
1140 btree_path_verify_new_node(trans, path, b);
1142 EBUG_ON(!btree_path_pos_in_node(path, b));
1143 EBUG_ON(b->c.lock.state.seq & 1);
1145 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1146 path->l[b->c.level].b = b;
1147 __btree_path_level_init(path, b->c.level);
1150 /* Btree path: fixups after btree node updates: */
1153 * A btree node is being replaced - update the iterator to point to the new node:
1156 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1158 struct btree_path *path;
1160 trans_for_each_path(trans, path)
1161 if (!path->cached &&
1162 btree_path_pos_in_node(path, b)) {
1163 enum btree_node_locked_type t =
1164 btree_lock_want(path, b->c.level);
1166 if (path->nodes_locked &&
1167 t != BTREE_NODE_UNLOCKED) {
1168 btree_node_unlock(path, b->c.level);
1169 six_lock_increment(&b->c.lock, t);
1170 mark_btree_node_locked(path, b->c.level, t);
1173 btree_path_level_init(trans, path, b);
1178 * A btree node has been modified in such a way as to invalidate iterators - fix them:
1181 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1183 struct btree_path *path;
1185 trans_for_each_path_with_node(trans, b, path)
1186 __btree_path_level_init(path, b->c.level);
1189 /* Btree path: traverse, set_pos: */
1191 static int lock_root_check_fn(struct six_lock *lock, void *p)
1193 struct btree *b = container_of(lock, struct btree, c.lock);
1194 struct btree **rootp = p;
1196 return b == *rootp ? 0 : -1;
1199 static inline int btree_path_lock_root(struct btree_trans *trans,
1200 struct btree_path *path,
1201 unsigned depth_want,
1202 unsigned long trace_ip)
1204 struct bch_fs *c = trans->c;
1205 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1206 enum six_lock_type lock_type;
1209 EBUG_ON(path->nodes_locked);
1212 b = READ_ONCE(*rootp);
1213 path->level = READ_ONCE(b->c.level);
1215 if (unlikely(path->level < depth_want)) {
1217 * the root is at a lower depth than the depth we want:
1218 * got to the end of the btree, or we're walking nodes
1219 * greater than some depth and there are no nodes >= that depth
1222 path->level = depth_want;
1223 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1224 path->l[i].b = NULL;
1228 lock_type = __btree_lock_want(path, path->level);
1229 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1230 path->level, lock_type,
1231 lock_root_check_fn, rootp,
1233 if (trans->restarted)
1238 if (likely(b == READ_ONCE(*rootp) &&
1239 b->c.level == path->level &&
1241 for (i = 0; i < path->level; i++)
1242 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1243 path->l[path->level].b = b;
1244 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1245 path->l[i].b = NULL;
1247 mark_btree_node_locked(path, path->level, lock_type);
1248 btree_path_level_init(trans, path, b);
1252 six_unlock_type(&b->c.lock, lock_type);
1257 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1259 struct bch_fs *c = trans->c;
1260 struct btree_path_level *l = path_l(path);
1261 struct btree_node_iter node_iter = l->iter;
1262 struct bkey_packed *k;
1263 struct bkey_buf tmp;
1264 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1265 ? (path->level > 1 ? 0 : 2)
1266 : (path->level > 1 ? 1 : 16);
1267 bool was_locked = btree_node_locked(path, path->level);
1270 bch2_bkey_buf_init(&tmp);
1272 while (nr && !ret) {
1273 if (!bch2_btree_node_relock(trans, path, path->level))
1276 bch2_btree_node_iter_advance(&node_iter, l->b);
1277 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1281 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1282 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1287 btree_node_unlock(path, path->level);
1289 bch2_bkey_buf_exit(&tmp, c);
1293 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
1294 struct btree_and_journal_iter *jiter)
1296 struct bch_fs *c = trans->c;
1298 struct bkey_buf tmp;
1299 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1300 ? (path->level > 1 ? 0 : 2)
1301 : (path->level > 1 ? 1 : 16);
1302 bool was_locked = btree_node_locked(path, path->level);
1305 bch2_bkey_buf_init(&tmp);
1307 while (nr && !ret) {
1308 if (!bch2_btree_node_relock(trans, path, path->level))
1311 bch2_btree_and_journal_iter_advance(jiter);
1312 k = bch2_btree_and_journal_iter_peek(jiter);
1316 bch2_bkey_buf_reassemble(&tmp, c, k);
1317 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1322 btree_node_unlock(path, path->level);
1324 bch2_bkey_buf_exit(&tmp, c);
1328 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1329 struct btree_path *path,
1330 unsigned plevel, struct btree *b)
1332 struct btree_path_level *l = &path->l[plevel];
1333 bool locked = btree_node_locked(path, plevel);
1334 struct bkey_packed *k;
1335 struct bch_btree_ptr_v2 *bp;
1337 if (!bch2_btree_node_relock(trans, path, plevel))
1340 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1341 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1343 bp = (void *) bkeyp_val(&l->b->format, k);
1344 bp->mem_ptr = (unsigned long)b;
1347 btree_node_unlock(path, plevel);
1350 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
1351 struct btree_path *path,
1353 struct bkey_buf *out)
1355 struct bch_fs *c = trans->c;
1356 struct btree_path_level *l = path_l(path);
1357 struct btree_and_journal_iter jiter;
1361 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
1363 k = bch2_btree_and_journal_iter_peek(&jiter);
1365 bch2_bkey_buf_reassemble(out, c, k);
1367 if (flags & BTREE_ITER_PREFETCH)
1368 ret = btree_path_prefetch_j(trans, path, &jiter);
1370 bch2_btree_and_journal_iter_exit(&jiter);
1374 static __always_inline int btree_path_down(struct btree_trans *trans,
1375 struct btree_path *path,
1377 unsigned long trace_ip)
1379 struct bch_fs *c = trans->c;
1380 struct btree_path_level *l = path_l(path);
1382 unsigned level = path->level - 1;
1383 enum six_lock_type lock_type = __btree_lock_want(path, level);
1384 bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
1385 struct bkey_buf tmp;
1388 EBUG_ON(!btree_node_locked(path, path->level));
1390 bch2_bkey_buf_init(&tmp);
1392 if (unlikely(!replay_done)) {
1393 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
1397 bch2_bkey_buf_unpack(&tmp, c, l->b,
1398 bch2_btree_node_iter_peek(&l->iter, l->b));
1400 if (flags & BTREE_ITER_PREFETCH) {
1401 ret = btree_path_prefetch(trans, path);
1407 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1408 ret = PTR_ERR_OR_ZERO(b);
1412 mark_btree_node_locked(path, level, lock_type);
1413 btree_path_level_init(trans, path, b);
1415 if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
1416 unlikely(b != btree_node_mem_ptr(tmp.k)))
1417 btree_node_mem_ptr_set(trans, path, level + 1, b);
1419 if (btree_node_read_locked(path, level + 1))
1420 btree_node_unlock(path, level + 1);
1421 path->level = level;
1423 bch2_btree_path_verify_locks(path);
1425 bch2_bkey_buf_exit(&tmp, c);
1429 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1430 unsigned, unsigned long);
1432 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1434 struct bch_fs *c = trans->c;
1435 struct btree_path *path;
1436 unsigned long trace_ip = _RET_IP_;
1439 if (trans->in_traverse_all)
1442 trans->in_traverse_all = true;
1444 trans->restarted = false;
1446 trans_for_each_path(trans, path)
1447 path->should_be_locked = false;
1449 btree_trans_verify_sorted(trans);
1451 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1452 struct btree_path *path1 = trans->paths + trans->sorted[i];
1453 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1455 if (path1->btree_id == path2->btree_id &&
1456 path1->locks_want < path2->locks_want)
1457 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1458 else if (!path1->locks_want && path2->locks_want)
1459 __bch2_btree_path_upgrade(trans, path1, 1);
1462 bch2_trans_unlock(trans);
1465 if (unlikely(trans->memory_allocation_failure)) {
1468 closure_init_stack(&cl);
1471 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1476 /* Now, redo traversals in correct order: */
1478 while (i < trans->nr_sorted) {
1479 path = trans->paths + trans->sorted[i];
1482 * Traversing a path can cause another path to be added at about
1483 * the same position:
1485 if (path->uptodate) {
1486 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1495 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1496 * and relock(), relock() won't relock since path->should_be_locked
1497 * isn't set yet, which is all fine
1499 trans_for_each_path(trans, path)
1500 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1502 bch2_btree_cache_cannibalize_unlock(c);
1504 trans->in_traverse_all = false;
1506 trace_trans_traverse_all(trans->fn, trace_ip);
1510 static inline bool btree_path_good_node(struct btree_trans *trans,
1511 struct btree_path *path,
1512 unsigned l, int check_pos)
1514 if (!is_btree_node(path, l) ||
1515 !bch2_btree_node_relock(trans, path, l))
1518 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1520 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1525 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1526 struct btree_path *path,
1529 unsigned i, l = path->level;
1531 while (btree_path_node(path, l) &&
1532 !btree_path_good_node(trans, path, l, check_pos)) {
1533 btree_node_unlock(path, l);
1534 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1538 /* If we need intent locks, take them too: */
1540 i < path->locks_want && btree_path_node(path, i);
1542 if (!bch2_btree_node_relock(trans, path, i))
1544 btree_node_unlock(path, l);
1545 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1553 * This is the main state machine for walking down the btree - walks down to a specified depth
1556 * Returns 0 on success, -EIO on error (error reading in a btree node).
1558 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1559 * stashed in the iterator and returned from bch2_trans_exit().
1561 static int btree_path_traverse_one(struct btree_trans *trans,
1562 struct btree_path *path,
1564 unsigned long trace_ip)
1566 unsigned depth_want = path->level;
1569 if (unlikely(trans->restarted)) {
1575 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1576 * and re-traverse the path without a transaction restart:
1578 if (path->should_be_locked) {
1579 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1584 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1588 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1591 path->level = btree_path_up_until_good_node(trans, path, 0);
1594 * Note: path->nodes[path->level] may be temporarily NULL here - that
1595 * would indicate to other code that we got to the end of the btree,
1596 * here it indicates that relocking the root failed - it's critical that
1597 * btree_path_lock_root() comes next and that it can't fail
1599 while (path->level > depth_want) {
1600 ret = btree_path_node(path, path->level)
1601 ? btree_path_down(trans, path, flags, trace_ip)
1602 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1603 if (unlikely(ret)) {
1606 * No nodes at this level - got to the end of the btree:
1613 __bch2_btree_path_unlock(path);
1614 path->level = depth_want;
1617 path->l[path->level].b =
1618 BTREE_ITER_NO_NODE_ERROR;
1620 path->l[path->level].b =
1621 BTREE_ITER_NO_NODE_DOWN;
1626 path->uptodate = BTREE_ITER_UPTODATE;
1628 BUG_ON((ret == -EINTR) != !!trans->restarted);
1629 bch2_btree_path_verify(trans, path);
1633 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1634 struct btree_path *path, unsigned flags)
1636 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1639 return bch2_trans_cond_resched(trans) ?:
1640 btree_path_traverse_one(trans, path, flags, _RET_IP_);
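/*
 * Minimal usage sketch (illustrative, not in the original source) of the
 * internal path API used by the iterator code below; error and transaction
 * restart handling elided:
 *
 *	struct btree_path *path;
 *	struct bkey u;
 *	struct bkey_s_c k;
 *
 *	path = bch2_path_get(trans, BTREE_ID_inodes, pos, 0, 0, 0, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, path, 0);
 *	if (!ret)
 *		k = bch2_btree_path_peek_slot(path, &u);
 *	bch2_path_put(trans, path, false);
 */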
1643 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1644 struct btree_path *src)
1648 memcpy(&dst->pos, &src->pos,
1649 sizeof(struct btree_path) - offsetof(struct btree_path, pos));
1651 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1652 if (btree_node_locked(dst, i))
1653 six_lock_increment(&dst->l[i].b->c.lock,
1654 __btree_lock_want(dst, i));
1656 bch2_btree_path_check_sort(trans, dst, 0);
1659 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1662 struct btree_path *new = btree_path_alloc(trans, src);
1664 btree_path_copy(trans, new, src);
1665 __btree_path_get(new, intent);
1669 inline struct btree_path * __must_check
1670 bch2_btree_path_make_mut(struct btree_trans *trans,
1671 struct btree_path *path, bool intent,
1674 if (path->ref > 1 || path->preserve) {
1675 __btree_path_put(path, intent);
1676 path = btree_path_clone(trans, path, intent);
1677 path->preserve = false;
1678 #ifdef CONFIG_BCACHEFS_DEBUG
1679 path->ip_allocated = ip;
1681 btree_trans_verify_sorted(trans);
1687 struct btree_path * __must_check
1688 bch2_btree_path_set_pos(struct btree_trans *trans,
1689 struct btree_path *path, struct bpos new_pos,
1690 bool intent, unsigned long ip)
1692 int cmp = bpos_cmp(new_pos, path->pos);
1693 unsigned l = path->level;
1695 EBUG_ON(trans->restarted);
1696 EBUG_ON(!path->ref);
1701 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1703 path->pos = new_pos;
1704 path->should_be_locked = false;
1706 bch2_btree_path_check_sort(trans, path, cmp);
1708 if (unlikely(path->cached)) {
1709 btree_node_unlock(path, 0);
1710 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1711 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1715 l = btree_path_up_until_good_node(trans, path, cmp);
1717 if (btree_path_node(path, l)) {
1719 * We might have to skip over many keys, or just a few: try
1720 * advancing the node iterator, and if we have to skip over too
1721 * many keys just reinit it (or if we're rewinding, since btree_path_advance_to_pos() only goes forwards)
1725 !btree_path_advance_to_pos(path, &path->l[l], 8))
1726 __btree_path_level_init(path, l);
1729 if (l != path->level) {
1730 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1731 __bch2_btree_path_unlock(path);
1734 bch2_btree_path_verify(trans, path);
1738 /* Btree path: main interface: */
1740 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1742 struct btree_path *next;
1744 next = prev_btree_path(trans, path);
1745 if (next && !btree_path_cmp(next, path))
1748 next = next_btree_path(trans, path);
1749 if (next && !btree_path_cmp(next, path))
1755 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1757 struct btree_path *next;
1759 next = prev_btree_path(trans, path);
1760 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1763 next = next_btree_path(trans, path);
1764 if (next && next->level == path->level && path_l(next)->b == path_l(path)->b)
1770 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1772 __bch2_btree_path_unlock(path);
1773 btree_path_list_remove(trans, path);
1774 trans->paths_allocated &= ~(1ULL << path->idx);
1777 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1779 struct btree_path *dup;
1781 EBUG_ON(trans->paths + path->idx != path);
1782 EBUG_ON(!path->ref);
1784 if (!__btree_path_put(path, intent))
1788 * Perhaps instead we should check for duplicate paths in traverse_all:
1790 if (path->preserve &&
1791 (dup = have_path_at_pos(trans, path))) {
1792 dup->preserve = true;
1793 path->preserve = false;
1797 if (!path->preserve &&
1798 (dup = have_node_at_pos(trans, path)))
1802 if (path->should_be_locked &&
1803 !btree_node_locked(dup, path->level))
1806 dup->should_be_locked |= path->should_be_locked;
1807 __bch2_path_free(trans, path);
1811 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1813 struct btree_path *path;
1814 struct btree_insert_entry *i;
1815 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
1818 trans_for_each_path_inorder(trans, path, idx) {
1819 printbuf_reset(&buf1);
1821 bch2_bpos_to_text(&buf1, path->pos);
1823 printk(KERN_ERR "path: idx %u ref %u:%u%s%s btree=%s l=%u pos %s locks %u %pS\n",
1824 path->idx, path->ref, path->intent_ref,
1825 path->should_be_locked ? " S" : "",
1826 path->preserve ? " P" : "",
1827 bch2_btree_ids[path->btree_id],
1831 #ifdef CONFIG_BCACHEFS_DEBUG
1832 (void *) path->ip_allocated
1839 trans_for_each_update(trans, i) {
1841 struct bkey_s_c old = bch2_btree_path_peek_slot(i->path, &u);
1843 printbuf_reset(&buf1);
1844 printbuf_reset(&buf2);
1845 bch2_bkey_val_to_text(&buf1, trans->c, old);
1846 bch2_bkey_val_to_text(&buf2, trans->c, bkey_i_to_s_c(i->k));
1848 printk(KERN_ERR "update: btree %s %pS\n old %s\n new %s",
1849 bch2_btree_ids[i->btree_id],
1850 (void *) i->ip_allocated,
1851 buf1.buf, buf2.buf);
1854 printbuf_exit(&buf2);
1855 printbuf_exit(&buf1);
1858 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1859 struct btree_path *pos)
1861 struct btree_path *path;
1864 if (unlikely(trans->paths_allocated ==
1865 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1866 bch2_dump_trans_paths_updates(trans);
1867 panic("trans path overflow\n");
1870 idx = __ffs64(~trans->paths_allocated);
1871 trans->paths_allocated |= 1ULL << idx;
1873 path = &trans->paths[idx];
1877 path->intent_ref = 0;
1878 path->nodes_locked = 0;
1879 path->nodes_intent_locked = 0;
1881 btree_path_list_add(trans, pos, path);
1885 struct btree_path *bch2_path_get(struct btree_trans *trans,
1886 enum btree_id btree_id, struct bpos pos,
1887 unsigned locks_want, unsigned level,
1888 unsigned flags, unsigned long ip)
1890 struct btree_path *path, *path_pos = NULL;
1891 bool cached = flags & BTREE_ITER_CACHED;
1892 bool intent = flags & BTREE_ITER_INTENT;
1895 BUG_ON(trans->restarted);
1896 btree_trans_verify_sorted(trans);
1898 trans_for_each_path_inorder(trans, path, i) {
1899 if (__btree_path_cmp(path,
1910 path_pos->cached == cached &&
1911 path_pos->btree_id == btree_id &&
1912 path_pos->level == level) {
1913 __btree_path_get(path_pos, intent);
1914 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1916 path = btree_path_alloc(trans, path_pos);
1919 __btree_path_get(path, intent);
1921 path->btree_id = btree_id;
1922 path->cached = cached;
1923 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1924 path->should_be_locked = false;
1925 path->level = level;
1926 path->locks_want = locks_want;
1927 path->nodes_locked = 0;
1928 path->nodes_intent_locked = 0;
1929 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1930 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1931 #ifdef CONFIG_BCACHEFS_DEBUG
1932 path->ip_allocated = ip;
1934 btree_trans_verify_sorted(trans);
1937 if (!(flags & BTREE_ITER_NOPRESERVE))
1938 path->preserve = true;
1940 if (path->intent_ref)
1941 locks_want = max(locks_want, level + 1);
1944 * If the path has locks_want greater than requested, we don't downgrade
1945 * it here - on transaction restart because btree node split needs to
1946 * upgrade locks, we might be putting/getting the iterator again.
1947 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1948 * a successful transaction commit.
1951 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1952 if (locks_want > path->locks_want) {
1953 path->locks_want = locks_want;
1954 btree_path_get_locks(trans, path, true);
1960 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1965 if (!path->cached) {
1966 struct btree_path_level *l = path_l(path);
1967 struct bkey_packed *_k;
1969 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1971 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1972 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1974 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1976 if (!k.k || bpos_cmp(path->pos, k.k->p))
1979 struct bkey_cached *ck = (void *) path->l[0].b;
1982 (path->btree_id != ck->key.btree_id ||
1983 bkey_cmp(path->pos, ck->key.pos)));
1985 /* BTREE_ITER_CACHED_NOFILL|BTREE_ITER_CACHED_NOCREATE? */
1986 if (unlikely(!ck || !ck->valid))
1987 return bkey_s_c_null;
1989 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1992 k = bkey_i_to_s_c(ck->k);
1999 return (struct bkey_s_c) { u, NULL };
2002 /* Btree iterators: */
2005 __bch2_btree_iter_traverse(struct btree_iter *iter)
2007 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2011 bch2_btree_iter_traverse(struct btree_iter *iter)
2015 iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
2016 btree_iter_search_key(iter),
2017 iter->flags & BTREE_ITER_INTENT,
2018 btree_iter_ip_allocated(iter));
2020 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
2024 iter->path->should_be_locked = true;
2028 /* Iterate across nodes (leaf and interior nodes) */
2030 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
2032 struct btree_trans *trans = iter->trans;
2033 struct btree *b = NULL;
2036 EBUG_ON(iter->path->cached);
2037 bch2_btree_iter_verify(iter);
2039 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2043 b = btree_path_node(iter->path, iter->path->level);
2047 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
2049 bkey_init(&iter->k);
2050 iter->k.p = iter->pos = b->key.k.p;
2052 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2053 iter->flags & BTREE_ITER_INTENT,
2054 btree_iter_ip_allocated(iter));
2055 iter->path->should_be_locked = true;
2056 BUG_ON(iter->path->uptodate);
2058 bch2_btree_iter_verify_entry_exit(iter);
2059 bch2_btree_iter_verify(iter);
2067 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
2069 struct btree_trans *trans = iter->trans;
2070 struct btree_path *path = iter->path;
2071 struct btree *b = NULL;
2075 BUG_ON(trans->restarted);
2076 EBUG_ON(iter->path->cached);
2077 bch2_btree_iter_verify(iter);
2079 /* already at end? */
2080 if (!btree_path_node(path, path->level))
2084 if (!btree_path_node(path, path->level + 1)) {
2085 btree_node_unlock(path, path->level);
2086 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2091 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
2092 __bch2_btree_path_unlock(path);
2093 path->l[path->level].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2094 path->l[path->level + 1].b = BTREE_ITER_NO_NODE_GET_LOCKS;
2095 trace_trans_restart_relock_next_node(trans->fn, _THIS_IP_,
2096 path->btree_id, &path->pos);
2097 btree_trans_restart(trans);
2102 b = btree_path_node(path, path->level + 1);
2104 if (!bpos_cmp(iter->pos, b->key.k.p)) {
2105 btree_node_unlock(path, path->level);
2106 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
2110 * Haven't gotten to the end of the parent node: go back down to
2111 * the next child node
2114 bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
2115 iter->flags & BTREE_ITER_INTENT,
2116 btree_iter_ip_allocated(iter));
2118 path->level = iter->min_depth;
2120 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
2121 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
2122 btree_node_unlock(path, l);
2124 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
2125 bch2_btree_iter_verify(iter);
2127 ret = bch2_btree_path_traverse(trans, path, iter->flags);
2131 b = path->l[path->level].b;
2134 bkey_init(&iter->k);
2135 iter->k.p = iter->pos = b->key.k.p;
2137 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
2138 iter->flags & BTREE_ITER_INTENT,
2139 btree_iter_ip_allocated(iter));
2140 iter->path->should_be_locked = true;
2141 BUG_ON(iter->path->uptodate);
2143 bch2_btree_iter_verify_entry_exit(iter);
2144 bch2_btree_iter_verify(iter);
2152 /* Iterate across keys (in leaf nodes only) */
2154 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
2156 struct bpos pos = iter->k.p;
2157 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2158 ? bpos_cmp(pos, SPOS_MAX)
2159 : bkey_cmp(pos, SPOS_MAX)) != 0;
2161 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2162 pos = bkey_successor(iter, pos);
2163 bch2_btree_iter_set_pos(iter, pos);
2167 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
2169 struct bpos pos = bkey_start_pos(&iter->k);
2170 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
2171 ? bpos_cmp(pos, POS_MIN)
2172 : bkey_cmp(pos, POS_MIN)) != 0;
2174 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
2175 pos = bkey_predecessor(iter, pos);
2176 bch2_btree_iter_set_pos(iter, pos);
2180 static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
2181 enum btree_id btree_id,
2184 struct btree_insert_entry *i;
2186 trans_for_each_update(trans, i)
2187 if ((cmp_int(btree_id, i->btree_id) ?:
2188 bpos_cmp(pos, i->k->k.p)) <= 0) {
2189 if (btree_id == i->btree_id)
2198 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
2199 struct btree_iter *iter,
2202 struct bkey_i *next_journal =
2203 bch2_journal_keys_peek(trans->c, iter->btree_id, 0,
2207 bpos_cmp(next_journal->k.p,
2208 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2209 iter->k = next_journal->k;
2210 k = bkey_i_to_s_c(next_journal);
2217 * Checks btree key cache for key at iter->pos and returns it if present, or bkey_s_c_null:
2221 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
2223 struct btree_trans *trans = iter->trans;
2224 struct bch_fs *c = trans->c;
2228 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
2229 return bkey_s_c_null;
2231 if (!iter->key_cache_path)
2232 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
2233 iter->flags & BTREE_ITER_INTENT, 0,
2234 iter->flags|BTREE_ITER_CACHED,
2237 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
2238 iter->flags & BTREE_ITER_INTENT,
2239 btree_iter_ip_allocated(iter));
2241 ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
2243 return bkey_s_c_err(ret);
2245 iter->key_cache_path->should_be_locked = true;
2247 return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
2250 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
2252 struct btree_trans *trans = iter->trans;
2253 struct bkey_i *next_update;
2254 struct bkey_s_c k, k2;
2257 EBUG_ON(iter->path->cached || iter->path->level);
2258 bch2_btree_iter_verify(iter);
2261 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2262 iter->flags & BTREE_ITER_INTENT,
2263 btree_iter_ip_allocated(iter));
2265 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2266 if (unlikely(ret)) {
2267 /* ensure that iter->k is consistent with iter->pos: */
2268 bch2_btree_iter_set_pos(iter, iter->pos);
2269 k = bkey_s_c_err(ret);
2273 iter->path->should_be_locked = true;
2275 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2277 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2279 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
2283 bch2_btree_iter_set_pos(iter, iter->pos);
2291 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2292 k = btree_trans_peek_journal(trans, iter, k);
2294 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2295 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2298 bpos_cmp(next_update->k.p,
2299 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2300 iter->k = next_update->k;
2301 k = bkey_i_to_s_c(next_update);
2304 if (k.k && bkey_deleted(k.k)) {
2306 * If we've got a whiteout, and it's after the search
2307 * key, advance the search key to the whiteout instead
2308 * of just after the whiteout - it might be a btree
2309 * whiteout, with a real key at the same position, since
2310 * in the btree deleted keys sort before non deleted.
2312 search_key = bpos_cmp(search_key, k.k->p)
2314 : bpos_successor(k.k->p);
2320 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2321 /* Advance to next leaf node: */
2322 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2325 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2331 bch2_btree_iter_verify(iter);
2337 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
2340 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
2342 struct btree_trans *trans = iter->trans;
2343 struct bpos search_key = btree_iter_search_key(iter);
2347 if (iter->update_path) {
2348 bch2_path_put(trans, iter->update_path,
2349 iter->flags & BTREE_ITER_INTENT);
2350 iter->update_path = NULL;
2353 bch2_btree_iter_verify_entry_exit(iter);
2356 k = __bch2_btree_iter_peek(iter, search_key);
2357 if (!k.k || bkey_err(k))
2360 if (iter->update_path &&
2361 bkey_cmp(iter->update_path->pos, k.k->p)) {
2362 bch2_path_put(trans, iter->update_path,
2363 iter->flags & BTREE_ITER_INTENT);
2364 iter->update_path = NULL;
2367 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2368 (iter->flags & BTREE_ITER_INTENT) &&
2369 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2370 !iter->update_path) {
2371 struct bpos pos = k.k->p;
2373 if (pos.snapshot < iter->snapshot) {
2374 search_key = bpos_successor(k.k->p);
2378 pos.snapshot = iter->snapshot;
2381 * advance, same as on exit for iter->path, but only up to snapshot
2384 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2385 iter->update_path = iter->path;
2387 iter->update_path = bch2_btree_path_set_pos(trans,
2388 iter->update_path, pos,
2389 iter->flags & BTREE_ITER_INTENT,
2390 btree_iter_ip_allocated(iter));
2392 BUG_ON(!(iter->update_path->nodes_locked & 1));
2393 iter->update_path->should_be_locked = true;
2397 * We can never have a key in a leaf node at POS_MAX, so
2398 * we don't have to check these successor() calls:
2400 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2401 !bch2_snapshot_is_ancestor(trans->c,
2404 search_key = bpos_successor(k.k->p);
2408 if (bkey_whiteout(k.k) &&
2409 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2410 search_key = bkey_successor(iter, k.k->p);
2418 * iter->pos should be monotonically increasing, and always be equal to
2419 * the key we just returned - except extents can straddle iter->pos:
2421 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2423 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2424 iter->pos = bkey_start_pos(k.k);
2426 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2427 iter->flags & BTREE_ITER_INTENT,
2428 btree_iter_ip_allocated(iter));
2429 BUG_ON(!iter->path->nodes_locked);
2431 if (iter->update_path) {
2432 BUG_ON(!(iter->update_path->nodes_locked & 1));
2433 iter->update_path->should_be_locked = true;
2435 iter->path->should_be_locked = true;
2437 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2438 iter->pos.snapshot = iter->snapshot;
2440 ret = bch2_btree_iter_verify_ret(iter, k);
2441 if (unlikely(ret)) {
2442 bch2_btree_iter_set_pos(iter, iter->pos);
2443 k = bkey_s_c_err(ret);
2446 bch2_btree_iter_verify_entry_exit(iter);
2452 * bch2_btree_iter_next: returns first key greater than iterator's current position
2455 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2457 if (!bch2_btree_iter_advance(iter))
2458 return bkey_s_c_null;
2460 return bch2_btree_iter_peek(iter);
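/*
 * Minimal usage sketch (illustrative, not in the original source) of the
 * peek/advance loop these functions implement; -EINTR/transaction restart
 * handling elided:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS_MIN, 0);
 *	while ((k = bch2_btree_iter_peek(&iter)).k &&
 *	       !(ret = bkey_err(k))) {
 *		... use k ...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */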

/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = iter->pos;
	struct btree_path *saved_path = NULL;
	struct bkey_s_c k;
	struct bkey saved_k;
	const struct bch_val *saved_v;
	int ret;

	EBUG_ON(iter->path->cached || iter->path->level);
	EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);

	if (iter->flags & BTREE_ITER_WITH_JOURNAL)
		return bkey_s_c_err(-EIO);

	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		search_key.snapshot = U32_MAX;

	while (1) {
		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
						iter->flags & BTREE_ITER_INTENT,
						btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out;
		}

		k = btree_path_level_peek(trans->c, iter->path,
					  &iter->path->l[0], &iter->k);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
		     : bpos_cmp(k.k->p, search_key) > 0))
			k = btree_path_level_prev(trans->c, iter->path,
						  &iter->path->l[0], &iter->k);

		bch2_btree_path_check_sort(trans, iter->path, 0);

		if (likely(k.k)) {
			if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
				if (k.k->p.snapshot == iter->snapshot)
					goto got_key;

				/*
				 * If we have a saved candidate, and we're no
				 * longer at the same _key_ (not pos), return
				 * that candidate
				 */
				if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
					bch2_path_put(trans, iter->path,
						      iter->flags & BTREE_ITER_INTENT);
					iter->path = saved_path;
					saved_path = NULL;
					iter->k = saved_k;
					k.v = saved_v;
					goto got_key;
				}

				if (bch2_snapshot_is_ancestor(iter->trans->c,
							      iter->snapshot,
							      k.k->p.snapshot)) {
					if (saved_path)
						bch2_path_put(trans, saved_path,
						      iter->flags & BTREE_ITER_INTENT);
					saved_path = btree_path_clone(trans, iter->path,
								iter->flags & BTREE_ITER_INTENT);
					saved_k = *k.k;
					saved_v = k.v;
				}

				search_key = bpos_predecessor(k.k->p);
				continue;
			}
got_key:
			if (bkey_whiteout(k.k) &&
			    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
				search_key = bkey_predecessor(iter, k.k->p);
				if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
					search_key.snapshot = U32_MAX;
				continue;
			}

			break;
		} else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
			/* Advance to previous leaf node: */
			search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
		} else {
			/* Start of btree: */
			bch2_btree_iter_set_pos(iter, POS_MIN);
			k = bkey_s_c_null;
			goto out;
		}
	}

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);

	/* Extents can straddle iter->pos: */
	if (bkey_cmp(k.k->p, iter->pos) < 0)
		iter->pos = k.k->p;

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		iter->pos.snapshot = iter->snapshot;
out:
	if (saved_path)
		bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
	iter->path->should_be_locked = true;

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return k;
}

/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
}

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key;
	struct bkey_s_c k;
	int ret;

	EBUG_ON(iter->path->level);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	/* extents can't span inode numbers: */
	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	}

	search_key = btree_iter_search_key(iter);
	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	if ((iter->flags & BTREE_ITER_CACHED) ||
	    !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
		struct bkey_i *next_update;

		if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
		    (next_update = btree_trans_peek_updates(trans,
						iter->btree_id, search_key)) &&
		    !bpos_cmp(next_update->k.p, iter->pos)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
		    (next_update = bch2_journal_keys_peek(trans->c, iter->btree_id,
							  0, iter->pos)) &&
		    !bpos_cmp(next_update->k.p, iter->pos)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
			if (!bkey_err(k))
				iter->k = *k.k;
			goto out;
		}

		k = bch2_btree_path_peek_slot(iter->path, &iter->k);
	} else {
		struct bpos next;

		if (iter->flags & BTREE_ITER_INTENT) {
			struct btree_iter iter2;

			bch2_trans_copy_iter(&iter2, iter);
			k = bch2_btree_iter_peek(&iter2);

			if (k.k && !bkey_err(k)) {
				iter->k = iter2.k;
				k.k = &iter->k;
			}
			bch2_trans_iter_exit(trans, &iter2);
		} else {
			struct bpos pos = iter->pos;

			k = bch2_btree_iter_peek(iter);
			iter->pos = pos;
		}

		if (unlikely(bkey_err(k)))
			return k;

		next = k.k ? bkey_start_pos(k.k) : POS_MAX;

		if (bkey_cmp(iter->pos, next) < 0) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;

			if (iter->flags & BTREE_ITER_IS_EXTENTS) {
				bch2_key_resize(&iter->k,
						min_t(u64, KEY_SIZE_MAX,
						      (next.inode == iter->pos.inode
						       ? next.offset
						       : KEY_OFFSET_MAX) -
						      iter->pos.offset));
				EBUG_ON(!iter->k.size);
			}

			k = (struct bkey_s_c) { &iter->k, NULL };
		}
	}
out:
	iter->path->should_be_locked = true;

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	return k;
}

struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
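
/*
 * Example (an illustrative sketch, not part of this file): unlike peek(), the
 * slot variants return a key for every position - when nothing exists at
 * iter->pos a zero-size deleted key (or, for extents, a hole spanning the
 * gap) is synthesized - so a point lookup typically looks like the following.
 * The btree id and inode number are hypothetical:
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, POS(0, inum),
 *			     BTREE_ITER_CACHED);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k) ?: (bkey_deleted(k.k) ? -ENOENT : 0);
 *	bch2_trans_iter_exit(&trans, &iter);
 */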

/* new transactional stuff: */

static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
						struct btree_path *path)
{
	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
	EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
}

static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	for (i = 0; i < trans->nr_sorted; i++)
		btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
#endif
}

static void btree_trans_verify_sorted(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_path *path, *prev = NULL;
	unsigned i;

	trans_for_each_path_inorder(trans, path, i) {
		if (prev && btree_path_cmp(prev, path) > 0) {
			bch2_dump_trans_paths_updates(trans);
			panic("trans paths out of order!\n");
		}
		prev = path;
	}
#endif
}

static inline void btree_path_swap(struct btree_trans *trans,
				   struct btree_path *l, struct btree_path *r)
{
	swap(l->sorted_idx, r->sorted_idx);
	swap(trans->sorted[l->sorted_idx],
	     trans->sorted[r->sorted_idx]);

	btree_path_verify_sorted_ref(trans, l);
	btree_path_verify_sorted_ref(trans, r);
}

inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
				       int cmp)
{
	struct btree_path *n;

	if (cmp <= 0) {
		n = prev_btree_path(trans, path);
		if (n && btree_path_cmp(n, path) > 0) {
			do {
				btree_path_swap(trans, n, path);
				n = prev_btree_path(trans, path);
			} while (n && btree_path_cmp(n, path) > 0);
			goto out;
		}
	}
	if (cmp >= 0) {
		n = next_btree_path(trans, path);
		if (n && btree_path_cmp(path, n) > 0) {
			do {
				btree_path_swap(trans, path, n);
				n = next_btree_path(trans, path);
			} while (n && btree_path_cmp(path, n) > 0);
		}
	}
out:
	btree_trans_verify_sorted(trans);
}

static inline void btree_path_list_remove(struct btree_trans *trans,
					  struct btree_path *path)
{
	unsigned i;

	EBUG_ON(path->sorted_idx >= trans->nr_sorted);

	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	path->sorted_idx = U8_MAX;

	btree_trans_verify_sorted_refs(trans);
}

static inline void btree_path_list_add(struct btree_trans *trans,
				       struct btree_path *pos,
				       struct btree_path *path)
{
	unsigned i;

	btree_trans_verify_sorted_refs(trans);

	path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;

	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	btree_trans_verify_sorted_refs(trans);
}

void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
	if (iter->path)
		bch2_path_put(trans, iter->path,
			      iter->flags & BTREE_ITER_INTENT);
	if (iter->update_path)
		bch2_path_put(trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	if (iter->key_cache_path)
		bch2_path_put(trans, iter->key_cache_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->path = NULL;
	iter->update_path = NULL;
	iter->key_cache_path = NULL;
}

static void __bch2_trans_iter_init(struct btree_trans *trans,
				   struct btree_iter *iter,
				   unsigned btree_id, struct bpos pos,
				   unsigned locks_want,
				   unsigned depth,
				   unsigned flags,
				   unsigned long ip)
{
	EBUG_ON(trans->restarted);

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
		flags |= BTREE_ITER_WITH_JOURNAL;

	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	iter->trans		= trans;
	iter->path		= NULL;
	iter->update_path	= NULL;
	iter->key_cache_path	= NULL;
	iter->btree_id		= btree_id;
	iter->min_depth		= depth;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k.type		= KEY_TYPE_deleted;
	iter->k.p		= pos;
	iter->k.size		= 0;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif

	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos,
			       0, 0, flags, _RET_IP_);
}

void bch2_trans_node_iter_init(struct btree_trans *trans,
			       struct btree_iter *iter,
			       enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
			       BTREE_ITER_NOT_EXTENTS|
			       __BTREE_ITER_ALL_SNAPSHOTS|
			       BTREE_ITER_ALL_SNAPSHOTS|
			       flags, _RET_IP_);
	BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->path->level != depth);
	BUG_ON(iter->min_depth != depth);
}

void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
	*dst = *src;

	__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
	if (src->update_path)
		__btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
	dst->key_cache_path = NULL;
}

void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size_t new_top = trans->mem_top + size;
	void *p;

	if (new_top > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;

		WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

		new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->fn, _RET_IP_, new_bytes);
			btree_trans_restart(trans);
			return ERR_PTR(-EINTR);
		}
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
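
/*
 * Example (an illustrative sketch, not part of this file): allocations that
 * need to live for the duration of the transaction - e.g. a bkey_i being
 * built up for an update - come from this bump allocator; the memory is
 * reused when the transaction is reset and freed by bch2_trans_exit():
 *
 *	struct bkey_i *new = bch2_trans_kmalloc(trans, sizeof(*new));
 *	int ret = PTR_ERR_OR_ZERO(new);
 *
 *	if (ret)
 *		return ret;
 *
 *	bkey_init(&new->k);
 *	new->k.p = iter->pos;
 */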

/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return EINTR when the trylock fails. When this occurs
 * bch2_trans_begin() should be called and the transaction retried.
 */
void bch2_trans_begin(struct btree_trans *trans)
{
	struct btree_insert_entry *i;
	struct btree_path *path;

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);

	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->mem_top			= 0;

	trans->hooks			= NULL;
	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	trans_for_each_path(trans, path) {
		path->should_be_locked = false;

		/*
		 * XXX: we probably shouldn't be doing this if the transaction
		 * was restarted, but currently we still overflow transaction
		 * iterators if we do that
		 */
		if (!path->ref && !path->preserve)
			__bch2_path_free(trans, path);
		else
			path->preserve = false;
	}

	bch2_trans_cond_resched(trans);

	if (trans->restarted)
		bch2_btree_path_traverse_all(trans);

	trans->restarted = false;
}
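
/*
 * Example (an illustrative sketch, not part of this file): the standard retry
 * loop around a transactional operation. do_something() stands in for any
 * sequence of iterator/update calls that may fail with -EINTR:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_something(&trans);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 *
 *	return ret;
 */
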
static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
	size_t paths_bytes	= sizeof(struct btree_path) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);
#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);

	trans->paths	= p; p += paths_bytes;
	trans->updates	= p; p += updates_bytes;
}

void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		       unsigned expected_nr_iters,
		       size_t expected_mem_bytes,
		       const char *fn)
	__acquires(&c->btree_trans_barrier)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->fn	= fn;

	bch2_trans_alloc_paths(trans, c);

	if (expected_mem_bytes) {
		trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
		trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		}
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
}

static void check_btree_paths_leaked(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %s!", trans->fn);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
			       bch2_btree_ids[path->btree_id],
			       (void *) path->ip_allocated);
	/* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}

void bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct btree_insert_entry *i;
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);

	mutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&c->btree_trans_lock);

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);

	trans->mem	= (void *) 0x1;
	trans->paths	= (void *) 0x1;
}

static void __maybe_unused
bch2_btree_path_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     bool cached)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, cached));
}

static bool trans_has_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}

void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_path *path;
	struct btree *b;
	static char lock_types[] = { 'r', 'i', 'w' };
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_locks(trans))
			continue;

		pr_buf(out, "%i %s\n", trans->pid, trans->fn);

		trans_for_each_path(trans, path) {
			if (!path->nodes_locked)
				continue;

			pr_buf(out, "  path %u %c l=%u %s:",
			       path->idx,
			       path->cached ? 'c' : 'b',
			       path->level,
			       bch2_btree_ids[path->btree_id]);
			bch2_bpos_to_text(out, path->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(path, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(path, l) ? "i" : "r", l);
					bch2_btree_path_node_to_text(out,
							(void *) path->l[l].b,
							path->cached);
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			path = &trans->paths[trans->locking_path_idx];
			pr_buf(out, "  locking path %u %c l=%u %c %s:",
			       trans->locking_path_idx,
			       path->cached ? 'c' : 'b',
			       trans->locking_level,
			       lock_types[trans->locking_lock_type],
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_path_node_to_text(out,
					(void *) b, path->cached);
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
}

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	if (c->btree_trans_barrier_initialized)
		cleanup_srcu_struct(&c->btree_trans_barrier);
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;
	int ret;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	ret   = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
			sizeof(struct btree_path) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX) ?:
		init_srcu_struct(&c->btree_trans_barrier);
	if (!ret)
		c->btree_trans_barrier_initialized = true;
	return ret;
}