1 // SPDX-License-Identifier: GPL-2.0
3 #include "bcachefs.h"
4 #include "bkey_methods.h"
5 #include "bkey_buf.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
16 #include "subvolume.h"
18 #include <linux/prefetch.h>
19 #include <trace/events/bcachefs.h>
21 static void btree_trans_verify_sorted(struct btree_trans *);
22 static void btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
24 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
25 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
28 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
30 static inline int __btree_path_cmp(const struct btree_path *l,
31 enum btree_id r_btree_id,
32 bool r_cached,
33 struct bpos r_pos,
34 unsigned r_level)
35 {
36 return cmp_int(l->btree_id, r_btree_id) ?:
37 cmp_int(l->cached, r_cached) ?:
38 bpos_cmp(l->pos, r_pos) ?:
39 -cmp_int(l->level, r_level);
42 static inline int btree_path_cmp(const struct btree_path *l,
43 const struct btree_path *r)
45 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
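/*
 * Example (illustrative, hypothetical values): two paths agreeing on
 * btree_id, cached and pos but differing in level:
 *
 *	a.level == 1, b.level == 0
 *	=> btree_path_cmp(&a, &b) < 0
 *
 * i.e. at the same position the path at the higher level (closer to the
 * root) sorts first, because the level comparison above is negated.
 */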
48 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
50 /* Are we iterating over keys in all snapshots? */
51 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
52 p = bpos_successor(p);
54 p = bpos_nosnap_successor(p);
55 p.snapshot = iter->snapshot;
61 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
63 /* Are we iterating over keys in all snapshots? */
64 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
65 p = bpos_predecessor(p);
67 p = bpos_nosnap_predecessor(p);
68 p.snapshot = iter->snapshot;
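/*
 * Example (illustrative positions, assuming SPOS(inode, offset, snapshot)):
 *
 *	// Without BTREE_ITER_ALL_SNAPSHOTS, inode:offset advances and the
 *	// snapshot field stays pinned to iter->snapshot:
 *	//	bkey_successor(iter, SPOS(1, 10, snap)) == SPOS(1, 11, snap)
 *
 *	// With BTREE_ITER_ALL_SNAPSHOTS, the snapshot field is the least
 *	// significant part of the key and advances first:
 *	//	bpos_successor(SPOS(1, 10, 3)) == SPOS(1, 10, 4)
 */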
74 static inline bool is_btree_node(struct btree_path *path, unsigned l)
76 return l < BTREE_MAX_DEPTH &&
77 (unsigned long) path->l[l].b >= 128;
80 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
82 struct bpos pos = iter->pos;
84 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
85 bkey_cmp(pos, POS_MAX))
86 pos = bkey_successor(iter, pos);
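/*
 * Example (illustrative numbers): extents are indexed by their end
 * position, so an extent covering [5..15) is stored at offset 15. With
 * iter->pos.offset == 10, searching from bkey_successor(iter, pos)
 * (offset 11) still finds that extent, while an extent ending exactly at
 * 10 - which does not contain offset 10 - is correctly skipped.
 */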
90 static inline bool btree_path_pos_before_node(struct btree_path *path,
93 return bpos_cmp(path->pos, b->data->min_key) < 0;
96 static inline bool btree_path_pos_after_node(struct btree_path *path,
99 return bpos_cmp(b->key.k.p, path->pos) < 0;
102 static inline bool btree_path_pos_in_node(struct btree_path *path,
105 return path->btree_id == b->c.btree_id &&
106 !btree_path_pos_before_node(path, b) &&
107 !btree_path_pos_after_node(path, b);
110 /* Btree node locking: */
112 void bch2_btree_node_unlock_write(struct btree_trans *trans,
113 struct btree_path *path, struct btree *b)
115 bch2_btree_node_unlock_write_inlined(trans, path, b);
118 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
120 struct btree_path *linked;
121 unsigned readers = 0;
123 trans_for_each_path(trans, linked)
124 if (linked->l[b->c.level].b == b &&
125 btree_node_read_locked(linked, b->c.level))
129 * Must drop our read locks before calling six_lock_write() -
130 * six_unlock() won't do wakeups until the reader count
131 * goes to 0, and it's safe because we have the node intent
132 * locked:
134 atomic64_sub(__SIX_VAL(read_lock, readers),
135 &b->c.lock.state.counter);
136 btree_node_lock_type(trans->c, b, SIX_LOCK_write);
137 atomic64_add(__SIX_VAL(read_lock, readers),
138 &b->c.lock.state.counter);
141 bool __bch2_btree_node_relock(struct btree_trans *trans,
142 struct btree_path *path, unsigned level)
144 struct btree *b = btree_path_node(path, level);
145 int want = __btree_lock_want(path, level);
147 if (!is_btree_node(path, level))
153 if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
154 (btree_node_lock_seq_matches(path, b, level) &&
155 btree_node_lock_increment(trans, b, level, want))) {
156 mark_btree_node_locked(path, level, want);
163 static bool bch2_btree_node_upgrade(struct btree_trans *trans,
164 struct btree_path *path, unsigned level)
166 struct btree *b = path->l[level].b;
168 EBUG_ON(btree_lock_want(path, level) != BTREE_NODE_INTENT_LOCKED);
170 if (!is_btree_node(path, level))
173 if (btree_node_intent_locked(path, level))
179 if (btree_node_locked(path, level)
180 ? six_lock_tryupgrade(&b->c.lock)
181 : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
184 if (btree_node_lock_seq_matches(path, b, level) &&
185 btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
186 btree_node_unlock(path, level);
192 mark_btree_node_intent_locked(path, level);
196 static inline bool btree_path_get_locks(struct btree_trans *trans,
197 struct btree_path *path,
198 bool upgrade, unsigned long trace_ip)
200 unsigned l = path->level;
204 if (!btree_path_node(path, l))
208 ? bch2_btree_node_upgrade(trans, path, l)
209 : bch2_btree_node_relock(trans, path, l)))
213 } while (l < path->locks_want);
216 * When we fail to get a lock, we have to ensure that any child nodes
217 * can't be relocked so bch2_btree_path_traverse has to walk back up to
218 * the node that we failed to relock:
221 __bch2_btree_path_unlock(path);
222 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
225 path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
227 } while (fail_idx >= 0);
230 if (path->uptodate == BTREE_ITER_NEED_RELOCK)
231 path->uptodate = BTREE_ITER_UPTODATE;
233 bch2_trans_verify_locks(trans);
235 return path->uptodate < BTREE_ITER_NEED_RELOCK;
238 static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
242 ? container_of(_b, struct btree, c)->key.k.p
243 : container_of(_b, struct bkey_cached, c)->key.pos;
247 bool __bch2_btree_node_lock(struct btree_trans *trans,
248 struct btree_path *path,
250 struct bpos pos, unsigned level,
251 enum six_lock_type type,
252 six_lock_should_sleep_fn should_sleep_fn, void *p,
255 struct btree_path *linked, *deadlock_path = NULL;
256 u64 start_time = local_clock();
260 /* Check if it's safe to block: */
261 trans_for_each_path(trans, linked) {
262 if (!linked->nodes_locked)
266 * Can't block taking an intent lock if we have _any_ nodes read
267 * locked:
269 * - Our read lock blocks another thread with an intent lock on
270 * the same node from getting a write lock, and thus from
271 * dropping its intent lock
273 * - And the other thread may have multiple nodes intent locked:
274 * both the node we want to intent lock, and the node we
275 * already have read locked - deadlock:
277 if (type == SIX_LOCK_intent &&
278 linked->nodes_locked != linked->nodes_intent_locked) {
279 deadlock_path = linked;
283 if (linked->btree_id != path->btree_id) {
284 if (linked->btree_id > path->btree_id) {
285 deadlock_path = linked;
292 * Within the same btree, cached paths come before non
293 * cached paths:
295 if (linked->cached != path->cached) {
297 deadlock_path = linked;
304 * Interior nodes must be locked before their descendants: if
305 * another path has possible descendants locked of the node
306 * we're about to lock, it must have the ancestors locked too:
308 if (level > __fls(linked->nodes_locked)) {
309 deadlock_path = linked;
313 /* Must lock btree nodes in key order: */
314 if (btree_node_locked(linked, level) &&
315 bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
316 linked->cached)) <= 0) {
317 deadlock_path = linked;
319 BUG_ON(trans->in_traverse_all);
323 if (unlikely(deadlock_path)) {
324 trace_trans_restart_would_deadlock(trans->ip, ip,
325 trans->in_traverse_all, reason,
326 deadlock_path->btree_id,
327 deadlock_path->cached,
332 btree_trans_restart(trans);
336 if (six_trylock_type(&b->c.lock, type))
339 #ifdef CONFIG_BCACHEFS_DEBUG
340 trans->locking_path_idx = path->idx;
341 trans->locking_pos = pos;
342 trans->locking_btree_id = path->btree_id;
343 trans->locking_level = level;
347 ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
349 #ifdef CONFIG_BCACHEFS_DEBUG
350 trans->locking = NULL;
353 bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
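/*
 * Sketch (may_follow() is hypothetical, not a real function): the lock
 * ordering the checks above enforce. A node may be locked after the nodes
 * a transaction already holds roughly iff:
 *
 *	static bool may_follow(struct btree_path *held, unsigned held_level,
 *			       enum btree_id want_btree, bool want_cached,
 *			       struct bpos want_pos, unsigned want_level)
 *	{
 *		if (held->btree_id != want_btree)
 *			return held->btree_id < want_btree;
 *		if (held->cached != want_cached)
 *			return held->cached;	// cached paths lock first
 *		if (held_level != want_level)
 *			return held_level > want_level; // ancestors first
 *		return bpos_cmp(btree_node_pos((void *) held->l[held_level].b,
 *					       held->cached), want_pos) < 0;
 *	}
 *
 * plus the rule that an intent lock may not be taken while holding any
 * read locks.
 */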
358 /* Btree iterator locking: */
360 #ifdef CONFIG_BCACHEFS_DEBUG
362 static void bch2_btree_path_verify_locks(struct btree_path *path)
366 if (!path->nodes_locked) {
367 BUG_ON(path->uptodate == BTREE_ITER_UPTODATE);
371 for (l = 0; btree_path_node(path, l); l++)
372 BUG_ON(btree_lock_want(path, l) !=
373 btree_node_locked_type(path, l));
376 void bch2_trans_verify_locks(struct btree_trans *trans)
378 struct btree_path *path;
380 trans_for_each_path(trans, path)
381 bch2_btree_path_verify_locks(path);
384 static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
387 /* Btree path locking: */
390 * Only for btree_cache.c - only relocks intent locks
392 bool bch2_btree_path_relock_intent(struct btree_trans *trans,
393 struct btree_path *path)
397 for (l = path->level;
398 l < path->locks_want && btree_path_node(path, l);
400 if (!bch2_btree_node_relock(trans, path, l)) {
401 __bch2_btree_path_unlock(path);
402 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
403 btree_trans_restart(trans);
412 static bool bch2_btree_path_relock(struct btree_trans *trans,
413 struct btree_path *path, unsigned long trace_ip)
415 bool ret = btree_path_get_locks(trans, path, false, trace_ip);
418 btree_trans_restart(trans);
422 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
423 struct btree_path *path,
424 unsigned new_locks_want)
426 struct btree_path *linked;
428 EBUG_ON(path->locks_want >= new_locks_want);
430 path->locks_want = new_locks_want;
432 if (btree_path_get_locks(trans, path, true, _THIS_IP_))
436 * XXX: this is ugly - we'd prefer to not be mucking with other
437 * iterators in the btree_trans here.
439 * On failure to upgrade the iterator, setting iter->locks_want and
440 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
441 * get the locks we want on transaction restart.
443 * But if this iterator was a clone, on transaction restart what we did
444 * to this iterator isn't going to be preserved.
446 * Possibly we could add an iterator field for the parent iterator when
447 * an iterator is a copy - for now, we'll just upgrade any other
448 * iterators with the same btree id.
450 * The code below used to be needed to ensure ancestor nodes get locked
451 * before interior nodes - now that's handled by
452 * bch2_btree_path_traverse_all().
454 trans_for_each_path(trans, linked)
455 if (linked != path &&
456 linked->cached == path->cached &&
457 linked->btree_id == path->btree_id &&
458 linked->locks_want < new_locks_want) {
459 linked->locks_want = new_locks_want;
460 btree_path_get_locks(trans, linked, true, _THIS_IP_);
466 void __bch2_btree_path_downgrade(struct btree_path *path,
467 unsigned new_locks_want)
471 EBUG_ON(path->locks_want < new_locks_want);
473 path->locks_want = new_locks_want;
475 while (path->nodes_locked &&
476 (l = __fls(path->nodes_locked)) >= path->locks_want) {
477 if (l > path->level) {
478 btree_node_unlock(path, l);
480 if (btree_node_intent_locked(path, l)) {
481 six_lock_downgrade(&path->l[l].b->c.lock);
482 path->nodes_intent_locked ^= 1 << l;
488 bch2_btree_path_verify_locks(path);
491 void bch2_trans_downgrade(struct btree_trans *trans)
493 struct btree_path *path;
495 trans_for_each_path(trans, path)
496 bch2_btree_path_downgrade(path);
499 /* Btree transaction locking: */
501 bool bch2_trans_relock(struct btree_trans *trans)
503 struct btree_path *path;
505 if (unlikely(trans->restarted))
508 trans_for_each_path(trans, path)
509 if (path->should_be_locked &&
510 !bch2_btree_path_relock(trans, path, _RET_IP_)) {
511 trace_trans_restart_relock(trans->ip, _RET_IP_,
512 path->btree_id, &path->pos);
513 BUG_ON(!trans->restarted);
519 void bch2_trans_unlock(struct btree_trans *trans)
521 struct btree_path *path;
523 trans_for_each_path(trans, path)
524 __bch2_btree_path_unlock(path);
526 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
529 /* Btree iterator: */
531 #ifdef CONFIG_BCACHEFS_DEBUG
533 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
534 struct btree_path *path)
536 struct bkey_cached *ck;
537 bool locked = btree_node_locked(path, 0);
539 if (!bch2_btree_node_relock(trans, path, 0))
542 ck = (void *) path->l[0].b;
543 BUG_ON(ck->key.btree_id != path->btree_id ||
544 bkey_cmp(ck->key.pos, path->pos));
547 btree_node_unlock(path, 0);
550 static void bch2_btree_path_verify_level(struct btree_trans *trans,
551 struct btree_path *path, unsigned level)
553 struct btree_path_level *l;
554 struct btree_node_iter tmp;
556 struct bkey_packed *p, *k;
557 char buf1[100], buf2[100], buf3[100];
560 if (!bch2_debug_check_iterators)
565 locked = btree_node_locked(path, level);
569 bch2_btree_path_verify_cached(trans, path);
573 if (!btree_path_node(path, level))
576 if (!bch2_btree_node_relock(trans, path, level))
579 BUG_ON(!btree_path_pos_in_node(path, l->b));
581 bch2_btree_node_iter_verify(&l->iter, l->b);
584 * For interior nodes, the iterator will have skipped past deleted keys:
587 ? bch2_btree_node_iter_prev(&tmp, l->b)
588 : bch2_btree_node_iter_prev_all(&tmp, l->b);
589 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
591 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
596 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
602 btree_node_unlock(path, level);
605 strcpy(buf2, "(none)");
606 strcpy(buf3, "(none)");
608 bch2_bpos_to_text(&PBUF(buf1), path->pos);
611 struct bkey uk = bkey_unpack_key(l->b, p);
612 bch2_bkey_to_text(&PBUF(buf2), &uk);
616 struct bkey uk = bkey_unpack_key(l->b, k);
617 bch2_bkey_to_text(&PBUF(buf3), &uk);
620 panic("path should be %s key at level %u:\n"
624 msg, level, buf1, buf2, buf3);
627 static void bch2_btree_path_verify(struct btree_trans *trans,
628 struct btree_path *path)
630 struct bch_fs *c = trans->c;
633 EBUG_ON(path->btree_id >= BTREE_ID_NR);
635 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
637 BUG_ON(c->btree_roots[path->btree_id].b->c.level > i);
641 bch2_btree_path_verify_level(trans, path, i);
644 bch2_btree_path_verify_locks(path);
647 void bch2_trans_verify_paths(struct btree_trans *trans)
649 struct btree_path *path;
651 trans_for_each_path(trans, path)
652 bch2_btree_path_verify(trans, path);
655 static void bch2_btree_iter_verify(struct btree_iter *iter)
657 struct btree_trans *trans = iter->trans;
659 BUG_ON(iter->btree_id >= BTREE_ID_NR);
661 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
663 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
664 iter->pos.snapshot != iter->snapshot);
666 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
667 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
669 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
670 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
671 !btree_type_has_snapshots(iter->btree_id));
673 bch2_btree_path_verify(trans, iter->path);
676 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
678 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
679 !iter->pos.snapshot);
681 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
682 iter->pos.snapshot != iter->snapshot);
684 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
685 bkey_cmp(iter->pos, iter->k.p) > 0);
688 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
690 struct btree_trans *trans = iter->trans;
691 struct btree_iter copy;
692 struct bkey_s_c prev;
695 if (!bch2_debug_check_iterators)
698 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
701 if (bkey_err(k) || !k.k)
704 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
708 bch2_trans_iter_init(trans, ©, iter->btree_id, iter->pos,
709 BTREE_ITER_ALL_SNAPSHOTS);
710 prev = bch2_btree_iter_prev(©);
714 ret = bkey_err(prev);
718 if (!bkey_cmp(prev.k->p, k.k->p) &&
719 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
720 prev.k->p.snapshot) > 0) {
721 char buf1[100], buf2[200];
723 bch2_bkey_to_text(&PBUF(buf1), k.k);
724 bch2_bkey_to_text(&PBUF(buf2), prev.k);
726 panic("iter snap %u\n"
733 bch2_trans_iter_exit(trans, ©);
739 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
740 struct btree_path *path, unsigned l) {}
741 static inline void bch2_btree_path_verify(struct btree_trans *trans,
742 struct btree_path *path) {}
743 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
744 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
745 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
749 /* Btree path: fixups after btree updates */
751 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
754 struct bkey_packed *k)
756 struct btree_node_iter_set *set;
758 btree_node_iter_for_each(iter, set)
759 if (set->end == t->end_offset) {
760 set->k = __btree_node_key_to_offset(b, k);
761 bch2_btree_node_iter_sort(iter, b);
765 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
768 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
770 struct bkey_packed *where)
772 struct btree_path_level *l = &path->l[b->c.level];
774 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
777 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
778 bch2_btree_node_iter_advance(&l->iter, l->b);
781 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
783 struct bkey_packed *where)
785 struct btree_path *path;
787 trans_for_each_path_with_node(trans, b, path) {
788 __bch2_btree_path_fix_key_modified(path, b, where);
789 bch2_btree_path_verify_level(trans, path, b->c.level);
793 static void __bch2_btree_node_iter_fix(struct btree_path *path,
795 struct btree_node_iter *node_iter,
797 struct bkey_packed *where,
798 unsigned clobber_u64s,
801 const struct bkey_packed *end = btree_bkey_last(b, t);
802 struct btree_node_iter_set *set;
803 unsigned offset = __btree_node_key_to_offset(b, where);
804 int shift = new_u64s - clobber_u64s;
805 unsigned old_end = t->end_offset - shift;
806 unsigned orig_iter_pos = node_iter->data[0].k;
807 bool iter_current_key_modified =
808 orig_iter_pos >= offset &&
809 orig_iter_pos <= offset + clobber_u64s;
811 btree_node_iter_for_each(node_iter, set)
812 if (set->end == old_end)
815 /* didn't find the bset in the iterator - might have to re-add it: */
817 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
818 bch2_btree_node_iter_push(node_iter, b, where, end);
821 /* Iterator is after key that changed */
825 set->end = t->end_offset;
827 /* Iterator hasn't gotten to the key that changed yet: */
832 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
834 } else if (set->k < offset + clobber_u64s) {
835 set->k = offset + new_u64s;
836 if (set->k == set->end)
837 bch2_btree_node_iter_set_drop(node_iter, set);
839 /* Iterator is after key that changed */
840 set->k = (int) set->k + shift;
844 bch2_btree_node_iter_sort(node_iter, b);
846 if (node_iter->data[0].k != orig_iter_pos)
847 iter_current_key_modified = true;
850 * When a new key is added, and the node iterator now points to that
851 * key, the iterator might have skipped past deleted keys that should
852 * come after the key the iterator now points to. We have to rewind to
853 * before those deleted keys - otherwise
854 * bch2_btree_node_iter_prev_all() breaks:
856 if (!bch2_btree_node_iter_end(node_iter) &&
857 iter_current_key_modified &&
860 struct bkey_packed *k, *k2, *p;
862 k = bch2_btree_node_iter_peek_all(node_iter, b);
864 for_each_bset(b, t) {
865 bool set_pos = false;
867 if (node_iter->data[0].end == t->end_offset)
870 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
872 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
873 bkey_iter_cmp(b, k, p) < 0) {
879 btree_node_iter_set_set_pos(node_iter,
885 void bch2_btree_node_iter_fix(struct btree_trans *trans,
886 struct btree_path *path,
888 struct btree_node_iter *node_iter,
889 struct bkey_packed *where,
890 unsigned clobber_u64s,
893 struct bset_tree *t = bch2_bkey_to_bset(b, where);
894 struct btree_path *linked;
896 if (node_iter != &path->l[b->c.level].iter) {
897 __bch2_btree_node_iter_fix(path, b, node_iter, t,
898 where, clobber_u64s, new_u64s);
900 if (bch2_debug_check_iterators)
901 bch2_btree_node_iter_verify(node_iter, b);
904 trans_for_each_path_with_node(trans, b, linked) {
905 __bch2_btree_node_iter_fix(linked, b,
906 &linked->l[b->c.level].iter, t,
907 where, clobber_u64s, new_u64s);
908 bch2_btree_path_verify_level(trans, linked, b->c.level);
912 /* Btree path level: pointer to a particular btree node and node iter */
914 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
915 struct btree_path_level *l,
917 struct bkey_packed *k)
923 * signal to bch2_btree_iter_peek_slot() that we're currently at
924 * a hole
926 u->type = KEY_TYPE_deleted;
927 return bkey_s_c_null;
930 ret = bkey_disassemble(l->b, k, u);
933 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
934 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
935 * being overwritten but doesn't change k->size. But this is ok, because
936 * those keys are never written out, we just have to avoid a spurious
937 * assertion here:
939 if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
940 bch2_bkey_debugcheck(c, l->b, ret);
945 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
946 struct btree_path_level *l,
949 return __btree_iter_unpack(c, l, u,
950 bch2_btree_node_iter_peek_all(&l->iter, l->b));
953 static inline struct bkey_s_c btree_path_level_peek(struct bch_fs *c,
954 struct btree_path *path,
955 struct btree_path_level *l,
958 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
959 bch2_btree_node_iter_peek(&l->iter, l->b));
961 path->pos = k.k ? k.k->p : l->b->key.k.p;
965 static inline struct bkey_s_c btree_path_level_prev(struct bch_fs *c,
966 struct btree_path *path,
967 struct btree_path_level *l,
970 struct bkey_s_c k = __btree_iter_unpack(c, l, u,
971 bch2_btree_node_iter_prev(&l->iter, l->b));
973 path->pos = k.k ? k.k->p : l->b->data->min_key;
977 static inline bool btree_path_advance_to_pos(struct btree_path *path,
978 struct btree_path_level *l,
981 struct bkey_packed *k;
984 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
985 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
986 if (max_advance > 0 && nr_advanced >= max_advance)
989 bch2_btree_node_iter_advance(&l->iter, l->b);
997 * Verify that iterator for parent node points to child node:
999 static void btree_path_verify_new_node(struct btree_trans *trans,
1000 struct btree_path *path, struct btree *b)
1002 struct btree_path_level *l;
1005 struct bkey_packed *k;
1007 if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
1010 plevel = b->c.level + 1;
1011 if (!btree_path_node(path, plevel))
1014 parent_locked = btree_node_locked(path, plevel);
1016 if (!bch2_btree_node_relock(trans, path, plevel))
1019 l = &path->l[plevel];
1020 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1023 bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
1028 struct bkey uk = bkey_unpack_key(b, k);
1030 bch2_dump_btree_node(trans->c, l->b);
1031 bch2_bpos_to_text(&PBUF(buf1), path->pos);
1032 bch2_bkey_to_text(&PBUF(buf2), &uk);
1033 bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
1034 bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
1035 panic("parent iter doesn't point to new node:\n"
1039 bch2_btree_ids[path->btree_id], buf1,
1044 btree_node_unlock(path, plevel);
1047 static inline void __btree_path_level_init(struct btree_path *path,
1050 struct btree_path_level *l = &path->l[level];
1052 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1055 * Iterators to interior nodes should always be pointed at the first non
1056 * whiteout:
1059 bch2_btree_node_iter_peek(&l->iter, l->b);
1062 static inline void btree_path_level_init(struct btree_trans *trans,
1063 struct btree_path *path,
1066 BUG_ON(path->cached);
1068 btree_path_verify_new_node(trans, path, b);
1070 EBUG_ON(!btree_path_pos_in_node(path, b));
1071 EBUG_ON(b->c.lock.state.seq & 1);
1073 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
1074 path->l[b->c.level].b = b;
1075 __btree_path_level_init(path, b->c.level);
1078 /* Btree path: fixups after btree node updates: */
1081 * A btree node is being replaced - update the iterator to point to the new
1082 * node:
1084 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
1086 struct btree_path *path;
1088 trans_for_each_path(trans, path)
1089 if (!path->cached &&
1090 btree_path_pos_in_node(path, b)) {
1091 enum btree_node_locked_type t =
1092 btree_lock_want(path, b->c.level);
1094 if (path->nodes_locked &&
1095 t != BTREE_NODE_UNLOCKED) {
1096 btree_node_unlock(path, b->c.level);
1097 six_lock_increment(&b->c.lock, t);
1098 mark_btree_node_locked(path, b->c.level, t);
1101 btree_path_level_init(trans, path, b);
1106 * A btree node has been modified in such a way as to invalidate iterators - fix
1107 * them:
1109 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
1111 struct btree_path *path;
1113 trans_for_each_path_with_node(trans, b, path)
1114 __btree_path_level_init(path, b->c.level);
1117 /* Btree path: traverse, set_pos: */
1119 static int lock_root_check_fn(struct six_lock *lock, void *p)
1121 struct btree *b = container_of(lock, struct btree, c.lock);
1122 struct btree **rootp = p;
1124 return b == *rootp ? 0 : -1;
1127 static inline int btree_path_lock_root(struct btree_trans *trans,
1128 struct btree_path *path,
1129 unsigned depth_want,
1130 unsigned long trace_ip)
1132 struct bch_fs *c = trans->c;
1133 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
1134 enum six_lock_type lock_type;
1137 EBUG_ON(path->nodes_locked);
1140 b = READ_ONCE(*rootp);
1141 path->level = READ_ONCE(b->c.level);
1143 if (unlikely(path->level < depth_want)) {
1145 * the root is at a lower depth than the depth we want:
1146 * got to the end of the btree, or we're walking nodes
1147 * greater than some depth and there are no nodes >=
1148 * that depth
1150 path->level = depth_want;
1151 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
1152 path->l[i].b = NULL;
1156 lock_type = __btree_lock_want(path, path->level);
1157 if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
1158 path->level, lock_type,
1159 lock_root_check_fn, rootp,
1161 if (trans->restarted)
1166 if (likely(b == READ_ONCE(*rootp) &&
1167 b->c.level == path->level &&
1169 for (i = 0; i < path->level; i++)
1170 path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
1171 path->l[path->level].b = b;
1172 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
1173 path->l[i].b = NULL;
1175 mark_btree_node_locked(path, path->level, lock_type);
1176 btree_path_level_init(trans, path, b);
1180 six_unlock_type(&b->c.lock, lock_type);
1185 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
1187 struct bch_fs *c = trans->c;
1188 struct btree_path_level *l = path_l(path);
1189 struct btree_node_iter node_iter = l->iter;
1190 struct bkey_packed *k;
1191 struct bkey_buf tmp;
1192 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
1193 ? (path->level > 1 ? 0 : 2)
1194 : (path->level > 1 ? 1 : 16);
1195 bool was_locked = btree_node_locked(path, path->level);
1198 bch2_bkey_buf_init(&tmp);
1200 while (nr && !ret) {
1201 if (!bch2_btree_node_relock(trans, path, path->level))
1204 bch2_btree_node_iter_advance(&node_iter, l->b);
1205 k = bch2_btree_node_iter_peek(&node_iter, l->b);
1209 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
1210 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
1215 btree_node_unlock(path, path->level);
1217 bch2_bkey_buf_exit(&tmp, c);
1221 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
1222 struct btree_path *path,
1223 unsigned plevel, struct btree *b)
1225 struct btree_path_level *l = &path->l[plevel];
1226 bool locked = btree_node_locked(path, plevel);
1227 struct bkey_packed *k;
1228 struct bch_btree_ptr_v2 *bp;
1230 if (!bch2_btree_node_relock(trans, path, plevel))
1233 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1234 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
1236 bp = (void *) bkeyp_val(&l->b->format, k);
1237 bp->mem_ptr = (unsigned long)b;
1240 btree_node_unlock(path, plevel);
1243 static __always_inline int btree_path_down(struct btree_trans *trans,
1244 struct btree_path *path,
1246 unsigned long trace_ip)
1248 struct bch_fs *c = trans->c;
1249 struct btree_path_level *l = path_l(path);
1251 unsigned level = path->level - 1;
1252 enum six_lock_type lock_type = __btree_lock_want(path, level);
1253 struct bkey_buf tmp;
1256 EBUG_ON(!btree_node_locked(path, path->level));
1258 bch2_bkey_buf_init(&tmp);
1259 bch2_bkey_buf_unpack(&tmp, c, l->b,
1260 bch2_btree_node_iter_peek(&l->iter, l->b));
1262 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
1263 ret = PTR_ERR_OR_ZERO(b);
1267 mark_btree_node_locked(path, level, lock_type);
1268 btree_path_level_init(trans, path, b);
1270 if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
1271 unlikely(b != btree_node_mem_ptr(tmp.k)))
1272 btree_node_mem_ptr_set(trans, path, level + 1, b);
1274 if (flags & BTREE_ITER_PREFETCH)
1275 ret = btree_path_prefetch(trans, path);
1277 if (btree_node_read_locked(path, level + 1))
1278 btree_node_unlock(path, level + 1);
1279 path->level = level;
1281 bch2_btree_path_verify_locks(path);
1283 bch2_bkey_buf_exit(&tmp, c);
1287 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
1288 unsigned, unsigned long);
1290 static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
1291 unsigned long trace_ip)
1293 struct bch_fs *c = trans->c;
1294 struct btree_path *path;
1297 if (trans->in_traverse_all)
1300 trans->in_traverse_all = true;
1302 trans->restarted = false;
1304 trans_for_each_path(trans, path)
1305 path->should_be_locked = false;
1307 btree_trans_verify_sorted(trans);
1309 for (i = trans->nr_sorted - 2; i >= 0; --i) {
1310 struct btree_path *path1 = trans->paths + trans->sorted[i];
1311 struct btree_path *path2 = trans->paths + trans->sorted[i + 1];
1313 if (path1->btree_id == path2->btree_id &&
1314 path1->locks_want < path2->locks_want)
1315 __bch2_btree_path_upgrade(trans, path1, path2->locks_want);
1316 else if (!path1->locks_want && path2->locks_want)
1317 __bch2_btree_path_upgrade(trans, path1, 1);
1320 bch2_trans_unlock(trans);
1323 if (unlikely(ret == -ENOMEM)) {
1326 closure_init_stack(&cl);
1329 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
1334 if (unlikely(ret == -EIO)) {
1335 trans->error = true;
1339 BUG_ON(ret && ret != -EINTR);
1341 /* Now, redo traversals in correct order: */
1343 while (i < trans->nr_sorted) {
1344 path = trans->paths + trans->sorted[i];
1346 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1348 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
1352 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
1354 if (path->nodes_locked)
1359 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
1360 * and relock(), relock() won't relock since path->should_be_locked
1361 * isn't set yet, which is all fine
1363 trans_for_each_path(trans, path)
1364 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
1366 bch2_btree_cache_cannibalize_unlock(c);
1368 trans->in_traverse_all = false;
1370 trace_trans_traverse_all(trans->ip, trace_ip);
1374 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
1376 return __btree_path_traverse_all(trans, 0, _RET_IP_);
1379 static inline bool btree_path_good_node(struct btree_trans *trans,
1380 struct btree_path *path,
1381 unsigned l, int check_pos)
1383 if (!is_btree_node(path, l) ||
1384 !bch2_btree_node_relock(trans, path, l))
1387 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1389 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1394 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1395 struct btree_path *path,
1398 unsigned i, l = path->level;
1400 while (btree_path_node(path, l) &&
1401 !btree_path_good_node(trans, path, l, check_pos)) {
1402 btree_node_unlock(path, l);
1403 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1407 /* If we need intent locks, take them too: */
1409 i < path->locks_want && btree_path_node(path, i);
1411 if (!bch2_btree_node_relock(trans, path, i))
1413 btree_node_unlock(path, l);
1414 path->l[l].b = BTREE_ITER_NO_NODE_UP;
1422 * This is the main state machine for walking down the btree - walks down to a
1423 * specified depth
1425 * Returns 0 on success, -EIO on error (error reading in a btree node).
1427 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1428 * stashed in the iterator and returned from bch2_trans_exit().
1430 static int btree_path_traverse_one(struct btree_trans *trans,
1431 struct btree_path *path,
1433 unsigned long trace_ip)
1435 unsigned depth_want = path->level;
1439 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1440 * and re-traverse the path without a transaction restart:
1442 if (path->should_be_locked) {
1443 ret = bch2_btree_path_relock(trans, path, trace_ip) ? 0 : -EINTR;
1448 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1452 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1455 path->level = btree_path_up_until_good_node(trans, path, 0);
1458 * Note: path->nodes[path->level] may be temporarily NULL here - that
1459 * would indicate to other code that we got to the end of the btree,
1460 * here it indicates that relocking the root failed - it's critical that
1461 * btree_path_lock_root() comes next and that it can't fail
1463 while (path->level > depth_want) {
1464 ret = btree_path_node(path, path->level)
1465 ? btree_path_down(trans, path, flags, trace_ip)
1466 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1467 if (unlikely(ret)) {
1470 * No nodes at this level - got to the end of
1471 * the btree:
1477 __bch2_btree_path_unlock(path);
1478 path->level = depth_want;
1481 path->l[path->level].b =
1482 BTREE_ITER_NO_NODE_ERROR;
1484 path->l[path->level].b =
1485 BTREE_ITER_NO_NODE_DOWN;
1490 path->uptodate = BTREE_ITER_UPTODATE;
1492 BUG_ON((ret == -EINTR) != !!trans->restarted);
1493 bch2_btree_path_verify(trans, path);
1497 static int __btree_path_traverse_all(struct btree_trans *, int, unsigned long);
1499 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1500 struct btree_path *path, unsigned flags)
1504 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1507 ret = bch2_trans_cond_resched(trans) ?:
1508 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1509 if (unlikely(ret) && hweight64(trans->paths_allocated) == 1) {
1510 ret = __btree_path_traverse_all(trans, ret, _RET_IP_);
1511 BUG_ON(ret == -EINTR);
1517 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1518 struct btree_path *src)
1522 memcpy(&dst->pos, &src->pos,
1523 sizeof(struct btree_path) - offsetof(struct btree_path, pos));
1525 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1526 if (btree_node_locked(dst, i))
1527 six_lock_increment(&dst->l[i].b->c.lock,
1528 __btree_lock_want(dst, i));
1530 btree_path_check_sort(trans, dst, 0);
1533 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1536 struct btree_path *new = btree_path_alloc(trans, src);
1538 btree_path_copy(trans, new, src);
1539 __btree_path_get(new, intent);
1543 inline struct btree_path * __must_check
1544 bch2_btree_path_make_mut(struct btree_trans *trans,
1545 struct btree_path *path, bool intent)
1547 if (path->ref > 1 || path->preserve) {
1548 __btree_path_put(path, intent);
1549 path = btree_path_clone(trans, path, intent);
1550 path->preserve = false;
1551 #ifdef CONFIG_BCACHEFS_DEBUG
1552 path->ip_allocated = _RET_IP_;
1554 btree_trans_verify_sorted(trans);
1560 static struct btree_path * __must_check
1561 btree_path_set_pos(struct btree_trans *trans,
1562 struct btree_path *path, struct bpos new_pos,
1565 int cmp = bpos_cmp(new_pos, path->pos);
1566 unsigned l = path->level;
1568 EBUG_ON(trans->restarted);
1569 EBUG_ON(!path->ref);
1574 path = bch2_btree_path_make_mut(trans, path, intent);
1576 path->pos = new_pos;
1577 path->should_be_locked = false;
1579 btree_path_check_sort(trans, path, cmp);
1581 if (unlikely(path->cached)) {
1582 btree_node_unlock(path, 0);
1583 path->l[0].b = BTREE_ITER_NO_NODE_CACHED;
1584 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1588 l = btree_path_up_until_good_node(trans, path, cmp);
1590 if (btree_path_node(path, l)) {
1592 * We might have to skip over many keys, or just a few: try
1593 * advancing the node iterator, and if we have to skip over too
1594 * many keys just reinit it (or if we're rewinding, since that
1595 * is expensive).
1598 !btree_path_advance_to_pos(path, &path->l[l], 8))
1599 __btree_path_level_init(path, l);
1602 if (l != path->level) {
1603 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1604 __bch2_btree_path_unlock(path);
1607 bch2_btree_path_verify(trans, path);
1611 /* Btree path: main interface: */
1613 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1615 struct btree_path *next;
1617 next = prev_btree_path(trans, path);
1618 if (next && !btree_path_cmp(next, path))
1621 next = next_btree_path(trans, path);
1622 if (next && !btree_path_cmp(next, path))
1628 static bool have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1630 struct btree_path *next;
1632 next = prev_btree_path(trans, path);
1633 if (next && path_l(next)->b == path_l(path)->b)
1636 next = next_btree_path(trans, path);
1637 if (next && path_l(next)->b == path_l(path)->b)
1643 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1645 __bch2_btree_path_unlock(path);
1646 btree_path_list_remove(trans, path);
1647 trans->paths_allocated &= ~(1ULL << path->idx);
1650 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1652 struct btree_path *dup;
1654 EBUG_ON(trans->paths + path->idx != path);
1655 EBUG_ON(!path->ref);
1657 if (!__btree_path_put(path, intent))
1661 * Perhaps instead we should check for duplicate paths in traverse_all:
1663 if (path->preserve &&
1664 (dup = have_path_at_pos(trans, path))) {
1665 dup->preserve = true;
1666 path->preserve = false;
1669 if (!path->preserve &&
1670 have_node_at_pos(trans, path))
1671 __bch2_path_free(trans, path);
1675 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1677 struct btree_path *path;
1678 struct btree_insert_entry *i;
1682 btree_trans_verify_sorted(trans);
1684 trans_for_each_path_inorder(trans, path, idx)
1685 printk(KERN_ERR "path: idx %u ref %u:%u%s btree %s pos %s %pS\n",
1686 path->idx, path->ref, path->intent_ref,
1687 path->preserve ? " preserve" : "",
1688 bch2_btree_ids[path->btree_id],
1689 (bch2_bpos_to_text(&PBUF(buf), path->pos), buf),
1690 #ifdef CONFIG_BCACHEFS_DEBUG
1691 (void *) path->ip_allocated
1697 trans_for_each_update(trans, i)
1698 printk(KERN_ERR "update: btree %s %s %pS\n",
1699 bch2_btree_ids[i->btree_id],
1700 (bch2_bkey_val_to_text(&PBUF(buf), trans->c, bkey_i_to_s_c(i->k)), buf),
1701 (void *) i->ip_allocated);
1704 static struct btree_path *btree_path_alloc(struct btree_trans *trans,
1705 struct btree_path *pos)
1707 struct btree_path *path;
1710 if (unlikely(trans->paths_allocated ==
1711 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1)))) {
1712 bch2_dump_trans_paths_updates(trans);
1713 panic("trans path overflow\n");
1716 idx = __ffs64(~trans->paths_allocated);
1717 trans->paths_allocated |= 1ULL << idx;
1719 path = &trans->paths[idx];
1723 path->intent_ref = 0;
1724 path->nodes_locked = 0;
1725 path->nodes_intent_locked = 0;
1727 btree_path_list_add(trans, pos, path);
1731 struct btree_path *bch2_path_get(struct btree_trans *trans, bool cached,
1732 enum btree_id btree_id, struct bpos pos,
1733 unsigned locks_want, unsigned level,
1736 struct btree_path *path, *path_pos = NULL;
1739 BUG_ON(trans->restarted);
1741 trans_for_each_path_inorder(trans, path, i) {
1742 if (__btree_path_cmp(path,
1753 path_pos->cached == cached &&
1754 path_pos->btree_id == btree_id &&
1755 path_pos->level == level) {
1756 __btree_path_get(path_pos, intent);
1757 path = btree_path_set_pos(trans, path_pos, pos, intent);
1758 path->preserve = true;
1760 path = btree_path_alloc(trans, path_pos);
1763 __btree_path_get(path, intent);
1765 path->btree_id = btree_id;
1766 path->cached = cached;
1767 path->preserve = true;
1768 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1769 path->should_be_locked = false;
1770 path->level = level;
1771 path->locks_want = locks_want;
1772 path->nodes_locked = 0;
1773 path->nodes_intent_locked = 0;
1774 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1775 path->l[i].b = BTREE_ITER_NO_NODE_INIT;
1776 #ifdef CONFIG_BCACHEFS_DEBUG
1777 path->ip_allocated = _RET_IP_;
1779 btree_trans_verify_sorted(trans);
1782 if (path->intent_ref)
1783 locks_want = max(locks_want, level + 1);
1786 * If the path has locks_want greater than requested, we don't downgrade
1787 * it here - on transaction restart because btree node split needs to
1788 * upgrade locks, we might be putting/getting the iterator again.
1789 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1790 * a successful transaction commit.
1793 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1794 if (locks_want > path->locks_want) {
1795 path->locks_want = locks_want;
1796 btree_path_get_locks(trans, path, true, _THIS_IP_);
1802 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1807 BUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1809 if (!path->cached) {
1810 struct btree_path_level *l = path_l(path);
1811 struct bkey_packed *_k =
1812 bch2_btree_node_iter_peek_all(&l->iter, l->b);
1814 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1816 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1818 if (!k.k || bpos_cmp(path->pos, k.k->p))
1821 struct bkey_cached *ck = (void *) path->l[0].b;
1823 EBUG_ON(path->btree_id != ck->key.btree_id ||
1824 bkey_cmp(path->pos, ck->key.pos));
1826 /* BTREE_ITER_CACHED_NOFILL? */
1827 if (unlikely(!ck->valid))
1830 k = bkey_i_to_s_c(ck->k);
1837 return (struct bkey_s_c) { u, NULL };
1840 /* Btree iterators: */
1843 __bch2_btree_iter_traverse(struct btree_iter *iter)
1845 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1849 bch2_btree_iter_traverse(struct btree_iter *iter)
1853 iter->path = btree_path_set_pos(iter->trans, iter->path,
1854 btree_iter_search_key(iter),
1855 iter->flags & BTREE_ITER_INTENT);
1857 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1861 iter->path->should_be_locked = true;
1865 /* Iterate across nodes (leaf and interior nodes) */
1867 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1869 struct btree *b = NULL;
1872 EBUG_ON(iter->path->cached);
1873 bch2_btree_iter_verify(iter);
1875 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1879 b = btree_path_node(iter->path, iter->path->level);
1883 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1885 bkey_init(&iter->k);
1886 iter->k.p = iter->pos = b->key.k.p;
1887 iter->path->should_be_locked = true;
1889 bch2_btree_iter_verify_entry_exit(iter);
1890 bch2_btree_iter_verify(iter);
1895 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1897 struct btree_trans *trans = iter->trans;
1898 struct btree_path *path = iter->path;
1899 struct btree *b = NULL;
1902 EBUG_ON(iter->path->cached);
1903 bch2_btree_iter_verify(iter);
1905 /* already got to end? */
1906 if (!btree_path_node(path, path->level))
1909 bch2_trans_cond_resched(trans);
1911 btree_node_unlock(path, path->level);
1912 path->l[path->level].b = BTREE_ITER_NO_NODE_UP;
1915 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1916 ret = bch2_btree_path_traverse(trans, path, iter->flags);
1921 b = btree_path_node(path, path->level);
1925 if (bpos_cmp(iter->pos, b->key.k.p) < 0) {
1927 * Haven't gotten to the end of the parent node: go back down to
1928 * the next child node
1931 btree_path_set_pos(trans, path, bpos_successor(iter->pos),
1932 iter->flags & BTREE_ITER_INTENT);
1934 /* Unlock to avoid screwing up our lock invariants: */
1935 btree_node_unlock(path, path->level);
1937 path->level = iter->min_depth;
1938 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1939 bch2_btree_iter_verify(iter);
1941 ret = bch2_btree_path_traverse(trans, path, iter->flags);
1947 b = path->l[path->level].b;
1950 bkey_init(&iter->k);
1951 iter->k.p = iter->pos = b->key.k.p;
1952 iter->path->should_be_locked = true;
1954 bch2_btree_iter_verify_entry_exit(iter);
1955 bch2_btree_iter_verify(iter);
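/*
 * Example (sketch; iter is assumed to have been set up with
 * bch2_trans_node_iter_init()): walking every node in a btree. On error,
 * peek_node()/next_node() return NULL and the error is stashed and
 * returned from bch2_trans_exit():
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b;
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		// visit b
 *	}
 */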
1960 /* Iterate across keys (in leaf nodes only) */
1962 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1964 struct bpos pos = iter->k.p;
1965 bool ret = bpos_cmp(pos, SPOS_MAX) != 0;
1967 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1968 pos = bkey_successor(iter, pos);
1969 bch2_btree_iter_set_pos(iter, pos);
1973 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1975 struct bpos pos = bkey_start_pos(&iter->k);
1976 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1977 ? bpos_cmp(pos, POS_MIN)
1978 : bkey_cmp(pos, POS_MIN)) != 0;
1980 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1981 pos = bkey_predecessor(iter, pos);
1982 bch2_btree_iter_set_pos(iter, pos);
1987 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
1988 * current position
1990 struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
1992 struct btree_trans *trans = iter->trans;
1993 struct bpos search_key = btree_iter_search_key(iter);
1994 struct bkey_i *next_update;
1998 EBUG_ON(iter->path->cached || iter->path->level);
1999 bch2_btree_iter_verify(iter);
2000 bch2_btree_iter_verify_entry_exit(iter);
2003 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2004 iter->flags & BTREE_ITER_INTENT);
2006 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2007 if (unlikely(ret)) {
2008 /* ensure that iter->k is consistent with iter->pos: */
2009 bch2_btree_iter_set_pos(iter, iter->pos);
2010 k = bkey_s_c_err(ret);
2014 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2015 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2017 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
2019 /* In the btree, deleted keys sort before non-deleted: */
2020 if (k.k && bkey_deleted(k.k) &&
2022 bpos_cmp(k.k->p, next_update->k.p) <= 0)) {
2023 search_key = k.k->p;
2028 bpos_cmp(next_update->k.p,
2029 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
2030 iter->k = next_update->k;
2031 k = bkey_i_to_s_c(next_update);
2036 * We can never have a key in a leaf node at POS_MAX, so
2037 * we don't have to check these successor() calls:
2039 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2040 !bch2_snapshot_is_ancestor(trans->c,
2043 search_key = bpos_successor(k.k->p);
2047 if (bkey_whiteout(k.k) &&
2048 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2049 search_key = bkey_successor(iter, k.k->p);
2054 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
2055 /* Advance to next leaf node: */
2056 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
2059 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2066 * iter->pos should be monotonically increasing, and always be equal to
2067 * the key we just returned - except extents can straddle iter->pos:
2069 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2071 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
2072 iter->pos = bkey_start_pos(k.k);
2074 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2075 iter->pos.snapshot = iter->snapshot;
2077 cmp = bpos_cmp(k.k->p, iter->path->pos);
2079 iter->path = bch2_btree_path_make_mut(trans, iter->path,
2080 iter->flags & BTREE_ITER_INTENT);
2081 iter->path->pos = k.k->p;
2082 btree_path_check_sort(trans, iter->path, cmp);
2085 iter->path->should_be_locked = true;
2087 bch2_btree_iter_verify_entry_exit(iter);
2088 bch2_btree_iter_verify(iter);
2089 ret = bch2_btree_iter_verify_ret(iter, k);
2091 return bkey_s_c_err(ret);
2097 * bch2_btree_iter_next: returns first key greater than iterator's current
2098 * position
2100 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2102 if (!bch2_btree_iter_advance(iter))
2103 return bkey_s_c_null;
2105 return bch2_btree_iter_peek(iter);
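/*
 * Example (sketch; trans and inum are assumed to be set up by the
 * caller): a typical forward scan with the iterators above:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 *			     POS(inum, 0), BTREE_ITER_PREFETCH);
 *	while ((k = bch2_btree_iter_peek(&iter)).k &&
 *	       !(ret = bkey_err(k)) &&
 *	       k.k->p.inode == inum) {
 *		// process k
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */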
2109 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2110 * iterator's current position
2112 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2114 struct btree_trans *trans = iter->trans;
2115 struct bpos search_key = iter->pos;
2116 struct btree_path *saved_path = NULL;
2118 struct bkey saved_k;
2119 const struct bch_val *saved_v;
2122 EBUG_ON(iter->path->cached || iter->path->level);
2123 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2124 bch2_btree_iter_verify(iter);
2125 bch2_btree_iter_verify_entry_exit(iter);
2127 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2128 search_key.snapshot = U32_MAX;
2131 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2132 iter->flags & BTREE_ITER_INTENT);
2134 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2135 if (unlikely(ret)) {
2136 /* ensure that iter->k is consistent with iter->pos: */
2137 bch2_btree_iter_set_pos(iter, iter->pos);
2138 k = bkey_s_c_err(ret);
2142 k = btree_path_level_peek(trans->c, iter->path,
2143 &iter->path->l[0], &iter->k);
2145 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2146 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2147 : bpos_cmp(k.k->p, search_key) > 0))
2148 k = btree_path_level_prev(trans->c, iter->path,
2149 &iter->path->l[0], &iter->k);
2151 btree_path_check_sort(trans, iter->path, 0);
2154 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2155 if (k.k->p.snapshot == iter->snapshot)
2159 * If we have a saved candidate, and we're no
2160 * longer at the same _key_ (not pos), return
2161 * it:
2163 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2164 bch2_path_put(trans, iter->path,
2165 iter->flags & BTREE_ITER_INTENT);
2166 iter->path = saved_path;
2173 if (bch2_snapshot_is_ancestor(iter->trans->c,
2177 bch2_path_put(trans, saved_path,
2178 iter->flags & BTREE_ITER_INTENT);
2179 saved_path = btree_path_clone(trans, iter->path,
2180 iter->flags & BTREE_ITER_INTENT);
2185 search_key = bpos_predecessor(k.k->p);
2189 if (bkey_whiteout(k.k) &&
2190 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2191 search_key = bkey_predecessor(iter, k.k->p);
2192 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2193 search_key.snapshot = U32_MAX;
2198 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2199 /* Advance to previous leaf node: */
2200 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2202 /* Start of btree: */
2203 bch2_btree_iter_set_pos(iter, POS_MIN);
2209 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2211 /* Extents can straddle iter->pos: */
2212 if (bkey_cmp(k.k->p, iter->pos) < 0)
2215 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2216 iter->pos.snapshot = iter->snapshot;
2219 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2220 iter->path->should_be_locked = true;
2222 bch2_btree_iter_verify_entry_exit(iter);
2223 bch2_btree_iter_verify(iter);
2229 * bch2_btree_iter_prev: returns first key less than iterator's current
2230 * position
2232 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2234 if (!bch2_btree_iter_rewind(iter))
2235 return bkey_s_c_null;
2237 return bch2_btree_iter_peek_prev(iter);
2240 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2242 struct btree_trans *trans = iter->trans;
2243 struct bpos search_key;
2247 EBUG_ON(iter->path->level);
2248 bch2_btree_iter_verify(iter);
2249 bch2_btree_iter_verify_entry_exit(iter);
2251 /* extents can't span inode numbers: */
2252 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2253 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2254 if (iter->pos.inode == KEY_INODE_MAX)
2255 return bkey_s_c_null;
2257 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2260 search_key = btree_iter_search_key(iter);
2261 iter->path = btree_path_set_pos(trans, iter->path, search_key,
2262 iter->flags & BTREE_ITER_INTENT);
2264 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2266 return bkey_s_c_err(ret);
2268 if ((iter->flags & BTREE_ITER_CACHED) ||
2269 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2270 struct bkey_i *next_update;
2272 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
2273 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
2277 !bpos_cmp(next_update->k.p, iter->pos)) {
2278 iter->k = next_update->k;
2279 k = bkey_i_to_s_c(next_update);
2281 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2286 if (iter->flags & BTREE_ITER_INTENT) {
2287 struct btree_iter iter2;
2289 bch2_trans_copy_iter(&iter2, iter);
2290 k = bch2_btree_iter_peek(&iter2);
2292 if (k.k && !bkey_err(k)) {
2296 bch2_trans_iter_exit(trans, &iter2);
2298 struct bpos pos = iter->pos;
2300 k = bch2_btree_iter_peek(iter);
2304 if (unlikely(bkey_err(k)))
2307 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2309 if (bkey_cmp(iter->pos, next) < 0) {
2310 bkey_init(&iter->k);
2311 iter->k.p = iter->pos;
2312 bch2_key_resize(&iter->k,
2313 min_t(u64, KEY_SIZE_MAX,
2314 (next.inode == iter->pos.inode
2319 k = (struct bkey_s_c) { &iter->k, NULL };
2320 EBUG_ON(!k.k->size);
2324 iter->path->should_be_locked = true;
2326 bch2_btree_iter_verify_entry_exit(iter);
2327 bch2_btree_iter_verify(iter);
2328 ret = bch2_btree_iter_verify_ret(iter, k);
2330 return bkey_s_c_err(ret);
2335 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2337 if (!bch2_btree_iter_advance(iter))
2338 return bkey_s_c_null;
2340 return bch2_btree_iter_peek_slot(iter);
2343 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2345 if (!bch2_btree_iter_rewind(iter))
2346 return bkey_s_c_null;
2348 return bch2_btree_iter_peek_slot(iter);
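/*
 * Example (sketch; iter/k/ret declared as in the scan example above): a
 * point lookup with peek_slot(), which returns a key for every valid
 * position - a hole comes back as a KEY_TYPE_deleted key rather than
 * nothing:
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		; // a real key exists at pos
 *	bch2_trans_iter_exit(&trans, &iter);
 */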
2351 /* new transactional stuff: */
2353 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2354 struct btree_path *path)
2356 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2357 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2358 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2361 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2363 #ifdef CONFIG_BCACHEFS_DEBUG
2366 for (i = 0; i < trans->nr_sorted; i++)
2367 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2371 static void btree_trans_verify_sorted(struct btree_trans *trans)
2373 #ifdef CONFIG_BCACHEFS_DEBUG
2374 struct btree_path *path, *prev = NULL;
2377 trans_for_each_path_inorder(trans, path, i) {
2378 BUG_ON(prev && btree_path_cmp(prev, path) > 0);
2384 static inline void btree_path_swap(struct btree_trans *trans,
2385 struct btree_path *l, struct btree_path *r)
2387 swap(l->sorted_idx, r->sorted_idx);
2388 swap(trans->sorted[l->sorted_idx],
2389 trans->sorted[r->sorted_idx]);
2391 btree_path_verify_sorted_ref(trans, l);
2392 btree_path_verify_sorted_ref(trans, r);
2395 static void btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2398 struct btree_path *n;
2401 n = prev_btree_path(trans, path);
2402 if (n && btree_path_cmp(n, path) > 0) {
2404 btree_path_swap(trans, n, path);
2405 n = prev_btree_path(trans, path);
2406 } while (n && btree_path_cmp(n, path) > 0);
2413 n = next_btree_path(trans, path);
2414 if (n && btree_path_cmp(path, n) > 0) {
2416 btree_path_swap(trans, path, n);
2417 n = next_btree_path(trans, path);
2418 } while (n && btree_path_cmp(path, n) > 0);
2422 btree_trans_verify_sorted(trans);
2425 static inline void btree_path_list_remove(struct btree_trans *trans,
2426 struct btree_path *path)
2430 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2432 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2434 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2435 trans->paths[trans->sorted[i]].sorted_idx = i;
2437 path->sorted_idx = U8_MAX;
2439 btree_trans_verify_sorted_refs(trans);
2442 static inline void btree_path_list_add(struct btree_trans *trans,
2443 struct btree_path *pos,
2444 struct btree_path *path)
2448 btree_trans_verify_sorted_refs(trans);
2450 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
2452 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2454 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2455 trans->paths[trans->sorted[i]].sorted_idx = i;
2457 btree_trans_verify_sorted_refs(trans);
2460 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2463 bch2_path_put(trans, iter->path,
2464 iter->flags & BTREE_ITER_INTENT);
static void __bch2_trans_iter_init(struct btree_trans *trans,
				   struct btree_iter *iter,
				   unsigned btree_id, struct bpos pos,
				   unsigned locks_want,
				   unsigned depth,
				   unsigned flags)
{
	EBUG_ON(trans->restarted);

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	iter->trans	= trans;
	iter->path	= NULL;
	iter->btree_id	= btree_id;
	iter->min_depth	= depth;
	iter->flags	= flags;
	iter->snapshot	= pos.snapshot;
	iter->pos	= pos;
	iter->k.type	= KEY_TYPE_deleted;
	iter->k.p	= pos;
	iter->k.size	= 0;

	iter->path = bch2_path_get(trans,
				   flags & BTREE_ITER_CACHED,
				   btree_id,
				   iter->pos,
				   locks_want,
				   depth,
				   flags & BTREE_ITER_INTENT);
}
void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos,
			       0, 0, flags);
}
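
/*
 * A minimal caller-side sketch (hypothetical, for illustration only):
 * iterators live on the caller's stack and must be paired with
 * bch2_trans_iter_exit() so the underlying btree_path is released:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_inodes, POS_MIN, 0);
 *	k = bch2_btree_iter_peek(&iter);
 *	if (!bkey_err(k)) {
 *		... use k ...
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */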
void bch2_trans_node_iter_init(struct btree_trans *trans,
			       struct btree_iter *iter,
			       enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
			       BTREE_ITER_NOT_EXTENTS|
			       __BTREE_ITER_ALL_SNAPSHOTS|
			       BTREE_ITER_ALL_SNAPSHOTS|
			       flags);
	BUG_ON(iter->path->locks_want	 < min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->path->level	!= depth);
	BUG_ON(iter->min_depth		!= depth);
}
void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
	*dst = *src;
	if (src->path)
		__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size_t new_top = trans->mem_top + size;
	void *p;

	if (new_top > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(new_top);
		void *new_mem;

		WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

		new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			memcpy(new_mem, trans->mem, trans->mem_top);
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-ENOMEM);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, _RET_IP_, new_bytes);
			btree_trans_restart(trans);
			return ERR_PTR(-EINTR);
		}
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
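
/*
 * A minimal caller-side sketch (hypothetical): allocations are bump-allocated
 * out of trans->mem and live until the transaction is reset, so callers check
 * for errors but never free:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	if (IS_ERR(k))
 *		return PTR_ERR(k);
 *
 * A returned -EINTR means the buffer was reallocated and the transaction must
 * be restarted.
 */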
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return EINTR when the trylock fails. When this occurs,
 * bch2_trans_begin() should be called and the transaction retried.
 */
void bch2_trans_begin(struct btree_trans *trans)
{
	struct btree_insert_entry *i;
	struct btree_path *path;

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);

	trans->extra_journal_res	= 0;
	trans->nr_updates		= 0;
	trans->mem_top			= 0;

	trans->hooks			= NULL;
	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	trans_for_each_path(trans, path) {
		path->should_be_locked = false;

		/*
		 * XXX: we probably shouldn't be doing this if the transaction
		 * was restarted, but currently we still overflow transaction
		 * iterators if we do that
		 */
		if (!path->ref && !path->preserve)
			__bch2_path_free(trans, path);
		else if (!path->ref)
			path->preserve = false;
	}

	bch2_trans_cond_resched(trans);

	if (trans->restarted)
		bch2_btree_path_traverse_all(trans);

	trans->restarted = false;
}
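
/*
 * A minimal retry loop (hypothetical caller, for illustration) showing how
 * bch2_trans_begin() pairs with an -EINTR restart; do_work() stands in for
 * the transaction body:
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_work(&trans);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 */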
static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
	size_t paths_bytes	= sizeof(struct btree_path) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);

	trans->paths		= p; p += paths_bytes;
	trans->updates		= p; p += updates_bytes;
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
	__acquires(&c->btree_trans_barrier)
{
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->ip	= _RET_IP_;

	bch2_trans_alloc_paths(trans, c);

	if (expected_mem_bytes) {
		trans->mem_bytes = roundup_pow_of_two(expected_mem_bytes);
		trans->mem = kmalloc(trans->mem_bytes, GFP_KERNEL|__GFP_NOFAIL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		}
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}
static void check_btree_paths_leaked(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %pS!", (void *) trans->ip);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
			       bch2_btree_ids[path->btree_id],
			       (void *) path->ip_allocated);
	/* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}
int bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct btree_insert_entry *i;
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	mutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&c->btree_trans_lock);
#endif

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);

	/* Poison the pointers so use-after-exit faults immediately: */
	trans->mem	= (void *) 0x1;
	trans->paths	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}
static void __maybe_unused
bch2_btree_path_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     bool cached)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, cached));
}
#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}
#endif
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_path *path;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_locks(trans))
			continue;

		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_path(trans, path) {
			if (!path->nodes_locked)
				continue;

			pr_buf(out, "  path %u %c l=%u %s:",
			       path->idx,
			       path->cached ? 'c' : 'b',
			       path->level,
			       bch2_btree_ids[path->btree_id]);
			bch2_bpos_to_text(out, path->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(path, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(path, l) ? "i" : "r", l);
					bch2_btree_path_node_to_text(out,
							(void *) path->l[l].b,
							path->cached);
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			path = &trans->paths[trans->locking_path_idx];
			pr_buf(out, "  locking path %u %c l=%u %s:",
			       trans->locking_path_idx,
			       path->cached ? 'c' : 'b',
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_path_node_to_text(out,
					(void *) b, path->cached);
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
	cleanup_srcu_struct(&c->btree_trans_barrier);
}
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return  init_srcu_struct(&c->btree_trans_barrier) ?:
		mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
			sizeof(struct btree_path) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX);
}