// SPDX-License-Identifier: GPL-2.0

#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "subvolume.h"

#include <linux/prandom.h>
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_trans_verify_sorted(struct btree_trans *);
inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *,
					struct btree_path *, int);

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
#ifdef CONFIG_BCACHEFS_DEBUG
	return iter->ip_allocated;

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		return bch2_trans_relock(trans);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id r_btree_id,
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return cmp_int(l->btree_id, r_btree_id) ?:
	       cmp_int((int) l->cached, (int) r_cached) ?:
	       bpos_cmp(l->pos, r_pos) ?:
	      -cmp_int(l->level, r_level);

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
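/*
 * Illustrative example: two paths to the same btree and position sort
 * parent before child - the negated level comparison above puts higher
 * (interior) levels first, matching top-down lock acquisition order.
 */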
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
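/*
 * Illustrative example: with BTREE_ITER_ALL_SNAPSHOTS, SPOS(1, 2, 3)
 * advances to SPOS(1, 2, 4), the snapshot field being least significant;
 * otherwise the snapshot field stays pinned to iter->snapshot and only
 * inode:offset advance.
 */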
static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
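/*
 * Note: extents are indexed by their end position, so to find the extent
 * covering iter->pos the search must start just after pos - hence the
 * successor above.
 */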
static inline bool btree_path_pos_before_node(struct btree_path *path,
	return bpos_cmp(path->pos, b->data->min_key) < 0;

static inline bool btree_path_pos_after_node(struct btree_path *path,
	return bpos_cmp(b->key.k.p, path->pos) < 0;

static inline bool btree_path_pos_in_node(struct btree_path *path,
	return path->btree_id == b->c.btree_id &&
	       !btree_path_pos_before_node(path, b) &&
	       !btree_path_pos_after_node(path, b);

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       bkey_cmp(ck->key.pos, path->pos));

		btree_node_unlock(trans, path, 0);

static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;

	if (!bch2_debug_check_iterators)

	locked = btree_node_locked(path, level);

		bch2_btree_path_verify_cached(trans, path);

	if (!btree_path_node(path, level))

	if (!bch2_btree_node_relock_notrace(trans, path, level))

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {

		btree_node_unlock(trans, path, level);

	bch2_bpos_to_text(&buf1, path->pos);

		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&buf2, &uk);
		prt_printf(&buf2, "(none)");

		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&buf3, &uk);
		prt_printf(&buf3, "(none)");

	panic("path should be %s key at level %u:\n"
	      msg, level, buf1.buf, buf2.buf, buf3.buf);

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
	struct bch_fs *c = trans->c;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		BUG_ON(!path->cached &&
		       c->btree_roots[path->btree_id].b->c.level > i);

		bch2_btree_path_verify_level(trans, path, i);

	bch2_btree_path_verify_locks(path);

void bch2_trans_verify_paths(struct btree_trans *trans)
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify(trans, path);

static void bch2_btree_iter_verify(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, iter->update_path);
	bch2_btree_path_verify(trans, iter->path);

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
	BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
	       bkey_cmp(iter->pos, iter->k.p) > 0);

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;

	if (!bch2_debug_check_iterators)

	if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))

	if (bkey_err(k) || !k.k)

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_NOPRESERVE|
			     BTREE_ITER_ALL_SNAPSHOTS);
	prev = bch2_btree_iter_prev(&copy);

	ret = bkey_err(prev);

	if (!bkey_cmp(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"

	bch2_trans_iter_exit(trans, &copy);

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
	struct btree_path *path;
	struct printbuf buf = PRINTBUF;

	trans_for_each_path_inorder(trans, path, idx) {
		int cmp = cmp_int(path->btree_id, id) ?:
			  cmp_int(path->cached, key_cache);

		if (!btree_node_locked(path, 0) ||
		    !path->should_be_locked)

		if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
		    bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)

		if (!bkey_cmp(pos, path->pos))

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_ids[id], buf.buf,
	      key_cache ? " cached" : "");

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct bkey_packed *k)
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct bkey_packed *where)
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct bkey_packed *where)
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree_node_iter *node_iter,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;
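	/*
	 * i.e. the iterator's current key starts within the range of u64s
	 * being overwritten, so its stored offset may no longer be valid:
	 */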
	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)

	/* didn't find the bset in the iterator - might have to re-add it: */
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);

		/* Iterator is after key that changed */

		set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {

	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);

		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;

	bch2_btree_node_iter_sort(node_iter, b);

	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {

				btree_node_iter_set_set_pos(node_iter,

void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_path *linked;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);

	trans_for_each_path_with_node(trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey_packed *k)
	/*
	 * signal to bch2_btree_iter_peek_slot() that we're currently at
	 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;

	return bkey_disassemble(l->b, k, u);

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	bch2_btree_path_verify_level(trans, path, l - path->l);

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	bch2_btree_path_verify_level(trans, path, l - path->l);

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
	struct bkey_packed *k;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)

		bch2_btree_node_iter_advance(&l->iter, l->b);

static inline void __btree_path_level_init(struct btree_path *path,
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 */
		bch2_btree_node_iter_peek(&l->iter, l->b);

inline void bch2_btree_path_level_init(struct btree_trans *trans,
				       struct btree_path *path,
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));
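	/*
	 * The six lock sequence number should be even here: an odd seq
	 * indicates the node is currently write locked, in which case
	 * caching it against this path would be unsafe:
	 */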
	EBUG_ON(b->c.lock.state.seq & 1);

	path->l[b->c.level].lock_seq = b->c.lock.state.seq;
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);

/* Btree path: fixups after btree node updates: */

/*
 * A btree node is being replaced - update the iterator to point to the new
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->uptodate == BTREE_ITER_UPTODATE &&
		    btree_path_pos_in_node(path, b)) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(trans, path, b->c.level, t);

			bch2_btree_path_level_init(trans, path, b);

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path)
		__btree_path_level_init(path, b->c.level);

/* Btree path: traverse, set_pos: */

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned long trace_ip)
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
	enum six_lock_type lock_type;

	EBUG_ON(path->nodes_locked);

		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, &b->c,
				      path->level, lock_type, trace_ip);
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)

			mark_btree_node_locked(trans, path, path->level, lock_type);
			bch2_btree_path_level_init(trans, path, b);

		six_unlock_type(&b->c.lock, lock_type);

static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
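	/*
	 * Readahead heuristic: before the filesystem is fully started (i.e.
	 * during journal replay/recovery) prefetch aggressively; once
	 * running, keep readahead small, and skip it entirely for interior
	 * nodes above level 1:
	 */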
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,

		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
	struct bch_fs *c = trans->c;

	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,

		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
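/*
 * btree_ptr_v2 keys embed a mem_ptr field caching the address of the
 * in-memory btree node, letting later traversals skip the btree node
 * cache hash table lookup; it's updated here after the node is read:
 */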
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

		btree_node_unlock(trans, path, plevel);

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     struct bkey_buf *out)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;

	__bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if (flags & BTREE_ITER_PREFETCH)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);

static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned long trace_ip)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(!replay_done)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);

		bch2_bkey_buf_unpack(&tmp, c, l->b,
				     bch2_btree_node_iter_peek(&l->iter, l->b));

		if (flags & BTREE_ITER_PREFETCH) {
			ret = btree_path_prefetch(trans, path);

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);

	if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level, lock_type);

	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);

	bch2_bkey_buf_exit(&tmp, c);

static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
				   unsigned, unsigned long);

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;

	trans->restarted = 0;
	trans->traverse_all_idx = U8_MAX;

	trans_for_each_path(trans, path)
		path->should_be_locked = false;

	btree_trans_verify_sorted(trans);

	bch2_trans_unlock(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		closure_init_stack(&cl);

		ret = bch2_btree_cache_cannibalize_lock(c, &cl);

	/* Now, redo traversals in correct order: */
	trans->traverse_all_idx = 0;
	while (trans->traverse_all_idx < trans->nr_sorted) {
		path = trans->paths + trans->sorted[trans->traverse_all_idx];

		/*
		 * Traversing a path can cause another path to be added at about
		 */
		if (path->uptodate) {
			ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||

			BUG_ON(path->uptodate);

			trans->traverse_all_idx++;

	/*
	 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
	 * and relock(), relock() won't relock since path->should_be_locked
	 * isn't set yet, which is all fine
	 */
	trans_for_each_path(trans, path)
		BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);

	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);

static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
	unsigned i, l = path->level;

	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	     i < path->locks_want && btree_path_node(path, i);
		if (!bch2_btree_node_relock(trans, path, i)) {
			__btree_path_set_level_up(trans, path, l++);

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		: __btree_path_up_until_good_node(trans, path, check_pos);

/*
 * This is the main state machine for walking down the btree - walks down to a
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_path_traverse_one(struct btree_trans *trans,
				   struct btree_path *path,
				   unsigned long trace_ip)
	unsigned depth_want = path->level;
	int ret = trans->restarted;

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);

		ret = bch2_btree_path_traverse_cached(trans, path, flags);

	if (unlikely(path->level >= BTREE_MAX_DEPTH))

	path->level = btree_path_up_until_good_node(trans, path, 0);

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			/*
			 * No nodes at this level - got to the end of
			 */
			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);

	path->uptodate = BTREE_ITER_UPTODATE;

	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
	bch2_btree_path_verify(trans, path);

int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  struct btree_path *path, unsigned flags)
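	/*
	 * Debug-only fault injection, currently compiled out (note the
	 * "0 &&"): inject random transaction restarts, with the probability
	 * decreasing exponentially as trans->restart_count grows so forward
	 * progress is still made:
	 */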
	if (0 && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		unsigned restart_probability_bits = 4 << min(trans->restart_count, 32U);
		u64 mask = ~(~0ULL << restart_probability_bits);

		if ((prandom_u32() & mask) == mask) {
			trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);

	if (path->uptodate < BTREE_ITER_NEED_RELOCK)

	return bch2_trans_cond_resched(trans) ?:
		btree_path_traverse_one(trans, path, flags, _RET_IP_);

static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
			    struct btree_path *src)
	unsigned i, offset = offsetof(struct btree_path, pos);
	int cmp = btree_path_cmp(dst, src);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);

	bch2_btree_path_check_sort_fast(trans, dst, cmp);

static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
	struct btree_path *new = btree_path_alloc(trans, src);

	btree_path_copy(trans, new, src);
	__btree_path_get(new, intent);

struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
	if (path->ref > 1 || path->preserve) {
		__btree_path_put(path, intent);
		path = btree_path_clone(trans, path, intent);
		path->preserve = false;
#ifdef CONFIG_BCACHEFS_DEBUG
		path->ip_allocated = ip;

		btree_trans_verify_sorted(trans);

	path->should_be_locked = false;

struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			struct btree_path *path, struct bpos new_pos,
			bool intent, unsigned long ip)
	int cmp = bpos_cmp(new_pos, path->pos);
	unsigned l = path->level;

	EBUG_ON(trans->restarted);
	EBUG_ON(!path->ref);

	path = bch2_btree_path_make_mut(trans, path, intent, ip);

	path->pos = new_pos;

	bch2_btree_path_check_sort_fast(trans, path, cmp);

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

	l = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, l)) {
		BUG_ON(!btree_node_locked(path, l));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 */
		    !btree_path_advance_to_pos(path, &path->l[l], 8))
			__btree_path_level_init(path, l);

	if (unlikely(l != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);

	bch2_btree_path_verify(trans, path);

/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)

static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
	__bch2_btree_path_unlock(trans, path);
	btree_path_list_remove(trans, path);
	trans->paths_allocated &= ~(1ULL << path->idx);

void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
	struct btree_path *dup;

	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))

	if (path->should_be_locked &&
	    !trans->restarted &&
	    (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))

		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;

	__bch2_path_free(trans, path);

static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path,
	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))

	__bch2_path_free(trans, path);

void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
	struct btree_insert_entry *i;

	prt_printf(buf, "transaction updates for %s journal seq %llu",
		   trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS",
			   bch2_btree_ids[i->btree_id],
			   (void *) i->ip_allocated);

		prt_printf(buf, " old ");
		bch2_bkey_val_to_text(buf, trans->c, old);

		prt_printf(buf, " new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));

	printbuf_indent_sub(buf, 2);

void bch2_dump_trans_updates(struct btree_trans *trans)
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
	prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
		   path->idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   bch2_btree_ids[path->btree_id],
	bch2_bpos_to_text(out, path->pos);

	prt_printf(out, " locks %u", path->nodes_locked);
#ifdef CONFIG_BCACHEFS_DEBUG
	prt_printf(out, " %pS", (void *) path->ip_allocated);

void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
	struct btree_path *path;

	trans_for_each_path_inorder(trans, path, idx)
		bch2_btree_path_to_text(out, path);

void bch2_dump_trans_paths_updates(struct btree_trans *trans)
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

static void bch2_trans_update_max_paths(struct btree_trans *trans)
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
			s->nr_max_paths = trans->nr_max_paths =
				hweight64(trans->paths_allocated);
			swap(s->max_paths_text, buf.buf);
		mutex_unlock(&s->lock);

	printbuf_exit(&buf);

static noinline void btree_path_overflow(struct btree_trans *trans)
	bch2_dump_trans_paths_updates(trans);
	panic("trans path overflow\n");

static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
						  struct btree_path *pos)
	struct btree_path *path;

	if (unlikely(trans->paths_allocated ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
		btree_path_overflow(trans);
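	/*
	 * trans->paths_allocated is a 64 bit bitmap of which path slots are
	 * in use; grab the first free slot:
	 */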
	idx = __ffs64(~trans->paths_allocated);
	trans->paths_allocated |= 1ULL << idx;

	if (unlikely(idx > trans->nr_max_paths))
		bch2_trans_update_max_paths(trans);

	path = &trans->paths[idx];

	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, path);

struct btree_path *bch2_path_get(struct btree_trans *trans,
				 enum btree_id btree_id, struct bpos pos,
				 unsigned locks_want, unsigned level,
				 unsigned flags, unsigned long ip)
	struct btree_path *path, *path_pos = NULL;
	bool cached = flags & BTREE_ITER_CACHED;
	bool intent = flags & BTREE_ITER_INTENT;

	BUG_ON(trans->restarted);
	btree_trans_verify_sorted(trans);
	bch2_trans_verify_locks(trans);

	trans_for_each_path_inorder(trans, path, i) {
		if (__btree_path_cmp(path,

	    path_pos->cached	== cached &&
	    path_pos->btree_id	== btree_id &&
	    path_pos->level	== level) {
		__btree_path_get(path_pos, intent);
		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);

		path = btree_path_alloc(trans, path_pos);

		__btree_path_get(path, intent);
		path->btree_id		= btree_id;
		path->cached		= cached;
		path->uptodate		= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked	= false;
		path->level		= level;
		path->locks_want	= locks_want;
		path->nodes_locked	= 0;
		for (i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b	= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef CONFIG_BCACHEFS_DEBUG
		path->ip_allocated	= ip;

		btree_trans_verify_sorted(trans);

	if (!(flags & BTREE_ITER_NOPRESERVE))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */
	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);

inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);

		if (!k.k || bpos_cmp(path->pos, k.k->p))

		struct bkey_cached *ck = (void *) path->l[0].b;

			(path->btree_id != ck->key.btree_id ||
			 bkey_cmp(path->pos, ck->key.pos)));
		EBUG_ON(!ck || !ck->valid);

		k = bkey_i_to_s_c(ck->k);

	return (struct bkey_s_c) { u, NULL };

/* Btree iterators: */

__bch2_btree_iter_traverse(struct btree_iter *iter)
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);

bch2_btree_iter_traverse(struct btree_iter *iter)
	iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);

	btree_path_set_should_be_locked(iter->path);

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);

	b = btree_path_node(iter->path, iter->path->level);

	BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct btree_path *path = iter->path;
	struct btree *b = NULL;

	BUG_ON(trans->restarted);
	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))

	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);

	b = btree_path_node(path, path->level + 1);

	if (!bpos_cmp(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
			bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
						iter->flags & BTREE_ITER_INTENT,
						btree_iter_ip_allocated(iter));

		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, path, iter->flags);

		b = path->l[path->level].b;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);
	BUG_ON(iter->path->uptodate);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
	if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
		struct bpos pos = iter->k.p;
		bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
			    ? bpos_cmp(pos, SPOS_MAX)
			    : bkey_cmp(pos, SPOS_MAX)) != 0;

		if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
			pos = bkey_successor(iter, pos);
		bch2_btree_iter_set_pos(iter, pos);

		if (!btree_path_node(iter->path, iter->path->level))

		iter->advanced = true;

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		    ? bpos_cmp(pos, POS_MIN)
		    : bkey_cmp(pos, POS_MIN)) != 0;

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
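/*
 * Search this transaction's pending (uncommitted) updates for the first
 * update to @btree_id at or after @pos - peek merges these with keys on
 * disk so a transaction sees its own uncommitted writes:
 */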
static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
						      enum btree_id btree_id,
	struct btree_insert_entry *i;
	struct bkey_i *ret = NULL;

	trans_for_each_update(trans, i) {
		if (i->btree_id < btree_id)
		if (i->btree_id > btree_id)
		if (bpos_cmp(i->k->k.p, pos) < 0)
		if (i->key_cache_already_flushed)
		if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)

struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bpos start_pos,
				       struct bpos end_pos)
	if (bpos_cmp(start_pos, iter->journal_pos) < 0)
		iter->journal_idx = 0;

	k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
					&iter->journal_idx);

	iter->journal_pos = k ? k->k.p : end_pos;

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
					    struct btree_iter *iter,
	return bch2_btree_journal_peek(trans, iter, pos, pos);

struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter, iter->path->pos,
				k.k ? k.k->p : iter->path->l[0].b->key.k.p);

		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);

/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 */
struct bkey_s_c __btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_INTENT, 0,
						     iter->flags|BTREE_ITER_CACHED,

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(iter->key_cache_path);

	return bch2_btree_path_peek_slot(iter->key_cache_path, &u);

struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
	struct bkey_s_c ret = __btree_trans_peek_key_cache(iter, pos);
	int err = bkey_err(ret) ?: bch2_btree_path_relock(iter->trans, iter->path, _THIS_IP_);

	return err ? bkey_s_c_err(err) : ret;

static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
	struct btree_trans *trans = iter->trans;
	struct bkey_i *next_update;
	struct bkey_s_c k, k2;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

		struct btree_path_level *l;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		l = path_l(iter->path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);

		btree_path_set_should_be_locked(iter->path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {

				bch2_btree_iter_set_pos(iter, iter->pos);

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
			k = btree_trans_peek_journal(trans, iter, k);

		next_update = iter->flags & BTREE_ITER_WITH_UPDATES
			? btree_trans_peek_updates(trans, iter->btree_id, search_key)

		    bpos_cmp(next_update->k.p,
			     k.k ? k.k->p : l->b->key.k.p) <= 0) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non-deleted.
			 */
			search_key = bpos_cmp(search_key, k.k->p)
				: bpos_successor(k.k->p);

		} else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);

			bch2_btree_iter_set_pos(iter, SPOS_MAX);

	bch2_btree_iter_verify(iter);

/*
 * bch2_btree_iter_peek_upto: returns first key greater than or equal to iterator's
 */
struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = btree_iter_search_key(iter);
	struct bpos iter_pos;

	EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);

	if (iter->update_path) {
		bch2_path_put_nokeep(trans, iter->update_path,
				     iter->flags & BTREE_ITER_INTENT);
		iter->update_path = NULL;

	bch2_btree_iter_verify_entry_exit(iter);

		k = __bch2_btree_iter_peek(iter, search_key);
		if (!k.k || bkey_err(k))

		/*
		 * iter->pos should be monotonically increasing, and always be
		 * equal to the key we just returned - except extents can
		 * straddle iter->pos:
		 */
		if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
		else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
			iter_pos = bkey_start_pos(k.k);
			iter_pos = iter->pos;

		if (bkey_cmp(iter_pos, end) > 0) {
			bch2_btree_iter_set_pos(iter, end);

		if (iter->update_path &&
		    bkey_cmp(iter->update_path->pos, k.k->p)) {
			bch2_path_put_nokeep(trans, iter->update_path,
					     iter->flags & BTREE_ITER_INTENT);
			iter->update_path = NULL;

		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    (iter->flags & BTREE_ITER_INTENT) &&
		    !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
		    !iter->update_path) {
			struct bpos pos = k.k->p;

			if (pos.snapshot < iter->snapshot) {
				search_key = bpos_successor(k.k->p);

			pos.snapshot = iter->snapshot;

			/*
			 * advance, same as on exit for iter->path, but only up
			 */
			__btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
			iter->update_path = iter->path;

			iter->update_path = bch2_btree_path_set_pos(trans,
						iter->update_path, pos,
						iter->flags & BTREE_ITER_INTENT,

		/*
		 * We can never have a key in a leaf node at POS_MAX, so
		 * we don't have to check these successor() calls:
		 */
		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    !bch2_snapshot_is_ancestor(trans->c,
			search_key = bpos_successor(k.k->p);

		if (bkey_whiteout(k.k) &&
		    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
			search_key = bkey_successor(iter, k.k->p);

	iter->pos = iter_pos;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
				iter->flags & BTREE_ITER_INTENT,
				btree_iter_ip_allocated(iter));

	btree_path_set_should_be_locked(iter->path);

	if (iter->update_path) {
		if (iter->update_path->uptodate &&
		    (ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)))
			k = bkey_s_c_err(ret);
			btree_path_set_should_be_locked(iter->update_path);

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		iter->pos.snapshot = iter->snapshot;

	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret)) {
		bch2_btree_iter_set_pos(iter, iter->pos);
		k = bkey_s_c_err(ret);

	bch2_btree_iter_verify_entry_exit(iter);

/*
 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
 * to iterator's current position, returning keys from every level of the btree.
 * For keys at different levels of the btree that compare equal, the key from
 * the lower level (leaf) is returned first.
 */
struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);
	BUG_ON(iter->path->level < iter->min_depth);
	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
	EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));

		iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		/* Already at end? */
		if (!btree_path_node(iter->path, iter->path->level)) {

		k = btree_path_level_peek_all(trans->c,
				&iter->path->l[iter->path->level], &iter->k);

		/* Check if we should go up to the parent node: */
		    !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
			iter->pos = path_l(iter->path)->b->key.k.p;
			btree_path_set_level_up(trans, iter->path);
			iter->advanced = false;

		/*
		 * Check if we should go back down to a leaf:
		 * If we're not in a leaf node, we only return the current key
		 * if it exactly matches iter->pos - otherwise we first have to
		 * go back to the leaf:
		 */
		if (iter->path->level != iter->min_depth &&
		    bpos_cmp(iter->pos, k.k->p))) {
			btree_path_set_level_down(trans, iter->path, iter->min_depth);
			iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;

		/* Check if we should go to the next key: */
		if (iter->path->level == iter->min_depth &&
		    !bpos_cmp(iter->pos, k.k->p)) {
			iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;

		if (iter->advanced &&
		    iter->path->level == iter->min_depth &&
		    bpos_cmp(k.k->p, iter->pos))
			iter->advanced = false;

		BUG_ON(iter->advanced);

	btree_path_set_should_be_locked(iter->path);

	bch2_btree_iter_verify(iter);

/*
 * bch2_btree_iter_next: returns first key greater than iterator's current
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);

/*
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = iter->pos;
	struct btree_path *saved_path = NULL;
	struct bkey saved_k;
	const struct bch_val *saved_v;
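	/*
	 * When filtering snapshots while walking backwards, several keys at
	 * the same position in different snapshots may be seen; saved_path/
	 * saved_k/saved_v appear to track the best candidate (the most
	 * specific ancestor snapshot) found so far, to be returned once we
	 * move past that position:
	 */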
2222 EBUG_ON(iter->path->cached || iter->path->level);
2223 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2225 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2226 return bkey_s_c_err(-EIO);
2228 bch2_btree_iter_verify(iter);
2229 bch2_btree_iter_verify_entry_exit(iter);
2231 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2232 search_key.snapshot = U32_MAX;
2235 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2236 iter->flags & BTREE_ITER_INTENT,
2237 btree_iter_ip_allocated(iter));
2239 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2240 if (unlikely(ret)) {
2241 /* ensure that iter->k is consistent with iter->pos: */
2242 bch2_btree_iter_set_pos(iter, iter->pos);
2243 k = bkey_s_c_err(ret);
2247 k = btree_path_level_peek(trans, iter->path,
2248 &iter->path->l[0], &iter->k);
2250 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2251 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2252 : bpos_cmp(k.k->p, search_key) > 0))
2253 k = btree_path_level_prev(trans, iter->path,
2254 &iter->path->l[0], &iter->k);
2256 bch2_btree_path_check_sort(trans, iter->path, 0);
2259 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2260 if (k.k->p.snapshot == iter->snapshot)
2264 * If we have a saved candidate, and we're no
2265 * longer at the same _key_ (not pos), return
2268 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2269 bch2_path_put_nokeep(trans, iter->path,
2270 iter->flags & BTREE_ITER_INTENT);
2271 iter->path = saved_path;
2278 if (bch2_snapshot_is_ancestor(iter->trans->c,
2282 bch2_path_put_nokeep(trans, saved_path,
2283 iter->flags & BTREE_ITER_INTENT);
2284 saved_path = btree_path_clone(trans, iter->path,
2285 iter->flags & BTREE_ITER_INTENT);
2290 search_key = bpos_predecessor(k.k->p);
2294 if (bkey_whiteout(k.k) &&
2295 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2296 search_key = bkey_predecessor(iter, k.k->p);
2297 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2298 search_key.snapshot = U32_MAX;
2303 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2304 /* Advance to previous leaf node: */
2305 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2307 /* Start of btree: */
2308 bch2_btree_iter_set_pos(iter, POS_MIN);
2314 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2316 /* Extents can straddle iter->pos: */
2317 if (bkey_cmp(k.k->p, iter->pos) < 0)
2320 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2321 iter->pos.snapshot = iter->snapshot;
2323 btree_path_set_should_be_locked(iter->path);
2326 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2328 bch2_btree_iter_verify_entry_exit(iter);
2329 bch2_btree_iter_verify(iter);
2335 * bch2_btree_iter_prev: returns first key less than iterator's current
2338 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2340 if (!bch2_btree_iter_rewind(iter))
2341 return bkey_s_c_null;
2343 return bch2_btree_iter_peek_prev(iter);
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key;
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);
	EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
	EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));

	/* extents can't span inode numbers: */
	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	}

	search_key = btree_iter_search_key(iter);
	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}

	if ((iter->flags & BTREE_ITER_CACHED) ||
	    !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
		struct bkey_i *next_update;

		if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
		    (next_update = btree_trans_peek_updates(trans,
						iter->btree_id, search_key)) &&
		    !bpos_cmp(next_update->k.p, iter->pos)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
		    (next_update = bch2_btree_journal_peek_slot(trans,
						iter, iter->pos))) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k = __btree_trans_peek_key_cache(iter, iter->pos)).k) {
			if (!bkey_err(k))
				iter->k = *k.k;
			/* We're not returning a key from iter->path: */
			goto out_no_locked;
		}

		k = bch2_btree_path_peek_slot(iter->path, &iter->k);
		if (unlikely(!k.k))
			goto out_no_locked;
	} else {
		struct bpos next;

		EBUG_ON(iter->path->level);

		if (iter->flags & BTREE_ITER_INTENT) {
			struct btree_iter iter2;
			struct bpos end = iter->pos;

			if (iter->flags & BTREE_ITER_IS_EXTENTS)
				end.offset = U64_MAX;

			bch2_trans_copy_iter(&iter2, iter);
			k = bch2_btree_iter_peek_upto(&iter2, end);

			if (k.k && !bkey_err(k)) {
				iter->k = iter2.k;
				k.k = &iter->k;
			}
			bch2_trans_iter_exit(trans, &iter2);
		} else {
			struct bpos pos = iter->pos;

			k = bch2_btree_iter_peek(iter);
			if (unlikely(bkey_err(k)))
				bch2_btree_iter_set_pos(iter, pos);
			else
				iter->pos = pos;
		}

		if (unlikely(bkey_err(k)))
			goto out_no_locked;

		next = k.k ? bkey_start_pos(k.k) : POS_MAX;

		if (bkey_cmp(iter->pos, next) < 0) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;

			if (iter->flags & BTREE_ITER_IS_EXTENTS) {
				bch2_key_resize(&iter->k,
						min_t(u64, KEY_SIZE_MAX,
						      (next.inode == iter->pos.inode
						       ? next.offset
						       : KEY_OFFSET_MAX) -
						      iter->pos.offset));
				EBUG_ON(!iter->k.size);
			}

			k = (struct bkey_s_c) { &iter->k, NULL };
		}
	}
out:
	btree_path_set_should_be_locked(iter->path);
out_no_locked:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	return k;
}
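/*
 * Usage sketch (illustrative; do_something() is a hypothetical helper):
 * unlike bch2_btree_iter_peek(), peek_slot() always produces a key at the
 * iterator's current position - for an empty slot it synthesizes a
 * KEY_TYPE_deleted key (sized to cover the hole, for extents) - so a single
 * point lookup is just:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k) ?: do_something(k);
 *	bch2_trans_iter_exit(trans, &iter);
 */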
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
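/*
 * Stepping through consecutive slots (sketch; @end is a hypothetical bound) -
 * this is essentially what the for_each_btree_key() helpers do when passed
 * BTREE_ITER_SLOTS:
 *
 *	for (k = bch2_btree_iter_peek_slot(&iter);
 *	     k.k && !bkey_err(k) && bkey_cmp(iter.pos, end) < 0;
 *	     k = bch2_btree_iter_next_slot(&iter))
 *		...
 */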
/* new transactional stuff: */

static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
						struct btree_path *path)
{
	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
	EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
}

static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	unsigned i;

	for (i = 0; i < trans->nr_sorted; i++)
		btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
#endif
}
static void btree_trans_verify_sorted(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_path *path, *prev = NULL;
	unsigned i;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_path_inorder(trans, path, i) {
		if (prev && btree_path_cmp(prev, path) > 0) {
			bch2_dump_trans_paths_updates(trans);
			panic("trans paths out of order!\n");
		}
		prev = path;
	}
#endif
}
static inline void btree_path_swap(struct btree_trans *trans,
				   struct btree_path *l, struct btree_path *r)
{
	swap(l->sorted_idx, r->sorted_idx);
	swap(trans->sorted[l->sorted_idx],
	     trans->sorted[r->sorted_idx]);

	btree_path_verify_sorted_ref(trans, l);
	btree_path_verify_sorted_ref(trans, r);
}
static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *trans,
							    struct btree_path *path,
							    int cmp)
{
	struct btree_path *n;
	int cmp2;

	EBUG_ON(!cmp);

	while ((n = cmp < 0
		? prev_btree_path(trans, path)
		: next_btree_path(trans, path)) &&
	       (cmp2 = btree_path_cmp(n, path)) &&
	       ((cmp2 < 0) == (cmp < 0)))
		btree_path_swap(trans, n, path);

	btree_trans_verify_sorted(trans);
}
inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
				       int cmp)
{
	struct btree_path *n;

	if (cmp <= 0) {
		n = prev_btree_path(trans, path);
		if (n && btree_path_cmp(n, path) > 0) {
			do {
				btree_path_swap(trans, n, path);
				n = prev_btree_path(trans, path);
			} while (n && btree_path_cmp(n, path) > 0);

			goto out;
		}
	}

	if (cmp >= 0) {
		n = next_btree_path(trans, path);
		if (n && btree_path_cmp(path, n) > 0) {
			do {
				btree_path_swap(trans, path, n);
				n = next_btree_path(trans, path);
			} while (n && btree_path_cmp(path, n) > 0);
		}
	}
out:
	btree_trans_verify_sorted(trans);
}
static inline void btree_path_list_remove(struct btree_trans *trans,
					  struct btree_path *path)
{
	unsigned i;

	EBUG_ON(path->sorted_idx >= trans->nr_sorted);

	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	path->sorted_idx = U8_MAX;

	btree_trans_verify_sorted_refs(trans);
}
static inline void btree_path_list_add(struct btree_trans *trans,
				       struct btree_path *pos,
				       struct btree_path *path)
{
	unsigned i;

	btree_trans_verify_sorted_refs(trans);

	path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;

	if (trans->in_traverse_all &&
	    trans->traverse_all_idx != U8_MAX &&
	    trans->traverse_all_idx >= path->sorted_idx)
		trans->traverse_all_idx++;

	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	btree_trans_verify_sorted_refs(trans);
}
void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
	if (iter->path)
		bch2_path_put(trans, iter->path,
			      iter->flags & BTREE_ITER_INTENT);
	if (iter->update_path)
		bch2_path_put_nokeep(trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	if (iter->key_cache_path)
		bch2_path_put(trans, iter->key_cache_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->path = NULL;
	iter->update_path = NULL;
	iter->key_cache_path = NULL;
}
static inline void __bch2_trans_iter_init(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	if (unlikely(trans->restarted))
		panic("bch2_trans_iter_init(): in transaction restart, %s by %pS\n",
		      bch2_err_str(trans->restarted),
		      (void *) trans->last_restarted_ip);

	if (flags & BTREE_ITER_ALL_LEVELS)
		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_WITH_JOURNAL;

	iter->trans		= trans;
	iter->path		= NULL;
	iter->update_path	= NULL;
	iter->key_cache_path	= NULL;
	iter->btree_id		= btree_id;
	iter->min_depth		= depth;
	iter->flags		= flags;
	iter->snapshot		= pos.snapshot;
	iter->pos		= pos;
	iter->k.type		= KEY_TYPE_deleted;
	iter->k.p		= pos;
	iter->k.size		= 0;
	iter->journal_idx	= 0;
	iter->journal_pos	= POS_MIN;
#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated	= ip;
#endif

	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}
void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	__bch2_trans_iter_init(trans, iter, btree_id, pos,
			       0, 0, flags, _RET_IP_);
}
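/*
 * Typical iterator lifecycle (sketch; the btree id, bounds and body are
 * hypothetical, error handling abbreviated). Every bch2_trans_iter_init()
 * must be paired with bch2_trans_iter_exit() so the underlying btree_path
 * references are dropped:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, start, 0);
 *	for (k = bch2_btree_iter_peek(&iter);
 *	     k.k && !bkey_err(k);
 *	     k = bch2_btree_iter_next(&iter))
 *		...
 *	bch2_trans_iter_exit(trans, &iter);
 */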
void bch2_trans_node_iter_init(struct btree_trans *trans,
			       struct btree_iter *iter,
			       enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
{
	__bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
			       BTREE_ITER_NOT_EXTENTS|
			       __BTREE_ITER_ALL_SNAPSHOTS|
			       BTREE_ITER_ALL_SNAPSHOTS|
			       flags, _RET_IP_);
	BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->path->level != depth);
	BUG_ON(iter->min_depth != depth);
}
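/*
 * Node iterators walk whole btree nodes rather than keys, e.g. for debug and
 * repair code. Sketch (illustrative; the for_each_btree_node() helper wraps
 * this pattern):
 *
 *	struct btree *b;
 *
 *	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, 0, 0);
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter))
 *		...
 *	bch2_trans_iter_exit(trans, &iter);
 */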
void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
	*dst = *src;
	if (src->path)
		__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
	if (src->update_path)
		__btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
	dst->key_cache_path = NULL;
}
void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	unsigned new_top = trans->mem_top + size;
	size_t old_bytes = trans->mem_bytes;
	size_t new_bytes = roundup_pow_of_two(new_top);
	void *new_mem;
	void *p;

	trans->mem_max = max(trans->mem_max, new_top);

	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

	new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
	if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
		new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
		new_bytes = BTREE_TRANS_MEM_MAX;
		kfree(trans->mem);
	}

	if (!new_mem)
		return ERR_PTR(-ENOMEM);

	trans->mem = new_mem;
	trans->mem_bytes = new_bytes;

	if (old_bytes) {
		trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	memset(p, 0, size);
	return p;
}
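/*
 * Callers go through the bch2_trans_kmalloc() inline wrapper, which only
 * falls into this slow path when the existing buffer is too small. Sketch
 * (illustrative):
 *
 *	struct bkey_i *new = bch2_trans_kmalloc(trans, sizeof(*new));
 *	int ret = PTR_ERR_OR_ZERO(new);
 *	if (ret)	// -ENOMEM, or transaction_restart_mem_realloced
 *		return ret;
 *
 * Note the restart above: when the buffer is reallocated mid-transaction,
 * pointers handed out earlier may be stale, so the transaction must be
 * retried from the top.
 */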
/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes, an attempt to lock a btree
 * node may return BCH_ERR_transaction_restart when the trylock fails. When
 * this occurs bch2_trans_begin() should be called and the transaction retried.
 */
u32 bch2_trans_begin(struct btree_trans *trans)
{
	struct btree_path *path;

	bch2_trans_reset_updates(trans);

	trans->restart_count++;
	trans->mem_top = 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset((void *) trans->fs_usage_deltas +
		       offsetof(struct replicas_delta_list, memset_start), 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	trans_for_each_path(trans, path) {
		path->should_be_locked = false;

		/*
		 * If the transaction wasn't restarted, we're presuming to be
		 * doing something new: don't keep iterators, except the ones
		 * that are in use - and except for the subvolumes btree:
		 */
		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
			path->preserve = false;

		/*
		 * XXX: we probably shouldn't be doing this if the transaction
		 * was restarted, but currently we still overflow transaction
		 * iterators if we do that
		 */
		if (!path->ref && !path->preserve)
			__bch2_path_free(trans, path);
		else
			path->preserve = false;
	}

	if (!trans->restarted &&
	    (need_resched() ||
	     local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
		bch2_trans_unlock(trans);
		cond_resched();
		bch2_trans_relock(trans);
	}

	trans->last_restarted_ip = _RET_IP_;
	if (trans->restarted)
		bch2_btree_path_traverse_all(trans);

	trans->last_begin_time = local_clock();
	return trans->restart_count;
}
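/*
 * The canonical retry loop around bch2_trans_begin() (sketch; do_stuff() is
 * a hypothetical stand-in for the iterator/update work):
 *
 *	int ret;
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_stuff(trans) ?:
 *			bch2_trans_commit(trans, NULL, NULL, 0);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 *
 * The returned restart_count can be handed to
 * bch2_trans_verify_not_restarted() below, to assert that a code path did not
 * restart the transaction behind the caller's back.
 */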
void bch2_trans_verify_not_restarted(struct btree_trans *trans, u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
		      trans->restart_count, restart_count,
		      (void *) trans->last_restarted_ip);
}
static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
	size_t paths_bytes	= sizeof(struct btree_path) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);

	trans->paths	= p; p += paths_bytes;
	trans->updates	= p; p += updates_bytes;
}
const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];

unsigned bch2_trans_get_fn_idx(const char *fn)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
		if (!bch2_btree_transaction_fns[i] ||
		    bch2_btree_transaction_fns[i] == fn) {
			bch2_btree_transaction_fns[i] = fn;
			return i;
		}

	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
	return i;
}
void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx)
	__acquires(&c->btree_trans_barrier)
{
	struct btree_transaction_stats *s;
	struct btree_trans *pos;

	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));

	memset(trans, 0, sizeof(*trans));
	trans->c		= c;
	trans->fn		= fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
		? bch2_btree_transaction_fns[fn_idx] : NULL;
	trans->last_begin_time	= local_clock();
	trans->fn_idx		= fn_idx;
	trans->locking_wait.task = current;
	trans->journal_replay_not_finished =
		!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
	closure_init_stack(&trans->ref);

	bch2_trans_alloc_paths(trans, c);

	s = btree_trans_stats(trans);
	if (s) {
		unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);

		trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		} else {
			trans->mem_bytes = expected_mem_bytes;
		}

		trans->nr_max_paths = s->nr_max_paths;
	}

	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(pos, &c->btree_trans_list, list) {
		if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
			list_add_tail(&trans->list, &pos->list);
			goto list_add_done;
		}
	}
	list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
	mutex_unlock(&c->btree_trans_lock);
}
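/*
 * Transactions are stack-allocated by callers via the bch2_trans_init()
 * wrapper macro, which supplies fn_idx from the call site, and must be torn
 * down with bch2_trans_exit(). Sketch (illustrative):
 *
 *	struct btree_trans trans;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	...
 *	bch2_trans_exit(&trans);
 */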
static void check_btree_paths_leaked(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %s!", trans->fn);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
			       bch2_btree_ids[path->btree_id],
			       (void *) path->ip_allocated);
	/* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}
void bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct btree_insert_entry *i;
	struct bch_fs *c = trans->c;
	struct btree_transaction_stats *s = btree_trans_stats(trans);

	bch2_trans_unlock(trans);

	closure_sync(&trans->ref);

	if (s)
		s->max_mem = max(s->max_mem, trans->mem_max);

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);

	mutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&c->btree_trans_lock);

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	kfree(trans->extra_journal_entries.data);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);

	trans->mem	= (void *) 0x1;
	trans->paths	= (void *) 0x1;
}
static void __maybe_unused
bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
				      struct btree_bkey_cached_common *b)
{
	struct six_lock_count c = six_lock_counts(&b->lock);
	struct task_struct *owner;
	pid_t pid;

	rcu_read_lock();
	owner = READ_ONCE(b->lock.owner);
	pid = owner ? owner->pid : 0;
	rcu_read_unlock();

	prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
		   b->level, bch2_btree_ids[b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(b));

	prt_printf(out, " locks %u:%u:%u held by pid %u",
		   c.n[0], c.n[1], c.n[2], pid);
}
void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
{
	struct btree_path *path;
	struct btree_bkey_cached_common *b;
	static char lock_types[] = { 'r', 'i', 'w' };
	unsigned l;

	if (!out->nr_tabstops) {
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 32);
	}

	prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);

	trans_for_each_path(trans, path) {
		if (!path->nodes_locked)
			continue;

		prt_printf(out, "  path %u %c l=%u %s:",
			   path->idx,
			   path->cached ? 'c' : 'b',
			   path->level,
			   bch2_btree_ids[path->btree_id]);
		bch2_bpos_to_text(out, path->pos);
		prt_newline(out);

		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
			if (btree_node_locked(path, l) &&
			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
				prt_printf(out, "    %c l=%u ",
					   lock_types[btree_node_locked_type(path, l)], l);
				bch2_btree_bkey_cached_common_to_text(out, b);
				prt_newline(out);
			}
		}
	}

	b = READ_ONCE(trans->locking);
	if (b) {
		prt_str(out, "  want");
		prt_newline(out);
		prt_printf(out, "    %c", lock_types[trans->locking_wait.lock_want]);
		bch2_btree_bkey_cached_common_to_text(out, b);
		prt_newline(out);
	}
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	struct btree_transaction_stats *s;

	for (s = c->btree_transaction_stats;
	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
	     s++)
		kfree(s->max_paths_text);

	if (c->btree_trans_barrier_initialized)
		cleanup_srcu_struct(&c->btree_trans_barrier);
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
}
int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned i, nr = BTREE_ITER_MAX;
	int ret;

	for (i = 0; i < ARRAY_SIZE(c->btree_transaction_stats); i++)
		mutex_init(&c->btree_transaction_stats[i].lock);

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
			sizeof(struct btree_path) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
	      mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					BTREE_TRANS_MEM_MAX) ?:
	      init_srcu_struct(&c->btree_trans_barrier);
	if (!ret)
		c->btree_trans_barrier_initialized = true;
	return ret;
}