// SPDX-License-Identifier: GPL-2.0

#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "subvolume.h"

#include <linux/prandom.h>
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

static void btree_trans_verify_sorted(struct btree_trans *);
inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *,
							    struct btree_path *, int);

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
#ifdef CONFIG_BCACHEFS_DEBUG
	return iter->ip_allocated;

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		return bch2_trans_relock(trans);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id r_btree_id,
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return cmp_int(l->btree_id, r_btree_id) ?:
		cmp_int((int) l->cached, (int) r_cached) ?:
		bpos_cmp(l->pos, r_pos) ?:
		-cmp_int(l->level, r_level);

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
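/*
 * Editor's illustration (not in the original source): with the comparator
 * above, paths sort by btree id first, then uncached before cached, then by
 * position, and for equal positions the higher level sorts first - matching
 * the lock ordering of __bch2_btree_node_lock. E.g.:
 *
 *	(extents, uncached, pos 0:1, level 1)
 *	(extents, uncached, pos 0:1, level 0)
 *	(extents, cached,   pos 0:1, level 0)
 *	(inodes,  uncached, pos 0:2, level 0)
 */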
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
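/*
 * Editor's note (not in the original source): extents are indexed by their
 * end position, so for an extents iterator the search must start after
 * iter->pos - an extent whose end is exactly iter->pos does not cover that
 * position and must be skipped, while one that does cover it has a bkey
 * strictly greater than iter->pos and is still found.
 */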
static inline bool btree_path_pos_before_node(struct btree_path *path,
	return bpos_lt(path->pos, b->data->min_key);

static inline bool btree_path_pos_after_node(struct btree_path *path,
	return bpos_gt(path->pos, b->key.k.p);

static inline bool btree_path_pos_in_node(struct btree_path *path,
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

		btree_node_unlock(trans, path, 0);

static void bch2_btree_path_verify_level(struct btree_trans *trans,
					 struct btree_path *path, unsigned level)
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;

	if (!bch2_debug_check_iterators)

	locked = btree_node_locked(path, level);

		bch2_btree_path_verify_cached(trans, path);

	if (!btree_path_node(path, level))

	if (!bch2_btree_node_relock_notrace(trans, path, level))

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		btree_node_unlock(trans, path, level);
	bch2_bpos_to_text(&buf1, path->pos);
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&buf2, &uk);
		prt_printf(&buf2, "(none)");
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&buf3, &uk);
		prt_printf(&buf3, "(none)");
	panic("path should be %s key at level %u:\n"
	      msg, level, buf1.buf, buf2.buf, buf3.buf);

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
	struct bch_fs *c = trans->c;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		BUG_ON(!path->cached &&
		       c->btree_roots[path->btree_id].b->c.level > i);
		bch2_btree_path_verify_level(trans, path, i);

	bch2_btree_path_verify_locks(path);

void bch2_trans_verify_paths(struct btree_trans *trans)
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify(trans, path);

static void bch2_btree_iter_verify(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, iter->update_path);
	bch2_btree_path_verify(trans, iter->path);

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
	BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;

	if (!bch2_debug_check_iterators)

	if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))

	if (bkey_err(k) || !k.k)

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_NOPRESERVE|
			     BTREE_ITER_ALL_SNAPSHOTS);
	prev = bch2_btree_iter_prev(&copy);

	ret = bkey_err(prev);

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
	bch2_trans_iter_exit(trans, &copy);

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
	struct btree_path *path;
	struct printbuf buf = PRINTBUF;

	trans_for_each_path_inorder(trans, path, idx) {
		int cmp = cmp_int(path->btree_id, id) ?:
			cmp_int(path->cached, key_cache);

		if (!btree_node_locked(path, 0) ||
		    !path->should_be_locked)

		if (bkey_ge(pos, path->l[0].b->data->min_key) &&
		    bkey_le(pos, path->l[0].b->key.k.p))

		if (bkey_eq(pos, path->pos))

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_ids[id], buf.buf,
	      key_cache ? " cached" : "");

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct bkey_packed *k)
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct bkey_packed *where)
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct bkey_packed *where)
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree_node_iter *node_iter,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
	/* didn't find the bset in the iterator - might have to re-add it: */
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);

	/* Iterator is after key that changed */

	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */

	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;

	bch2_btree_node_iter_sort(node_iter, b);

	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				btree_node_iter_set_set_pos(node_iter,

void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_path *linked;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);

	trans_for_each_path_with_node(trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey_packed *k)
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;

	return bkey_disassemble(l->b, k, u);

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	bch2_btree_path_verify_level(trans, path, l - path->l);

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	bch2_btree_path_verify_level(trans, path, l - path->l);

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
	struct bkey_packed *k;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)

		bch2_btree_node_iter_advance(&l->iter, l->b);
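/*
 * Editor's note (not in the original source): max_advance bounds the linear
 * scan above - e.g. btree_path_advance_to_pos(path, l, 8) gives up and
 * returns false after skipping 8 keys, letting the caller fall back to a
 * full node iterator re-init, while max_advance <= 0 means no bound.
 */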
static inline void __btree_path_level_init(struct btree_path *path,
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
		bch2_btree_node_iter_peek(&l->iter, l->b);

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	path->l[b->c.level].lock_seq = b->c.lock.state.seq;
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);

/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;

	trans_for_each_update(trans, i)
		    i->level == b->c.level &&
		    i->btree_id == b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->uptodate == BTREE_ITER_UPTODATE &&
		    btree_path_pos_in_node(path, b)) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(trans, path, b->c.level, t);

			bch2_btree_path_level_init(trans, path, b);

	bch2_trans_revalidate_updates_in_node(trans, b);
/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);

/* Btree path: traverse, set_pos: */

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned long trace_ip)
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
	enum six_lock_type lock_type;

	EBUG_ON(path->nodes_locked);

	b = READ_ONCE(*rootp);
	path->level = READ_ONCE(b->c.level);

	if (unlikely(path->level < depth_want)) {
		/*
		 * the root is at a lower depth than the depth we want:
		 * got to the end of the btree, or we're walking nodes
		 * greater than some depth and there are no nodes >=
		 * that depth
		 */
		path->level = depth_want;
		for (i = path->level; i < BTREE_MAX_DEPTH; i++)

	lock_type = __btree_lock_want(path, path->level);
	ret = btree_node_lock(trans, path, &b->c,
			      path->level, lock_type, trace_ip);
		if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	if (likely(b == READ_ONCE(*rootp) &&
		   b->c.level == path->level &&
		for (i = 0; i < path->level; i++)
			path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
		path->l[path->level].b = b;
		for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)

		mark_btree_node_locked(trans, path, path->level, lock_type);
		bch2_btree_path_level_init(trans, path, b);

	six_unlock_type(&b->c.lock, lock_type);

static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 : 2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,

		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
	struct bch_fs *c = trans->c;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 : 2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,

		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

		btree_node_unlock(trans, path, plevel);

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     struct bkey_buf *out)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;

	__bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if (flags & BTREE_ITER_PREFETCH)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);

static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned long trace_ip)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		bch2_bkey_buf_unpack(&tmp, c, l->b,
				     bch2_btree_node_iter_peek(&l->iter, l->b));

		if (flags & BTREE_ITER_PREFETCH) {
			ret = btree_path_prefetch(trans, path);

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level, lock_type);

	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);

	bch2_bkey_buf_exit(&tmp, c);

static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
				   unsigned, unsigned long);

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;

	trans->restarted = 0;
	trans->traverse_all_idx = U8_MAX;

	trans_for_each_path(trans, path)
		path->should_be_locked = false;

	btree_trans_verify_sorted(trans);

	bch2_trans_unlock(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		closure_init_stack(&cl);

		ret = bch2_btree_cache_cannibalize_lock(c, &cl);

	/* Now, redo traversals in correct order: */
	trans->traverse_all_idx = 0;
	while (trans->traverse_all_idx < trans->nr_sorted) {
		path = trans->paths + trans->sorted[trans->traverse_all_idx];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (path->uptodate) {
			ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||

		BUG_ON(path->uptodate);

		trans->traverse_all_idx++;

	/*
	 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
	 * and relock(), relock() won't relock since path->should_be_locked
	 * isn't set yet, which is all fine
	 */
	trans_for_each_path(trans, path)
		BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);

	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);

static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
	unsigned i, l = path->level;

	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	     i < path->locks_want && btree_path_node(path, i);
		if (!bch2_btree_node_relock(trans, path, i)) {
			__btree_path_set_level_up(trans, path, l++);

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		: __btree_path_up_until_good_node(trans, path, check_pos);
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_path_traverse_one(struct btree_trans *trans,
				   struct btree_path *path,
				   unsigned long trace_ip)
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);

		ret = bch2_btree_path_traverse_cached(trans, path, flags);

	if (unlikely(path->level >= BTREE_MAX_DEPTH))

	path->level = btree_path_up_until_good_node(trans, path, 0);

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			/*
			 * No nodes at this level - got to the end of
			 * the btree:
			 */
			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);

	path->uptodate = BTREE_ITER_UPTODATE;

	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
	bch2_btree_path_verify(trans, path);

int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  struct btree_path *path, unsigned flags)
	if (0 && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		unsigned restart_probability_bits = 4 << min(trans->restart_count, 32U);
		u64 mask = ~(~0ULL << restart_probability_bits);

		if ((prandom_u32() & mask) == mask) {
			trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);

	if (path->uptodate < BTREE_ITER_NEED_RELOCK)

	return bch2_trans_cond_resched(trans) ?:
		btree_path_traverse_one(trans, path, flags, _RET_IP_);

static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
				   struct btree_path *src)
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);

static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
	struct btree_path *new = btree_path_alloc(trans, src);

	btree_path_copy(trans, new, src);
	__btree_path_get(new, intent);

struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
					      struct btree_path *path, bool intent,
	__btree_path_put(path, intent);
	path = btree_path_clone(trans, path, intent);
	path->preserve = false;
#ifdef CONFIG_BCACHEFS_DEBUG
	path->ip_allocated = ip;
	btree_trans_verify_sorted(trans);

struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  struct btree_path *path, struct bpos new_pos,
			  bool intent, unsigned long ip, int cmp)
	unsigned level = path->level;

	EBUG_ON(trans->restarted);
	EBUG_ON(!path->ref);

	path = bch2_btree_path_make_mut(trans, path, intent, ip);

	path->pos = new_pos;

	bch2_btree_path_check_sort_fast(trans, path, cmp);

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

	level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * isn't expensive).
		 */
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
		/*
		 * Iterators to interior nodes should always be pointed at the first non
		 * whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);

	bch2_btree_path_verify(trans, path);

/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)

static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
	__bch2_btree_path_unlock(trans, path);
	btree_path_list_remove(trans, path);
	trans->paths_allocated &= ~(1ULL << path->idx);

void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
	struct btree_path *dup;

	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))

	if (path->should_be_locked &&
	    !trans->restarted &&
	    (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))

		dup->preserve |= path->preserve;
		dup->should_be_locked |= path->should_be_locked;

	__bch2_path_free(trans, path);

static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path,
	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))

	__bch2_path_free(trans, path);

void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
	struct btree_insert_entry *i;

	prt_printf(buf, "transaction updates for %s journal seq %llu",
		   trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS",
			   bch2_btree_ids[i->btree_id],
			   (void *) i->ip_allocated);

		prt_printf(buf, " old ");
		bch2_bkey_val_to_text(buf, trans->c, old);

		prt_printf(buf, " new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));

	printbuf_indent_sub(buf, 2);

void bch2_dump_trans_updates(struct btree_trans *trans)
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
	prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
		   path->idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   bch2_btree_ids[path->btree_id],
	bch2_bpos_to_text(out, path->pos);

	prt_printf(out, " locks %u", path->nodes_locked);
#ifdef CONFIG_BCACHEFS_DEBUG
	prt_printf(out, " %pS", (void *) path->ip_allocated);

void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
	struct btree_path *path;

	trans_for_each_path_inorder(trans, path, idx)
		bch2_btree_path_to_text(out, path);

void bch2_dump_trans_paths_updates(struct btree_trans *trans)
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

static void bch2_trans_update_max_paths(struct btree_trans *trans)
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
			s->nr_max_paths = trans->nr_max_paths =
				hweight64(trans->paths_allocated);
			swap(s->max_paths_text, buf.buf);
		mutex_unlock(&s->lock);

	printbuf_exit(&buf);

static noinline void btree_path_overflow(struct btree_trans *trans)
	bch2_dump_trans_paths_updates(trans);
	panic("trans path overflow\n");
static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
						  struct btree_path *pos)
	struct btree_path *path;

	if (unlikely(trans->paths_allocated ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
		btree_path_overflow(trans);

	idx = __ffs64(~trans->paths_allocated);
	trans->paths_allocated |= 1ULL << idx;

	if (unlikely(idx > trans->nr_max_paths))
		bch2_trans_update_max_paths(trans);

	path = &trans->paths[idx];
	path->intent_ref = 0;
	path->nodes_locked = 0;

	btree_path_list_add(trans, pos, path);

struct btree_path *bch2_path_get(struct btree_trans *trans,
				 enum btree_id btree_id, struct bpos pos,
				 unsigned locks_want, unsigned level,
				 unsigned flags, unsigned long ip)
	struct btree_path *path, *path_pos = NULL;
	bool cached = flags & BTREE_ITER_CACHED;
	bool intent = flags & BTREE_ITER_INTENT;

	EBUG_ON(trans->restarted);
	btree_trans_verify_sorted(trans);
	bch2_trans_verify_locks(trans);

	trans_for_each_path_inorder(trans, path, i) {
		if (__btree_path_cmp(path,

	    path_pos->cached	== cached &&
	    path_pos->btree_id	== btree_id &&
	    path_pos->level	== level) {
		__btree_path_get(path_pos, intent);
		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = btree_path_alloc(trans, path_pos);

		__btree_path_get(path, intent);
		path->btree_id		= btree_id;
		path->cached		= cached;
		path->uptodate		= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked	= false;
		path->level		= level;
		path->locks_want	= locks_want;
		path->nodes_locked	= 0;
		for (i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b	= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef CONFIG_BCACHEFS_DEBUG
		path->ip_allocated	= ip;
		btree_trans_verify_sorted(trans);

	if (!(flags & BTREE_ITER_NOPRESERVE))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */
	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));

		if (!k.k || !bpos_eq(path->pos, k.k->p))
		struct bkey_cached *ck = (void *) path->l[0].b;

			(path->btree_id != ck->key.btree_id ||
			 !bkey_eq(path->pos, ck->key.pos)));
		EBUG_ON(!ck || !ck->valid);

		k = bkey_i_to_s_c(ck->k);

	return (struct bkey_s_c) { u, NULL };
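/*
 * Editor's sketch (not in the original source) of typical low-level use of
 * the path API, assuming an existing transaction:
 *
 *	struct btree_path *path = bch2_path_get(trans, BTREE_ID_inodes, pos,
 *					0, 0, BTREE_ITER_INTENT, _THIS_IP_);
 *	int ret = bch2_btree_path_traverse(trans, path, 0);
 *	if (!ret) {
 *		struct bkey u;
 *		struct bkey_s_c k = bch2_btree_path_peek_slot(path, &u);
 *		...
 *	}
 *	bch2_path_put(trans, path, true);
 *
 * Most callers use the btree_iter wrappers below instead of raw paths.
 */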
/* Btree iterators: */

__bch2_btree_iter_traverse(struct btree_iter *iter)
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);

bch2_btree_iter_traverse(struct btree_iter *iter)
	iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
					     btree_iter_search_key(iter),
					     iter->flags & BTREE_ITER_INTENT,
					     btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);

	btree_path_set_should_be_locked(iter->path);
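/*
 * Editor's sketch (not in the original source): bch2_btree_iter_traverse()
 * is for callers that need the path positioned and locked without peeking a
 * key first, e.g.:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, pos, 0);
 *	ret = bch2_btree_iter_traverse(&iter) ?:
 *		bch2_trans_update(trans, &iter, new_key, 0);
 *	bch2_trans_iter_exit(trans, &iter);
 *
 * Transaction restart errors must be propagated so the whole transaction
 * can be retried.
 */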
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);

	b = btree_path_node(iter->path, iter->path->level);

	BUG_ON(bpos_lt(b->key.k.p, iter->pos));

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					     iter->flags & BTREE_ITER_INTENT,
					     btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct btree_path *path = iter->path;
	struct btree *b = NULL;

	BUG_ON(trans->restarted);
	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))

	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);

	b = btree_path_node(path, path->level + 1);

	if (bpos_eq(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, path, iter->flags);

		b = path->l[path->level].b;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					     iter->flags & BTREE_ITER_INTENT,
					     btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);
	BUG_ON(iter->path->uptodate);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
	if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
		struct bpos pos = iter->k.p;
		bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
			     ? bpos_eq(pos, SPOS_MAX)
			     : bkey_eq(pos, SPOS_MAX));

		if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
			pos = bkey_successor(iter, pos);
		bch2_btree_iter_set_pos(iter, pos);

		if (!btree_path_node(iter->path, iter->path->level))

		iter->advanced = true;

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		     ? bpos_eq(pos, POS_MIN)
		     : bkey_eq(pos, POS_MIN));

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);

static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
						      enum btree_id btree_id,
	struct btree_insert_entry *i;
	struct bkey_i *ret = NULL;

	trans_for_each_update(trans, i) {
		if (i->btree_id < btree_id)
		if (i->btree_id > btree_id)
		if (bpos_lt(i->k->k.p, pos))
		if (i->key_cache_already_flushed)
		if (!ret || bpos_lt(i->k->k.p, ret->k.p))

struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bpos start_pos,
				       struct bpos end_pos)
	if (bpos_lt(start_pos, iter->journal_pos))
		iter->journal_idx = 0;

	k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
					&iter->journal_idx);

	iter->journal_pos = k ? k->k.p : end_pos;

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
					    struct btree_iter *iter,
	return bch2_btree_journal_peek(trans, iter, pos, pos);

struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter, iter->path->pos,
				k.k ? k.k->p : path_l(iter->path)->b->key.k.p);

		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);
/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
struct bkey_s_c __btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_INTENT, 0,
						     iter->flags|BTREE_ITER_CACHED,

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
						       iter->flags & BTREE_ITER_INTENT,
						       btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(iter->key_cache_path);

	return bch2_btree_path_peek_slot(iter->key_cache_path, &u);

struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
	struct bkey_s_c ret = __btree_trans_peek_key_cache(iter, pos);
	int err = bkey_err(ret) ?: bch2_btree_path_relock(iter->trans, iter->path, _THIS_IP_);

	return err ? bkey_s_c_err(err) : ret;
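/*
 * Editor's note (not in the original source): peek therefore merges several
 * sources - the btree itself, the key cache, journal keys during replay,
 * and the transaction's own pending updates - with the helpers above letting
 * whichever source has a key at the position override the btree key.
 */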
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
	struct btree_trans *trans = iter->trans;
	struct bkey_i *next_update;
	struct bkey_s_c k, k2;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

		struct btree_path_level *l;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		l = path_l(iter->path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);

		btree_path_set_should_be_locked(iter->path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
				bch2_btree_iter_set_pos(iter, iter->pos);

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
			k = btree_trans_peek_journal(trans, iter, k);

		next_update = iter->flags & BTREE_ITER_WITH_UPDATES
			? btree_trans_peek_updates(trans, iter->btree_id, search_key)
		    bpos_le(next_update->k.p,
			    k.k ? k.k->p : l->b->key.k.p)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non-deleted.
			 */
			search_key = !bpos_eq(search_key, k.k->p)
				: bpos_successor(k.k->p);

		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);
			bch2_btree_iter_set_pos(iter, SPOS_MAX);

	bch2_btree_iter_verify(iter);
/*
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = btree_iter_search_key(iter);
	struct bpos iter_pos;

	EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
	EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));

	if (iter->update_path) {
		bch2_path_put_nokeep(trans, iter->update_path,
				     iter->flags & BTREE_ITER_INTENT);
		iter->update_path = NULL;

	bch2_btree_iter_verify_entry_exit(iter);

		k = __bch2_btree_iter_peek(iter, search_key);

		if (unlikely(bkey_err(k)))
		/*
		 * iter->pos should be monotonically increasing, and always be
		 * equal to the key we just returned - except extents can
		 * straddle iter->pos:
		 */
		if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));

		if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
			     ? bkey_gt(iter_pos, end)
			     : bkey_ge(iter_pos, end)))

		if (iter->update_path &&
		    !bkey_eq(iter->update_path->pos, k.k->p)) {
			bch2_path_put_nokeep(trans, iter->update_path,
					     iter->flags & BTREE_ITER_INTENT);
			iter->update_path = NULL;

		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    (iter->flags & BTREE_ITER_INTENT) &&
		    !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
		    !iter->update_path) {
			struct bpos pos = k.k->p;

			if (pos.snapshot < iter->snapshot) {
				search_key = bpos_successor(k.k->p);

			pos.snapshot = iter->snapshot;
			/*
			 * advance, same as on exit for iter->path, but only up
			 * to snapshot
			 */
			__btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
			iter->update_path = iter->path;

			iter->update_path = bch2_btree_path_set_pos(trans,
						iter->update_path, pos,
						iter->flags & BTREE_ITER_INTENT,
			ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
			if (unlikely(ret)) {
				k = bkey_s_c_err(ret);

		/*
		 * We can never have a key in a leaf node at POS_MAX, so
		 * we don't have to check these successor() calls:
		 */
		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    !bch2_snapshot_is_ancestor(trans->c,
			search_key = bpos_successor(k.k->p);

		if (bkey_whiteout(k.k) &&
		    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
			search_key = bkey_successor(iter, k.k->p);

	iter->pos = iter_pos;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
				iter->flags & BTREE_ITER_INTENT,
				btree_iter_ip_allocated(iter));

	btree_path_set_should_be_locked(iter->path);

	if (iter->update_path) {
		if (iter->update_path->uptodate &&
		    (ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)))
			k = bkey_s_c_err(ret);
			btree_path_set_should_be_locked(iter->update_path);

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		iter->pos.snapshot = iter->snapshot;

	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret)) {
		bch2_btree_iter_set_pos(iter, iter->pos);
		k = bkey_s_c_err(ret);

	bch2_btree_iter_verify_entry_exit(iter);

	bch2_btree_iter_set_pos(iter, end);

/*
 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
 * to iterator's current position, returning keys from every level of the btree.
 * For keys at different levels of the btree that compare equal, the key from
 * the lower level (leaf) is returned first.
 */
struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);
	BUG_ON(iter->path->level < iter->min_depth);
	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
	EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));

		iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		/* Already at end? */
		if (!btree_path_node(iter->path, iter->path->level)) {

		k = btree_path_level_peek_all(trans->c,
				&iter->path->l[iter->path->level], &iter->k);

		/* Check if we should go up to the parent node: */
		    bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
			iter->pos = path_l(iter->path)->b->key.k.p;
			btree_path_set_level_up(trans, iter->path);
			iter->advanced = false;

		/*
		 * Check if we should go back down to a leaf:
		 * If we're not in a leaf node, we only return the current key
		 * if it exactly matches iter->pos - otherwise we first have to
		 * go back to the leaf:
		 */
		if (iter->path->level != iter->min_depth &&
		     !bpos_eq(iter->pos, k.k->p))) {
			btree_path_set_level_down(trans, iter->path, iter->min_depth);
			iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;

		/* Check if we should go to the next key: */
		if (iter->path->level == iter->min_depth &&
		    bpos_eq(iter->pos, k.k->p)) {
			iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;

		if (iter->advanced &&
		    iter->path->level == iter->min_depth &&
		    !bpos_eq(k.k->p, iter->pos))
			iter->advanced = false;

	BUG_ON(iter->advanced);

	btree_path_set_should_be_locked(iter->path);

	bch2_btree_iter_verify(iter);
/*
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);
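/*
 * Editor's sketch (not in the original source): the usual forward iteration
 * pattern over these helpers:
 *
 *	for (k = bch2_btree_iter_peek(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(&iter)) {
 *		...
 *	}
 *
 * which is roughly what the for_each_btree_key() macros expand to, modulo
 * transaction restart handling.
 */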
/*
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = iter->pos;
	struct btree_path *saved_path = NULL;
	struct bkey saved_k;
	const struct bch_val *saved_v;

	EBUG_ON(iter->path->cached || iter->path->level);
	EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);

	if (iter->flags & BTREE_ITER_WITH_JOURNAL)
		return bkey_s_c_err(-EIO);

	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		search_key.snapshot = U32_MAX;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
						iter->flags & BTREE_ITER_INTENT,
						btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		k = btree_path_level_peek(trans, iter->path,
					  &iter->path->l[0], &iter->k);
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bpos_ge(bkey_start_pos(k.k), search_key)
		     : bpos_gt(k.k->p, search_key)))
			k = btree_path_level_prev(trans, iter->path,
						  &iter->path->l[0], &iter->k);

		bch2_btree_path_check_sort(trans, iter->path, 0);

			if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
				if (k.k->p.snapshot == iter->snapshot)
				/*
				 * If we have a saved candidate, and we're no
				 * longer at the same _key_ (not pos), return it:
				 */
				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
					bch2_path_put_nokeep(trans, iter->path,
						      iter->flags & BTREE_ITER_INTENT);
					iter->path = saved_path;

				if (bch2_snapshot_is_ancestor(iter->trans->c,
						bch2_path_put_nokeep(trans, saved_path,
							      iter->flags & BTREE_ITER_INTENT);
					saved_path = btree_path_clone(trans, iter->path,
								iter->flags & BTREE_ITER_INTENT);

				search_key = bpos_predecessor(k.k->p);

			if (bkey_whiteout(k.k) &&
			    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
				search_key = bkey_predecessor(iter, k.k->p);
				if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
					search_key.snapshot = U32_MAX;

		} else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
			/* Advance to previous leaf node: */
			search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
			/* Start of btree: */
			bch2_btree_iter_set_pos(iter, POS_MIN);

	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));

	/* Extents can straddle iter->pos: */
	if (bkey_lt(k.k->p, iter->pos))

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		iter->pos.snapshot = iter->snapshot;

	btree_path_set_should_be_locked(iter->path);

	bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
/*
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
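/*
 * Editor's sketch (not in the original source): reverse iteration mirrors
 * the forward pattern:
 *
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_prev(&iter))
 *		...
 *
 * Note that peek_prev does not support BTREE_ITER_WITH_UPDATES or
 * BTREE_ITER_WITH_JOURNAL (see the EBUG_ON() and -EIO return above).
 */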
2386 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2388 struct btree_trans *trans = iter->trans;
2389 struct bpos search_key;
2393 bch2_btree_iter_verify(iter);
2394 bch2_btree_iter_verify_entry_exit(iter);
2395 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2396 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2398 /* extents can't span inode numbers: */
2399 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2400 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2401 if (iter->pos.inode == KEY_INODE_MAX)
2402 return bkey_s_c_null;
2404 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2407 search_key = btree_iter_search_key(iter);
2408 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2409 iter->flags & BTREE_ITER_INTENT,
2410 btree_iter_ip_allocated(iter));
2412 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2413 if (unlikely(ret)) {
2414 k = bkey_s_c_err(ret);
2418 if ((iter->flags & BTREE_ITER_CACHED) ||
2419 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2420 struct bkey_i *next_update;
2422 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2423 (next_update = btree_trans_peek_updates(trans,
2424 iter->btree_id, search_key)) &&
2425 bpos_eq(next_update->k.p, iter->pos)) {
2426 iter->k = next_update->k;
2427 k = bkey_i_to_s_c(next_update);
2431 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2432 (next_update = bch2_btree_journal_peek_slot(trans,
2433 iter, iter->pos))) {
2434 iter->k = next_update->k;
2435 k = bkey_i_to_s_c(next_update);
2439 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2440 (k = __btree_trans_peek_key_cache(iter, iter->pos)).k) {
2443 /* We're not returning a key from iter->path: */
2447 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2452 struct bpos end = iter->pos;
2454 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2455 end.offset = U64_MAX;
2457 EBUG_ON(iter->path->level);
2459 if (iter->flags & BTREE_ITER_INTENT) {
2460 struct btree_iter iter2;
2462 bch2_trans_copy_iter(&iter2, iter);
2463 k = bch2_btree_iter_peek_upto(&iter2, end);
2465 if (k.k && !bkey_err(k)) {
2469 bch2_trans_iter_exit(trans, &iter2);
2471 struct bpos pos = iter->pos;
2473 k = bch2_btree_iter_peek_upto(iter, end);
2474 if (unlikely(bkey_err(k)))
2475 bch2_btree_iter_set_pos(iter, pos);
2480 if (unlikely(bkey_err(k)))
2483 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2485 if (bkey_lt(iter->pos, next)) {
2486 bkey_init(&iter->k);
2487 iter->k.p = iter->pos;
2489 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2490 bch2_key_resize(&iter->k,
2491 min_t(u64, KEY_SIZE_MAX,
2492 (next.inode == iter->pos.inode
2496 EBUG_ON(!iter->k.size);
2499 k = (struct bkey_s_c) { &iter->k, NULL };
2503 btree_path_set_should_be_locked(iter->path);
2505 bch2_btree_iter_verify_entry_exit(iter);
2506 bch2_btree_iter_verify(iter);
2507 ret = bch2_btree_iter_verify_ret(iter, k);
2509 return bkey_s_c_err(ret);
2514 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2516 if (!bch2_btree_iter_advance(iter))
2517 return bkey_s_c_null;
2519 return bch2_btree_iter_peek_slot(iter);
2522 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2524 if (!bch2_btree_iter_rewind(iter))
2525 return bkey_s_c_null;
2527 return bch2_btree_iter_peek_slot(iter);
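/*
 * Example (editor's sketch, declarations elided): unlike the peek interfaces,
 * the slot interface returns a key for the iterator's exact position even
 * when nothing is stored there, synthesizing a KEY_TYPE_deleted key (or, for
 * extents, a hole extending to the next key):
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && !bkey_deleted(k.k))
 *		... a live key exists at pos ...
 *	bch2_trans_iter_exit(trans, &iter);
 */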
2530 /* new transactional interface: */
2532 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2533 struct btree_path *path)
2535 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2536 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2537 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
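/*
 * Editor's note: trans->sorted[] stores path indices in sort order, and each
 * path records its slot in that array, giving the invariant checked above:
 *
 *	trans->paths[trans->sorted[i]].sorted_idx == i   for all i < nr_sorted
 */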
2540 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2542 #ifdef CONFIG_BCACHEFS_DEBUG
2545 for (i = 0; i < trans->nr_sorted; i++)
2546 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2550 static void btree_trans_verify_sorted(struct btree_trans *trans)
2552 #ifdef CONFIG_BCACHEFS_DEBUG
2553 struct btree_path *path, *prev = NULL;
2556 if (!bch2_debug_check_iterators)
2559 trans_for_each_path_inorder(trans, path, i) {
2560 if (prev && btree_path_cmp(prev, path) > 0) {
2561 bch2_dump_trans_paths_updates(trans);
2562 panic("trans paths out of order!\n");
2569 static inline void btree_path_swap(struct btree_trans *trans,
2570 struct btree_path *l, struct btree_path *r)
2572 swap(l->sorted_idx, r->sorted_idx);
2573 swap(trans->sorted[l->sorted_idx],
2574 trans->sorted[r->sorted_idx]);
2576 btree_path_verify_sorted_ref(trans, l);
2577 btree_path_verify_sorted_ref(trans, r);
2580 static inline struct btree_path *sib_btree_path(struct btree_trans *trans,
2581 struct btree_path *path, int sib)
2583 unsigned idx = (unsigned) path->sorted_idx + sib;
2585 EBUG_ON(sib != -1 && sib != 1);
2587 return idx < trans->nr_sorted
2588 ? trans->paths + trans->sorted[idx]
2592 static __always_inline void bch2_btree_path_check_sort_fast(struct btree_trans *trans,
2593 struct btree_path *path,
2596 struct btree_path *n;
2601 while ((n = sib_btree_path(trans, path, cmp)) &&
2602 (cmp2 = btree_path_cmp(n, path)) &&
2604 btree_path_swap(trans, n, path);
2606 btree_trans_verify_sorted(trans);
2609 inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2612 struct btree_path *n;
2615 n = prev_btree_path(trans, path);
2616 if (n && btree_path_cmp(n, path) > 0) {
2618 btree_path_swap(trans, n, path);
2619 n = prev_btree_path(trans, path);
2620 } while (n && btree_path_cmp(n, path) > 0);
2627 n = next_btree_path(trans, path);
2628 if (n && btree_path_cmp(path, n) > 0) {
2630 btree_path_swap(trans, path, n);
2631 n = next_btree_path(trans, path);
2632 } while (n && btree_path_cmp(path, n) > 0);
2636 btree_trans_verify_sorted(trans);
2639 static inline void btree_path_list_remove(struct btree_trans *trans,
2640 struct btree_path *path)
2644 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2646 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2648 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2649 trans->paths[trans->sorted[i]].sorted_idx = i;
2651 path->sorted_idx = U8_MAX;
2653 btree_trans_verify_sorted_refs(trans);
2656 static inline void btree_path_list_add(struct btree_trans *trans,
2657 struct btree_path *pos,
2658 struct btree_path *path)
2662 btree_trans_verify_sorted_refs(trans);
2664 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
2666 if (unlikely(trans->in_traverse_all) &&
2667 trans->traverse_all_idx != U8_MAX &&
2668 trans->traverse_all_idx >= path->sorted_idx)
2669 trans->traverse_all_idx++;
2671 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2673 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2674 trans->paths[trans->sorted[i]].sorted_idx = i;
2676 btree_trans_verify_sorted_refs(trans);
2679 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2682 bch2_path_put(trans, iter->path,
2683 iter->flags & BTREE_ITER_INTENT);
2684 if (iter->update_path)
2685 bch2_path_put_nokeep(trans, iter->update_path,
2686 iter->flags & BTREE_ITER_INTENT);
2687 if (iter->key_cache_path)
2688 bch2_path_put(trans, iter->key_cache_path,
2689 iter->flags & BTREE_ITER_INTENT);
2691 iter->update_path = NULL;
2692 iter->key_cache_path = NULL;
2695 static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
2696 struct btree_iter *iter,
2697 unsigned btree_id, struct bpos pos,
2700 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2701 bch2_btree_iter_flags(trans, btree_id, flags),
2705 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2706 struct btree_iter *iter,
2707 unsigned btree_id, struct bpos pos,
2710 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2711 bch2_btree_iter_flags(trans, btree_id, flags),
2715 void bch2_trans_node_iter_init(struct btree_trans *trans,
2716 struct btree_iter *iter,
2717 enum btree_id btree_id,
2719 unsigned locks_want,
2723 flags |= BTREE_ITER_NOT_EXTENTS;
2724 flags |= __BTREE_ITER_ALL_SNAPSHOTS;
2725 flags |= BTREE_ITER_ALL_SNAPSHOTS;
2727 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2728 __bch2_btree_iter_flags(trans, btree_id, flags),
2731 iter->min_depth = depth;
2733 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2734 BUG_ON(iter->path->level != depth);
2735 BUG_ON(iter->min_depth != depth);
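/*
 * Example (editor's sketch): a node iterator walks whole btree nodes at a
 * given depth rather than individual keys. Restart handling is elided:
 *
 *	struct btree *b;
 *
 *	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
 *				  0, depth, 0);
 *
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     b && !IS_ERR(b);
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		... visit b ...
 *	}
 *
 *	bch2_trans_iter_exit(trans, &iter);
 */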
2738 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2742 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
2743 if (src->update_path)
2744 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
2745 dst->key_cache_path = NULL;
2748 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2750 unsigned new_top = trans->mem_top + size;
2751 size_t old_bytes = trans->mem_bytes;
2752 size_t new_bytes = roundup_pow_of_two(new_top);
2756 trans->mem_max = max(trans->mem_max, new_top);
2758 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2760 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2761 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2762 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2763 new_bytes = BTREE_TRANS_MEM_MAX;
2768 return ERR_PTR(-ENOMEM);
2770 trans->mem = new_mem;
2771 trans->mem_bytes = new_bytes;
2774 trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2775 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2778 p = trans->mem + trans->mem_top;
2779 trans->mem_top += size;
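/*
 * Example (editor's sketch): transaction-lifetime allocation via the
 * bch2_trans_kmalloc() wrapper. When a reallocation forces a restart, the
 * function returns an error pointer that the caller must propagate:
 *
 *	struct bkey_i *new_key = bch2_trans_kmalloc(trans, sizeof(*new_key));
 *	int ret = PTR_ERR_OR_ZERO(new_key);
 *	if (ret)
 *		return ret;
 *
 * The memory is owned by the transaction: it is reclaimed on restart and
 * freed at bch2_trans_exit(), never individually.
 */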
2784 static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
2786 struct bch_fs *c = trans->c;
2787 struct btree_path *path;
2789 trans_for_each_path(trans, path)
2790 if (path->cached && !btree_node_locked(path, 0))
2791 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
2793 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2794 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2795 trans->srcu_lock_time = jiffies;
2799  * bch2_trans_begin() - reset a transaction after an interrupted attempt
2800 * @trans: transaction to reset
2802  * While iterating over nodes or updating nodes, an attempt to lock a btree
2803  * node may return BCH_ERR_transaction_restart when the trylock fails. When
2804  * this occurs, bch2_trans_begin() should be called and the transaction retried.
2806 u32 bch2_trans_begin(struct btree_trans *trans)
2808 struct btree_path *path;
2810 bch2_trans_reset_updates(trans);
2812 trans->restart_count++;
2815 if (trans->fs_usage_deltas) {
2816 trans->fs_usage_deltas->used = 0;
2817 memset((void *) trans->fs_usage_deltas +
2818 offsetof(struct replicas_delta_list, memset_start), 0,
2819 (void *) &trans->fs_usage_deltas->memset_end -
2820 (void *) &trans->fs_usage_deltas->memset_start);
2823 trans_for_each_path(trans, path) {
2824 path->should_be_locked = false;
2827 * If the transaction wasn't restarted, we assume we're
2828 * doing something new: don't keep iterators except the ones
2829 * that are in use - except for the subvolumes btree:
2831 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
2832 path->preserve = false;
2835 * XXX: we probably shouldn't be doing this if the transaction
2836 * was restarted, but currently we still overflow transaction
2837 * iterators if we do that
2839 if (!path->ref && !path->preserve)
2840 __bch2_path_free(trans, path);
2842 path->preserve = false;
2845 if (!trans->restarted &&
2847 local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
2848 bch2_trans_unlock(trans);
2850 bch2_trans_relock(trans);
2853 if (unlikely(time_after(jiffies, trans->srcu_lock_time + HZ)))
2854 bch2_trans_reset_srcu_lock(trans);
2856 trans->last_restarted_ip = _RET_IP_;
2857 if (trans->restarted)
2858 bch2_btree_path_traverse_all(trans);
2860 trans->last_begin_time = local_clock();
2861 return trans->restart_count;
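/*
 * Example (editor's sketch): the canonical retry loop. do_one_thing() is a
 * placeholder for a btree operation that may return a transaction restart
 * error:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *		ret = do_one_thing(&trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 *	bch2_trans_exit(&trans);
 *
 * In-tree code usually spells this with helpers such as lockrestart_do() or
 * commit_do() rather than writing the loop by hand.
 */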
2864 void bch2_trans_verify_not_restarted(struct btree_trans *trans, u32 restart_count)
2866 if (trans_was_restarted(trans, restart_count))
2867 panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
2868 trans->restart_count, restart_count,
2869 (void *) trans->last_restarted_ip);
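/*
 * Example (editor's sketch): callers snapshot trans->restart_count around a
 * section that must not see a restart, then assert:
 *
 *	u32 restart_count = trans->restart_count;
 *
 *	... work that is assumed not to restart the transaction ...
 *
 *	bch2_trans_verify_not_restarted(trans, restart_count);
 */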
2872 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
2874 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
2875 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2878 BUG_ON(trans->used_mempool);
2881 p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
2884 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
2886 trans->paths = p; p += paths_bytes;
2887 trans->updates = p; p += updates_bytes;
2890 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
2892 unsigned bch2_trans_get_fn_idx(const char *fn)
2896 for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
2897 if (!bch2_btree_transaction_fns[i] ||
2898 bch2_btree_transaction_fns[i] == fn) {
2899 bch2_btree_transaction_fns[i] = fn;
2903 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
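/*
 * Editor's note: this is not normally called directly; the bch2_trans_init()
 * macro caches the index in a function-local static, roughly:
 *
 *	static unsigned trans_fn_idx;
 *
 *	if (unlikely(!trans_fn_idx))
 *		trans_fn_idx = bch2_trans_get_fn_idx(__func__);
 *	__bch2_trans_init(trans, c, trans_fn_idx);
 */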
2907 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx)
2908 __acquires(&c->btree_trans_barrier)
2910 struct btree_transaction_stats *s;
2911 struct btree_trans *pos;
2913 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
2915 memset(trans, 0, sizeof(*trans));
2917 trans->fn = fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
2918 ? bch2_btree_transaction_fns[fn_idx] : NULL;
2919 trans->last_begin_time = local_clock();
2920 trans->fn_idx = fn_idx;
2921 trans->locking_wait.task = current;
2922 trans->journal_replay_not_finished =
2923 !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
2924 closure_init_stack(&trans->ref);
2926 bch2_trans_alloc_paths(trans, c);
2928 s = btree_trans_stats(trans);
2929 if (s && s->max_mem) {
2930 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
2932 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
2934 if (unlikely(!trans->mem)) {
2935 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2936 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2938 trans->mem_bytes = expected_mem_bytes;
2942 trans->nr_max_paths = s->nr_max_paths;
2944 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2945 trans->srcu_lock_time = jiffies;
2947 mutex_lock(&c->btree_trans_lock);
2948 list_for_each_entry(pos, &c->btree_trans_list, list) {
2949 if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
2950 list_add_tail(&trans->list, &pos->list);
2954 list_add_tail(&trans->list, &c->btree_trans_list);
2956 mutex_unlock(&c->btree_trans_lock);
2959 static void check_btree_paths_leaked(struct btree_trans *trans)
2961 #ifdef CONFIG_BCACHEFS_DEBUG
2962 struct bch_fs *c = trans->c;
2963 struct btree_path *path;
2965 trans_for_each_path(trans, path)
2970 bch_err(c, "btree paths leaked from %s!", trans->fn);
2971 trans_for_each_path(trans, path)
2973 printk(KERN_ERR " btree %s %pS\n",
2974 bch2_btree_ids[path->btree_id],
2975 (void *) path->ip_allocated);
2976 /* Be noisy about this: */
2977 bch2_fatal_error(c);
2981 void bch2_trans_exit(struct btree_trans *trans)
2982 __releases(&c->btree_trans_barrier)
2984 struct btree_insert_entry *i;
2985 struct bch_fs *c = trans->c;
2986 struct btree_transaction_stats *s = btree_trans_stats(trans);
2988 bch2_trans_unlock(trans);
2990 closure_sync(&trans->ref);
2993 s->max_mem = max(s->max_mem, trans->mem_max);
2995 trans_for_each_update(trans, i)
2996 __btree_path_put(i->path, true);
2997 trans->nr_updates = 0;
2999 check_btree_paths_leaked(trans);
3001 mutex_lock(&c->btree_trans_lock);
3002 list_del(&trans->list);
3003 mutex_unlock(&c->btree_trans_lock);
3005 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3007 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3009 kfree(trans->extra_journal_entries.data);
3011 if (trans->fs_usage_deltas) {
3012 if (trans->fs_usage_deltas->size + sizeof(*trans->fs_usage_deltas) ==
3013 REPLICAS_DELTA_LIST_MAX)
3014 mempool_free(trans->fs_usage_deltas,
3015 &c->replicas_delta_pool);
3017 kfree(trans->fs_usage_deltas);
3020 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3021 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3027 * Userspace doesn't have a real percpu implementation:
3029 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3033 mempool_free(trans->paths, &c->btree_paths_pool);
3035 trans->mem = (void *) 0x1;	/* poison pointers, to catch use after exit */
3036 trans->paths = (void *) 0x1;
3039 static void __maybe_unused
3040 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3041 struct btree_bkey_cached_common *b)
3043 struct six_lock_count c = six_lock_counts(&b->lock);
3044 struct task_struct *owner;
3048 owner = READ_ONCE(b->lock.owner);
3049 pid = owner ? owner->pid : 0;
3053 prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3054 b->level, bch2_btree_ids[b->btree_id]);
3055 bch2_bpos_to_text(out, btree_node_pos(b));
3058 prt_printf(out, " locks %u:%u:%u held by pid %u",
3059 c.n[0], c.n[1], c.n[2], pid);
3062 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3064 struct btree_path *path;
3065 struct btree_bkey_cached_common *b;
3066 static const char lock_types[] = { 'r', 'i', 'w' };
3069 if (!out->nr_tabstops) {
3070 printbuf_tabstop_push(out, 16);
3071 printbuf_tabstop_push(out, 32);
3074 prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);
3076 trans_for_each_path(trans, path) {
3077 if (!path->nodes_locked)
3080 prt_printf(out, " path %u %c l=%u %s:",
3082 path->cached ? 'c' : 'b',
3084 bch2_btree_ids[path->btree_id]);
3085 bch2_bpos_to_text(out, path->pos);
3088 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3089 if (btree_node_locked(path, l) &&
3090 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3091 prt_printf(out, " %c l=%u ",
3092 lock_types[btree_node_locked_type(path, l)], l);
3093 bch2_btree_bkey_cached_common_to_text(out, b);
3099 b = READ_ONCE(trans->locking);
3101 prt_str(out, " want");
3103 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3104 bch2_btree_bkey_cached_common_to_text(out, b);
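/*
 * Editor's illustration (approximate shape only): the output is one header
 * line with the task pid and transaction function, then one line per path
 * holding locks, each followed by the nodes locked at each level, and
 * finally the lock the transaction is blocked on, if any.
 */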
3109 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3111 struct btree_transaction_stats *s;
3113 for (s = c->btree_transaction_stats;
3114 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3116 kfree(s->max_paths_text);
3118 if (c->btree_trans_barrier_initialized)
3119 cleanup_srcu_struct(&c->btree_trans_barrier);
3120 mempool_exit(&c->btree_trans_mem_pool);
3121 mempool_exit(&c->btree_paths_pool);
3124 int bch2_fs_btree_iter_init(struct bch_fs *c)
3126 unsigned i, nr = BTREE_ITER_MAX;
3129 for (i = 0; i < ARRAY_SIZE(c->btree_transaction_stats); i++)
3130 mutex_init(&c->btree_transaction_stats[i].lock);
3132 INIT_LIST_HEAD(&c->btree_trans_list);
3133 mutex_init(&c->btree_trans_lock);
3135 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3136 sizeof(struct btree_path) * nr +
3137 sizeof(struct btree_insert_entry) * nr) ?:
3138 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3139 BTREE_TRANS_MEM_MAX) ?:
3140 init_srcu_struct(&c->btree_trans_barrier);
3142 c->btree_trans_barrier_initialized = true;