1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_key_cache.h"
9 #include "btree_locking.h"
10 #include "btree_update.h"
17 #include "subvolume.h"
19 #include <linux/prandom.h>
20 #include <linux/prefetch.h>
21 #include <trace/events/bcachefs.h>
23 static void btree_trans_verify_sorted(struct btree_trans *);
24 inline void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
26 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
27 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
30 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
32 #ifdef CONFIG_BCACHEFS_DEBUG
33 return iter->ip_allocated;
39 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
42 * Unlocks before scheduling
43 * Note: does not revalidate iterator
45 static inline int bch2_trans_cond_resched(struct btree_trans *trans)
47 if (need_resched() || race_fault()) {
48 bch2_trans_unlock(trans);
50 return bch2_trans_relock(trans);
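/*
 * Example (illustrative sketch, not part of this file): a long-running
 * scan calls this between keys to yield the CPU. If the relock fails,
 * the result is a transaction restart error, which the caller must
 * propagate so the transaction is retried from bch2_trans_begin():
 *
 *	ret = bch2_trans_cond_resched(trans);
 *	if (ret)
 *		goto err;
 */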
56 static inline int __btree_path_cmp(const struct btree_path *l,
57 enum btree_id r_btree_id,
63 * Must match lock ordering as defined by __bch2_btree_node_lock:
65 return cmp_int(l->btree_id, r_btree_id) ?:
66 cmp_int((int) l->cached, (int) r_cached) ?:
67 bpos_cmp(l->pos, r_pos) ?:
68 -cmp_int(l->level, r_level);
71 static inline int btree_path_cmp(const struct btree_path *l,
72 const struct btree_path *r)
74 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
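/*
 * Ordering example (hypothetical paths, illustration only): paths sort
 * by btree_id, then uncached (cached == 0) before cached, then by
 * position; for equal positions higher levels sort first, courtesy of
 * the negated level comparison, so a parent's path precedes its child's:
 *
 *	{ BTREE_ID_extents, cached=0, POS(1, 0), level=1 }
 *	{ BTREE_ID_extents, cached=0, POS(1, 0), level=0 }
 *	{ BTREE_ID_extents, cached=1, POS(1, 0), level=0 }
 */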
77 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
79 /* Are we iterating over keys in all snapshots? */
80 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
81 p = bpos_successor(p);
83 p = bpos_nosnap_successor(p);
84 p.snapshot = iter->snapshot;
90 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
92 /* Are we iterating over keys in all snapshots? */
93 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
94 p = bpos_predecessor(p);
96 p = bpos_nosnap_predecessor(p);
97 p.snapshot = iter->snapshot;
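/*
 * Sketch with hypothetical positions: with BTREE_ITER_ALL_SNAPSHOTS the
 * snapshot field is just another key field, so the successor of
 * SPOS(1, 2, 3) is SPOS(1, 2, 4); without it, the snapshot field is
 * pinned, and the successor is SPOS(1, 3, iter->snapshot). The
 * predecessor case mirrors this.
 */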
103 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
105 struct bpos pos = iter->pos;
107 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
108 bkey_cmp(pos, POS_MAX))
109 pos = bkey_successor(iter, pos);
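/*
 * e.g. (sketch): extents are indexed by their end position, so an
 * extents iterator at POS(1, 10) searches from the successor
 * POS(1, 11) - an extent with p == POS(1, 10) ends exactly at the
 * iterator position and does not overlap it.
 */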
113 static inline bool btree_path_pos_before_node(struct btree_path *path,
116 return bpos_cmp(path->pos, b->data->min_key) < 0;
119 static inline bool btree_path_pos_after_node(struct btree_path *path,
122 return bpos_cmp(b->key.k.p, path->pos) < 0;
125 static inline bool btree_path_pos_in_node(struct btree_path *path,
128 return path->btree_id == b->c.btree_id &&
129 !btree_path_pos_before_node(path, b) &&
130 !btree_path_pos_after_node(path, b);
133 /* Btree iterator: */
135 #ifdef CONFIG_BCACHEFS_DEBUG
137 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
138 struct btree_path *path)
140 struct bkey_cached *ck;
141 bool locked = btree_node_locked(path, 0);
143 if (!bch2_btree_node_relock(trans, path, 0))
146 ck = (void *) path->l[0].b;
147 BUG_ON(ck->key.btree_id != path->btree_id ||
148 bkey_cmp(ck->key.pos, path->pos));
151 btree_node_unlock(trans, path, 0);
154 static void bch2_btree_path_verify_level(struct btree_trans *trans,
155 struct btree_path *path, unsigned level)
157 struct btree_path_level *l;
158 struct btree_node_iter tmp;
160 struct bkey_packed *p, *k;
161 struct printbuf buf1 = PRINTBUF;
162 struct printbuf buf2 = PRINTBUF;
163 struct printbuf buf3 = PRINTBUF;
166 if (!bch2_debug_check_iterators)
171 locked = btree_node_locked(path, level);
175 bch2_btree_path_verify_cached(trans, path);
179 if (!btree_path_node(path, level))
182 if (!bch2_btree_node_relock_notrace(trans, path, level))
185 BUG_ON(!btree_path_pos_in_node(path, l->b));
187 bch2_btree_node_iter_verify(&l->iter, l->b);
190 * For interior nodes, the iterator will have skipped past deleted keys:
193 ? bch2_btree_node_iter_prev(&tmp, l->b)
194 : bch2_btree_node_iter_prev_all(&tmp, l->b);
195 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
197 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
202 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
208 btree_node_unlock(trans, path, level);
211 bch2_bpos_to_text(&buf1, path->pos);
214 struct bkey uk = bkey_unpack_key(l->b, p);
215 bch2_bkey_to_text(&buf2, &uk);
217 prt_printf(&buf2, "(none)");
221 struct bkey uk = bkey_unpack_key(l->b, k);
222 bch2_bkey_to_text(&buf3, &uk);
224 prt_printf(&buf3, "(none)");
227 panic("path should be %s key at level %u:\n"
231 msg, level, buf1.buf, buf2.buf, buf3.buf);
234 static void bch2_btree_path_verify(struct btree_trans *trans,
235 struct btree_path *path)
237 struct bch_fs *c = trans->c;
240 EBUG_ON(path->btree_id >= BTREE_ID_NR);
242 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
244 BUG_ON(!path->cached &&
245 c->btree_roots[path->btree_id].b->c.level > i);
249 bch2_btree_path_verify_level(trans, path, i);
252 bch2_btree_path_verify_locks(path);
255 void bch2_trans_verify_paths(struct btree_trans *trans)
257 struct btree_path *path;
259 trans_for_each_path(trans, path)
260 bch2_btree_path_verify(trans, path);
263 static void bch2_btree_iter_verify(struct btree_iter *iter)
265 struct btree_trans *trans = iter->trans;
267 BUG_ON(iter->btree_id >= BTREE_ID_NR);
269 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
271 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
272 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
274 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
275 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
276 !btree_type_has_snapshots(iter->btree_id));
278 if (iter->update_path)
279 bch2_btree_path_verify(trans, iter->update_path);
280 bch2_btree_path_verify(trans, iter->path);
283 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
285 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
286 !iter->pos.snapshot);
288 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
289 iter->pos.snapshot != iter->snapshot);
291 BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
292 bkey_cmp(iter->pos, iter->k.p) > 0);
295 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
297 struct btree_trans *trans = iter->trans;
298 struct btree_iter copy;
299 struct bkey_s_c prev;
302 if (!bch2_debug_check_iterators)
305 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
308 if (bkey_err(k) || !k.k)
311 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
315 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
316 BTREE_ITER_NOPRESERVE|
317 BTREE_ITER_ALL_SNAPSHOTS);
318 prev = bch2_btree_iter_prev(&copy);
322 ret = bkey_err(prev);
326 if (!bkey_cmp(prev.k->p, k.k->p) &&
327 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
328 prev.k->p.snapshot) > 0) {
329 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
331 bch2_bkey_to_text(&buf1, k.k);
332 bch2_bkey_to_text(&buf2, prev.k);
334 panic("iter snap %u\n"
341 bch2_trans_iter_exit(trans, &copy);
345 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
346 struct bpos pos, bool key_cache)
348 struct btree_path *path;
350 struct printbuf buf = PRINTBUF;
352 trans_for_each_path_inorder(trans, path, idx) {
353 int cmp = cmp_int(path->btree_id, id) ?:
354 cmp_int(path->cached, key_cache);
361 if (!btree_node_locked(path, 0) ||
362 !path->should_be_locked)
366 if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
367 bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
370 if (!bkey_cmp(pos, path->pos))
375 bch2_dump_trans_paths_updates(trans);
376 bch2_bpos_to_text(&buf, pos);
378 panic("not locked: %s %s%s\n",
379 bch2_btree_ids[id], buf.buf,
380 key_cache ? " cached" : "");
385 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
386 struct btree_path *path, unsigned l) {}
387 static inline void bch2_btree_path_verify(struct btree_trans *trans,
388 struct btree_path *path) {}
389 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
390 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
391 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
395 /* Btree path: fixups after btree updates */
397 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
400 struct bkey_packed *k)
402 struct btree_node_iter_set *set;
404 btree_node_iter_for_each(iter, set)
405 if (set->end == t->end_offset) {
406 set->k = __btree_node_key_to_offset(b, k);
407 bch2_btree_node_iter_sort(iter, b);
411 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
414 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
416 struct bkey_packed *where)
418 struct btree_path_level *l = &path->l[b->c.level];
420 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
423 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
424 bch2_btree_node_iter_advance(&l->iter, l->b);
427 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
429 struct bkey_packed *where)
431 struct btree_path *path;
433 trans_for_each_path_with_node(trans, b, path) {
434 __bch2_btree_path_fix_key_modified(path, b, where);
435 bch2_btree_path_verify_level(trans, path, b->c.level);
439 static void __bch2_btree_node_iter_fix(struct btree_path *path,
441 struct btree_node_iter *node_iter,
443 struct bkey_packed *where,
444 unsigned clobber_u64s,
447 const struct bkey_packed *end = btree_bkey_last(b, t);
448 struct btree_node_iter_set *set;
449 unsigned offset = __btree_node_key_to_offset(b, where);
450 int shift = new_u64s - clobber_u64s;
451 unsigned old_end = t->end_offset - shift;
452 unsigned orig_iter_pos = node_iter->data[0].k;
453 bool iter_current_key_modified =
454 orig_iter_pos >= offset &&
455 orig_iter_pos <= offset + clobber_u64s;
457 btree_node_iter_for_each(node_iter, set)
458 if (set->end == old_end)
461 /* didn't find the bset in the iterator - might have to re-add it: */
463 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
464 bch2_btree_node_iter_push(node_iter, b, where, end);
467 /* Iterator is after key that changed */
471 set->end = t->end_offset;
473 /* Iterator hasn't gotten to the key that changed yet: */
478 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
480 } else if (set->k < offset + clobber_u64s) {
481 set->k = offset + new_u64s;
482 if (set->k == set->end)
483 bch2_btree_node_iter_set_drop(node_iter, set);
485 /* Iterator is after key that changed */
486 set->k = (int) set->k + shift;
490 bch2_btree_node_iter_sort(node_iter, b);
492 if (node_iter->data[0].k != orig_iter_pos)
493 iter_current_key_modified = true;
496 * When a new key is added, and the node iterator now points to that
497 * key, the iterator might have skipped past deleted keys that should
498 * come after the key the iterator now points to. We have to rewind to
499 * before those deleted keys - otherwise
500 * bch2_btree_node_iter_prev_all() breaks:
502 if (!bch2_btree_node_iter_end(node_iter) &&
503 iter_current_key_modified &&
506 struct bkey_packed *k, *k2, *p;
508 k = bch2_btree_node_iter_peek_all(node_iter, b);
510 for_each_bset(b, t) {
511 bool set_pos = false;
513 if (node_iter->data[0].end == t->end_offset)
516 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
518 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
519 bkey_iter_cmp(b, k, p) < 0) {
525 btree_node_iter_set_set_pos(node_iter,
531 void bch2_btree_node_iter_fix(struct btree_trans *trans,
532 struct btree_path *path,
534 struct btree_node_iter *node_iter,
535 struct bkey_packed *where,
536 unsigned clobber_u64s,
539 struct bset_tree *t = bch2_bkey_to_bset(b, where);
540 struct btree_path *linked;
542 if (node_iter != &path->l[b->c.level].iter) {
543 __bch2_btree_node_iter_fix(path, b, node_iter, t,
544 where, clobber_u64s, new_u64s);
546 if (bch2_debug_check_iterators)
547 bch2_btree_node_iter_verify(node_iter, b);
550 trans_for_each_path_with_node(trans, b, linked) {
551 __bch2_btree_node_iter_fix(linked, b,
552 &linked->l[b->c.level].iter, t,
553 where, clobber_u64s, new_u64s);
554 bch2_btree_path_verify_level(trans, linked, b->c.level);
558 /* Btree path level: pointer to a particular btree node and node iter */
560 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
561 struct btree_path_level *l,
563 struct bkey_packed *k)
567 * signal to bch2_btree_iter_peek_slot() that we're currently at a hole
570 u->type = KEY_TYPE_deleted;
571 return bkey_s_c_null;
574 return bkey_disassemble(l->b, k, u);
577 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
578 struct btree_path_level *l,
581 return __btree_iter_unpack(c, l, u,
582 bch2_btree_node_iter_peek_all(&l->iter, l->b));
585 static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
586 struct btree_path *path,
587 struct btree_path_level *l,
590 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
591 bch2_btree_node_iter_peek(&l->iter, l->b));
593 path->pos = k.k ? k.k->p : l->b->key.k.p;
594 bch2_btree_path_verify_level(trans, path, l - path->l);
598 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
599 struct btree_path *path,
600 struct btree_path_level *l,
603 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
604 bch2_btree_node_iter_prev(&l->iter, l->b));
606 path->pos = k.k ? k.k->p : l->b->data->min_key;
607 bch2_btree_path_verify_level(trans, path, l - path->l);
611 static inline bool btree_path_advance_to_pos(struct btree_path *path,
612 struct btree_path_level *l,
615 struct bkey_packed *k;
618 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
619 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
620 if (max_advance > 0 && nr_advanced >= max_advance)
623 bch2_btree_node_iter_advance(&l->iter, l->b);
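/*
 * Usage sketch (this mirrors bch2_btree_path_set_pos() below): step the
 * node iterator forward a bounded number of keys, and if the target is
 * further away than that, fall back to reinitializing the iterator:
 *
 *	if (!btree_path_advance_to_pos(path, &path->l[l], 8))
 *		__btree_path_level_init(path, l);
 */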
630 static inline void __btree_path_level_init(struct btree_path *path,
633 struct btree_path_level *l = &path->l[level];
635 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
638 * Iterators to interior nodes should always be pointed at the first non-whiteout:
642 bch2_btree_node_iter_peek(&l->iter, l->b);
645 inline void bch2_btree_path_level_init(struct btree_trans *trans,
646 struct btree_path *path,
649 BUG_ON(path->cached);
651 EBUG_ON(!btree_path_pos_in_node(path, b));
652 EBUG_ON(b->c.lock.state.seq & 1);
654 path->l[b->c.level].lock_seq = b->c.lock.state.seq;
655 path->l[b->c.level].b = b;
656 __btree_path_level_init(path, b->c.level);
659 /* Btree path: fixups after btree node updates: */
662 * A btree node is being replaced - update the iterator to point to the new node:
665 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
667 struct btree_path *path;
669 trans_for_each_path(trans, path)
670 if (path->uptodate == BTREE_ITER_UPTODATE &&
672 btree_path_pos_in_node(path, b)) {
673 enum btree_node_locked_type t =
674 btree_lock_want(path, b->c.level);
676 if (t != BTREE_NODE_UNLOCKED) {
677 btree_node_unlock(trans, path, b->c.level);
678 six_lock_increment(&b->c.lock, t);
679 mark_btree_node_locked(trans, path, b->c.level, t);
682 bch2_btree_path_level_init(trans, path, b);
687 * A btree node has been modified in such a way as to invalidate iterators - fix them:
690 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
692 struct btree_path *path;
694 trans_for_each_path_with_node(trans, b, path)
695 __btree_path_level_init(path, b->c.level);
698 /* Btree path: traverse, set_pos: */
700 static inline int btree_path_lock_root(struct btree_trans *trans,
701 struct btree_path *path,
703 unsigned long trace_ip)
705 struct bch_fs *c = trans->c;
706 struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
707 enum six_lock_type lock_type;
711 EBUG_ON(path->nodes_locked);
714 b = READ_ONCE(*rootp);
715 path->level = READ_ONCE(b->c.level);
717 if (unlikely(path->level < depth_want)) {
719 * the root is at a lower depth than the depth we want:
720 * got to the end of the btree, or we're walking nodes
721 greater than some depth and there are no nodes >= that depth
724 path->level = depth_want;
725 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
730 lock_type = __btree_lock_want(path, path->level);
731 ret = btree_node_lock(trans, path, &b->c,
732 path->level, lock_type, trace_ip);
734 if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
736 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
741 if (likely(b == READ_ONCE(*rootp) &&
742 b->c.level == path->level &&
744 for (i = 0; i < path->level; i++)
745 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
746 path->l[path->level].b = b;
747 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
750 mark_btree_node_locked(trans, path, path->level, lock_type);
751 bch2_btree_path_level_init(trans, path, b);
755 six_unlock_type(&b->c.lock, lock_type);
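/*
 * The loop above is a read-check-retry pattern (pseudocode sketch):
 * read the root pointer, take the lock - possibly blocking while the
 * root changes - then verify we still locked the current root:
 *
 *	b = READ_ONCE(*rootp);
 *	lock(b);
 *	if (b == READ_ONCE(*rootp) and level unchanged)
 *		done;
 *	unlock(b) and retry;
 */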
760 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
762 struct bch_fs *c = trans->c;
763 struct btree_path_level *l = path_l(path);
764 struct btree_node_iter node_iter = l->iter;
765 struct bkey_packed *k;
767 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
768 ? (path->level > 1 ? 0 : 2)
769 : (path->level > 1 ? 1 : 16);
770 bool was_locked = btree_node_locked(path, path->level);
773 bch2_bkey_buf_init(&tmp);
776 if (!bch2_btree_node_relock(trans, path, path->level))
779 bch2_btree_node_iter_advance(&node_iter, l->b);
780 k = bch2_btree_node_iter_peek(&node_iter, l->b);
784 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
785 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
790 btree_node_unlock(trans, path, path->level);
792 bch2_bkey_buf_exit(&tmp, c);
796 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
797 struct btree_and_journal_iter *jiter)
799 struct bch_fs *c = trans->c;
802 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
803 ? (path->level > 1 ? 0 : 2)
804 : (path->level > 1 ? 1 : 16);
805 bool was_locked = btree_node_locked(path, path->level);
808 bch2_bkey_buf_init(&tmp);
811 if (!bch2_btree_node_relock(trans, path, path->level))
814 bch2_btree_and_journal_iter_advance(jiter);
815 k = bch2_btree_and_journal_iter_peek(jiter);
819 bch2_bkey_buf_reassemble(&tmp, c, k);
820 ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
825 btree_node_unlock(trans, path, path->level);
827 bch2_bkey_buf_exit(&tmp, c);
831 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
832 struct btree_path *path,
833 unsigned plevel, struct btree *b)
835 struct btree_path_level *l = &path->l[plevel];
836 bool locked = btree_node_locked(path, plevel);
837 struct bkey_packed *k;
838 struct bch_btree_ptr_v2 *bp;
840 if (!bch2_btree_node_relock(trans, path, plevel))
843 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
844 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
846 bp = (void *) bkeyp_val(&l->b->format, k);
847 bp->mem_ptr = (unsigned long)b;
850 btree_node_unlock(trans, path, plevel);
853 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
854 struct btree_path *path,
856 struct bkey_buf *out)
858 struct bch_fs *c = trans->c;
859 struct btree_path_level *l = path_l(path);
860 struct btree_and_journal_iter jiter;
864 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
866 k = bch2_btree_and_journal_iter_peek(&jiter);
868 bch2_bkey_buf_reassemble(out, c, k);
870 if (flags & BTREE_ITER_PREFETCH)
871 ret = btree_path_prefetch_j(trans, path, &jiter);
873 bch2_btree_and_journal_iter_exit(&jiter);
877 static __always_inline int btree_path_down(struct btree_trans *trans,
878 struct btree_path *path,
880 unsigned long trace_ip)
882 struct bch_fs *c = trans->c;
883 struct btree_path_level *l = path_l(path);
885 unsigned level = path->level - 1;
886 enum six_lock_type lock_type = __btree_lock_want(path, level);
887 bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
891 EBUG_ON(!btree_node_locked(path, path->level));
893 bch2_bkey_buf_init(&tmp);
895 if (unlikely(!replay_done)) {
896 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
900 bch2_bkey_buf_unpack(&tmp, c, l->b,
901 bch2_btree_node_iter_peek(&l->iter, l->b));
903 if (flags & BTREE_ITER_PREFETCH) {
904 ret = btree_path_prefetch(trans, path);
910 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
911 ret = PTR_ERR_OR_ZERO(b);
915 if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
916 unlikely(b != btree_node_mem_ptr(tmp.k)))
917 btree_node_mem_ptr_set(trans, path, level + 1, b);
919 if (btree_node_read_locked(path, level + 1))
920 btree_node_unlock(trans, path, level + 1);
922 mark_btree_node_locked(trans, path, level, lock_type);
924 bch2_btree_path_level_init(trans, path, b);
926 bch2_btree_path_verify_locks(path);
928 bch2_bkey_buf_exit(&tmp, c);
932 static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
933 unsigned, unsigned long);
935 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
937 struct bch_fs *c = trans->c;
938 struct btree_path *path;
939 unsigned long trace_ip = _RET_IP_;
942 if (trans->in_traverse_all)
943 return -BCH_ERR_transaction_restart_in_traverse_all;
945 trans->in_traverse_all = true;
947 trans->restarted = 0;
948 trans->traverse_all_idx = U8_MAX;
950 trans_for_each_path(trans, path)
951 path->should_be_locked = false;
953 btree_trans_verify_sorted(trans);
955 bch2_trans_unlock(trans);
958 if (unlikely(trans->memory_allocation_failure)) {
961 closure_init_stack(&cl);
964 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
969 /* Now, redo traversals in correct order: */
970 trans->traverse_all_idx = 0;
971 while (trans->traverse_all_idx < trans->nr_sorted) {
972 path = trans->paths + trans->sorted[trans->traverse_all_idx];
975 * Traversing a path can cause another path to be added at about the same position:
978 if (path->uptodate) {
979 ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
980 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
985 BUG_ON(path->uptodate);
987 trans->traverse_all_idx++;
992 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
993 * and relock(), relock() won't relock since path->should_be_locked
994 * isn't set yet, which is all fine
996 trans_for_each_path(trans, path)
997 BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
999 bch2_btree_cache_cannibalize_unlock(c);
1001 trans->in_traverse_all = false;
1003 trace_and_count(c, trans_traverse_all, trans, trace_ip);
1007 static inline bool btree_path_good_node(struct btree_trans *trans,
1008 struct btree_path *path,
1009 unsigned l, int check_pos)
1011 if (!is_btree_node(path, l) ||
1012 !bch2_btree_node_relock(trans, path, l))
1015 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1017 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1022 static void btree_path_set_level_down(struct btree_trans *trans,
1023 struct btree_path *path,
1028 path->level = new_level;
1030 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1031 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1032 btree_node_unlock(trans, path, l);
1034 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1035 bch2_btree_path_verify(trans, path);
1038 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1039 struct btree_path *path,
1042 unsigned i, l = path->level;
1044 while (btree_path_node(path, l) &&
1045 !btree_path_good_node(trans, path, l, check_pos))
1046 __btree_path_set_level_up(trans, path, l++);
1048 /* If we need intent locks, take them too: */
1050 i < path->locks_want && btree_path_node(path, i);
1052 if (!bch2_btree_node_relock(trans, path, i)) {
1054 __btree_path_set_level_up(trans, path, l++);
1062 * This is the main state machine for walking down the btree - walks down to a specified depth
1065 * Returns 0 on success, -EIO on error (error reading in a btree node).
1067 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1068 * stashed in the iterator and returned from bch2_trans_exit().
1070 static int btree_path_traverse_one(struct btree_trans *trans,
1071 struct btree_path *path,
1073 unsigned long trace_ip)
1075 unsigned depth_want = path->level;
1076 int ret = trans->restarted;
1082 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1083 * and re-traverse the path without a transaction restart:
1085 if (path->should_be_locked) {
1086 ret = bch2_btree_path_relock(trans, path, trace_ip);
1091 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1095 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1098 path->level = btree_path_up_until_good_node(trans, path, 0);
1100 EBUG_ON(btree_path_node(path, path->level) &&
1101 !btree_node_locked(path, path->level));
1104 * Note: path->nodes[path->level] may be temporarily NULL here - that
1105 * would indicate to other code that we got to the end of the btree,
1106 * here it indicates that relocking the root failed - it's critical that
1107 * btree_path_lock_root() comes next and that it can't fail
1109 while (path->level > depth_want) {
1110 ret = btree_path_node(path, path->level)
1111 ? btree_path_down(trans, path, flags, trace_ip)
1112 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1113 if (unlikely(ret)) {
1116 * No nodes at this level - got to the end of the btree:
1123 __bch2_btree_path_unlock(trans, path);
1124 path->level = depth_want;
1125 path->l[path->level].b = ERR_PTR(ret);
1130 path->uptodate = BTREE_ITER_UPTODATE;
1132 BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);
1133 bch2_btree_path_verify(trans, path);
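/*
 * Caller-side pattern (illustrative sketch; do_work() is hypothetical):
 * traversal can fail with a transaction restart, after which every path
 * must be re-traversed from the top:
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_work(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */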
1137 int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
1138 struct btree_path *path, unsigned flags)
1140 if (0 && IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
1141 unsigned restart_probability_bits = 4 << min(trans->restart_count, 32U);
1142 u64 mask = ~(~0ULL << restart_probability_bits);
1144 if ((prandom_u32() & mask) == mask) {
1145 trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
1146 return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
1150 if (path->uptodate < BTREE_ITER_NEED_RELOCK)
1153 return bch2_trans_cond_resched(trans) ?:
1154 btree_path_traverse_one(trans, path, flags, _RET_IP_);
1157 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1158 struct btree_path *src)
1160 unsigned i, offset = offsetof(struct btree_path, pos);
1162 memcpy((void *) dst + offset,
1163 (void *) src + offset,
1164 sizeof(struct btree_path) - offset);
1166 for (i = 0; i < BTREE_MAX_DEPTH; i++)
1167 if (btree_node_locked(dst, i))
1168 six_lock_increment(&dst->l[i].b->c.lock,
1169 __btree_lock_want(dst, i));
1171 bch2_btree_path_check_sort(trans, dst, 0);
1174 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1177 struct btree_path *new = btree_path_alloc(trans, src);
1179 btree_path_copy(trans, new, src);
1180 __btree_path_get(new, intent);
1184 inline struct btree_path * __must_check
1185 bch2_btree_path_make_mut(struct btree_trans *trans,
1186 struct btree_path *path, bool intent,
1189 if (path->ref > 1 || path->preserve) {
1190 __btree_path_put(path, intent);
1191 path = btree_path_clone(trans, path, intent);
1192 path->preserve = false;
1193 #ifdef CONFIG_BCACHEFS_DEBUG
1194 path->ip_allocated = ip;
1196 btree_trans_verify_sorted(trans);
1199 path->should_be_locked = false;
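/*
 * Copy-on-write sketch: a path that is shared (ref > 1) or marked
 * preserve is cloned before being mutated, e.g.
 *
 *	path = bch2_btree_path_make_mut(trans, path, intent, ip);
 *	path->pos = new_pos;
 *
 * (as in bch2_btree_path_set_pos() below) - so a mutation never moves a
 * path out from under another user holding a reference.
 */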
1203 struct btree_path * __must_check
1204 bch2_btree_path_set_pos(struct btree_trans *trans,
1205 struct btree_path *path, struct bpos new_pos,
1206 bool intent, unsigned long ip)
1208 int cmp = bpos_cmp(new_pos, path->pos);
1209 unsigned l = path->level;
1211 EBUG_ON(trans->restarted);
1212 EBUG_ON(!path->ref);
1217 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1219 path->pos = new_pos;
1221 bch2_btree_path_check_sort(trans, path, cmp);
1223 if (unlikely(path->cached)) {
1224 btree_node_unlock(trans, path, 0);
1225 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1226 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1230 l = btree_path_up_until_good_node(trans, path, cmp);
1232 if (btree_path_node(path, l)) {
1233 BUG_ON(!btree_node_locked(path, l));
1235 * We might have to skip over many keys, or just a few: try
1236 * advancing the node iterator, and if we have to skip over too
1237 many keys just reinit it (or if we're rewinding, since that is expensive).
1241 !btree_path_advance_to_pos(path, &path->l[l], 8))
1242 __btree_path_level_init(path, l);
1245 if (l != path->level) {
1246 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1247 __bch2_btree_path_unlock(trans, path);
1250 bch2_btree_path_verify(trans, path);
1254 /* Btree path: main interface: */
1256 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1258 struct btree_path *sib;
1260 sib = prev_btree_path(trans, path);
1261 if (sib && !btree_path_cmp(sib, path))
1264 sib = next_btree_path(trans, path);
1265 if (sib && !btree_path_cmp(sib, path))
1271 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1273 struct btree_path *sib;
1275 sib = prev_btree_path(trans, path);
1276 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1279 sib = next_btree_path(trans, path);
1280 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1286 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1288 __bch2_btree_path_unlock(trans, path);
1289 btree_path_list_remove(trans, path);
1290 trans->paths_allocated &= ~(1ULL << path->idx);
1293 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1295 struct btree_path *dup;
1297 EBUG_ON(trans->paths + path->idx != path);
1298 EBUG_ON(!path->ref);
1300 if (!__btree_path_put(path, intent))
1303 dup = path->preserve
1304 ? have_path_at_pos(trans, path)
1305 : have_node_at_pos(trans, path);
1307 if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1310 if (path->should_be_locked &&
1311 !trans->restarted &&
1312 (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
1316 dup->preserve |= path->preserve;
1317 dup->should_be_locked |= path->should_be_locked;
1320 __bch2_path_free(trans, path);
1323 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1325 struct btree_insert_entry *i;
1327 prt_printf(buf, "transaction updates for %s journal seq %llu",
1328 trans->fn, trans->journal_res.seq);
1330 printbuf_indent_add(buf, 2);
1332 trans_for_each_update(trans, i) {
1333 struct bkey_s_c old = { &i->old_k, i->old_v };
1335 prt_printf(buf, "update: btree=%s cached=%u %pS",
1336 bch2_btree_ids[i->btree_id],
1338 (void *) i->ip_allocated);
1341 prt_printf(buf, " old ");
1342 bch2_bkey_val_to_text(buf, trans->c, old);
1345 prt_printf(buf, " new ");
1346 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1350 printbuf_indent_sub(buf, 2);
1354 void bch2_dump_trans_updates(struct btree_trans *trans)
1356 struct printbuf buf = PRINTBUF;
1358 bch2_trans_updates_to_text(&buf, trans);
1359 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1360 printbuf_exit(&buf);
1363 void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
1365 prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
1366 path->idx, path->ref, path->intent_ref,
1367 path->preserve ? 'P' : ' ',
1368 path->should_be_locked ? 'S' : ' ',
1369 bch2_btree_ids[path->btree_id],
1371 bch2_bpos_to_text(out, path->pos);
1373 prt_printf(out, " locks %u", path->nodes_locked);
1374 #ifdef CONFIG_BCACHEFS_DEBUG
1375 prt_printf(out, " %pS", (void *) path->ip_allocated);
1380 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1382 struct btree_path *path;
1385 trans_for_each_path_inorder(trans, path, idx)
1386 bch2_btree_path_to_text(out, path);
1390 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1392 struct printbuf buf = PRINTBUF;
1394 bch2_trans_paths_to_text(&buf, trans);
1395 bch2_trans_updates_to_text(&buf, trans);
1397 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1398 printbuf_exit(&buf);
1402 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1404 struct btree_transaction_stats *s = btree_trans_stats(trans);
1405 struct printbuf buf = PRINTBUF;
1407 bch2_trans_paths_to_text(&buf, trans);
1409 if (!buf.allocation_failure) {
1410 mutex_lock(&s->lock);
1411 if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
1412 s->nr_max_paths = trans->nr_max_paths =
1413 hweight64(trans->paths_allocated);
1414 swap(s->max_paths_text, buf.buf);
1416 mutex_unlock(&s->lock);
1419 printbuf_exit(&buf);
1422 static noinline void btree_path_overflow(struct btree_trans *trans)
1424 bch2_dump_trans_paths_updates(trans);
1425 panic("trans path overflow\n");
1428 static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
1429 struct btree_path *pos)
1431 struct btree_path *path;
1434 if (unlikely(trans->paths_allocated ==
1435 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
1436 btree_path_overflow(trans);
1438 idx = __ffs64(~trans->paths_allocated);
1439 trans->paths_allocated |= 1ULL << idx;
1441 if (unlikely(idx > trans->nr_max_paths))
1442 bch2_trans_update_max_paths(trans);
1444 path = &trans->paths[idx];
1448 path->intent_ref = 0;
1449 path->nodes_locked = 0;
1451 btree_path_list_add(trans, pos, path);
1455 struct btree_path *bch2_path_get(struct btree_trans *trans,
1456 enum btree_id btree_id, struct bpos pos,
1457 unsigned locks_want, unsigned level,
1458 unsigned flags, unsigned long ip)
1460 struct btree_path *path, *path_pos = NULL;
1461 bool cached = flags & BTREE_ITER_CACHED;
1462 bool intent = flags & BTREE_ITER_INTENT;
1465 BUG_ON(trans->restarted);
1466 btree_trans_verify_sorted(trans);
1467 bch2_trans_verify_locks(trans);
1469 trans_for_each_path_inorder(trans, path, i) {
1470 if (__btree_path_cmp(path,
1481 path_pos->cached == cached &&
1482 path_pos->btree_id == btree_id &&
1483 path_pos->level == level) {
1484 __btree_path_get(path_pos, intent);
1485 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1487 path = btree_path_alloc(trans, path_pos);
1490 __btree_path_get(path, intent);
1492 path->btree_id = btree_id;
1493 path->cached = cached;
1494 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1495 path->should_be_locked = false;
1496 path->level = level;
1497 path->locks_want = locks_want;
1498 path->nodes_locked = 0;
1499 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1500 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
1501 #ifdef CONFIG_BCACHEFS_DEBUG
1502 path->ip_allocated = ip;
1504 btree_trans_verify_sorted(trans);
1507 if (!(flags & BTREE_ITER_NOPRESERVE))
1508 path->preserve = true;
1510 if (path->intent_ref)
1511 locks_want = max(locks_want, level + 1);
1514 * If the path has locks_want greater than requested, we don't downgrade
1515 * it here - on transaction restart because btree node split needs to
1516 * upgrade locks, we might be putting/getting the iterator again.
1517 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1518 * a successful transaction commit.
1521 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1522 if (locks_want > path->locks_want)
1523 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);
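/*
 * Illustrative pairing (sketch; flags and locks_want abbreviated):
 * every bch2_path_get() must be matched by a bch2_path_put() with the
 * same intent flag:
 *
 *	path = bch2_path_get(trans, BTREE_ID_extents, pos,
 *			     0, 0, 0, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, path, 0);
 *	...
 *	bch2_path_put(trans, path, false);
 */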
1528 inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1533 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1534 EBUG_ON(!btree_node_locked(path, path->level));
1536 if (!path->cached) {
1537 struct btree_path_level *l = path_l(path);
1538 struct bkey_packed *_k;
1540 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1541 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1543 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
1545 if (!k.k || bpos_cmp(path->pos, k.k->p))
1548 struct bkey_cached *ck = (void *) path->l[0].b;
1551 (path->btree_id != ck->key.btree_id ||
1552 bkey_cmp(path->pos, ck->key.pos)));
1553 EBUG_ON(!ck || !ck->valid);
1556 k = bkey_i_to_s_c(ck->k);
1563 return (struct bkey_s_c) { u, NULL };
1566 /* Btree iterators: */
1569 __bch2_btree_iter_traverse(struct btree_iter *iter)
1571 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1575 bch2_btree_iter_traverse(struct btree_iter *iter)
1579 iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
1580 btree_iter_search_key(iter),
1581 iter->flags & BTREE_ITER_INTENT,
1582 btree_iter_ip_allocated(iter));
1584 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1588 btree_path_set_should_be_locked(iter->path);
1592 /* Iterate across nodes (leaf and interior nodes) */
1594 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1596 struct btree_trans *trans = iter->trans;
1597 struct btree *b = NULL;
1600 EBUG_ON(iter->path->cached);
1601 bch2_btree_iter_verify(iter);
1603 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1607 b = btree_path_node(iter->path, iter->path->level);
1611 BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
1613 bkey_init(&iter->k);
1614 iter->k.p = iter->pos = b->key.k.p;
1616 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1617 iter->flags & BTREE_ITER_INTENT,
1618 btree_iter_ip_allocated(iter));
1619 btree_path_set_should_be_locked(iter->path);
1621 bch2_btree_iter_verify_entry_exit(iter);
1622 bch2_btree_iter_verify(iter);
1630 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1632 struct btree_trans *trans = iter->trans;
1633 struct btree_path *path = iter->path;
1634 struct btree *b = NULL;
1637 BUG_ON(trans->restarted);
1638 EBUG_ON(iter->path->cached);
1639 bch2_btree_iter_verify(iter);
1641 /* already at end? */
1642 if (!btree_path_node(path, path->level))
1646 if (!btree_path_node(path, path->level + 1)) {
1647 btree_path_set_level_up(trans, path);
1651 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1652 __bch2_btree_path_unlock(trans, path);
1653 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1654 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1655 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1656 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1657 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1661 b = btree_path_node(path, path->level + 1);
1663 if (!bpos_cmp(iter->pos, b->key.k.p)) {
1664 __btree_path_set_level_up(trans, path, path->level++);
1667 * Haven't gotten to the end of the parent node: go back down to
1668 * the next child node
1671 bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
1672 iter->flags & BTREE_ITER_INTENT,
1673 btree_iter_ip_allocated(iter));
1675 btree_path_set_level_down(trans, path, iter->min_depth);
1677 ret = bch2_btree_path_traverse(trans, path, iter->flags);
1681 b = path->l[path->level].b;
1684 bkey_init(&iter->k);
1685 iter->k.p = iter->pos = b->key.k.p;
1687 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1688 iter->flags & BTREE_ITER_INTENT,
1689 btree_iter_ip_allocated(iter));
1690 btree_path_set_should_be_locked(iter->path);
1691 BUG_ON(iter->path->uptodate);
1693 bch2_btree_iter_verify_entry_exit(iter);
1694 bch2_btree_iter_verify(iter);
1702 /* Iterate across keys (in leaf nodes only) */
1704 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1706 if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
1707 struct bpos pos = iter->k.p;
1708 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1709 ? bpos_cmp(pos, SPOS_MAX)
1710 : bkey_cmp(pos, SPOS_MAX)) != 0;
1712 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1713 pos = bkey_successor(iter, pos);
1714 bch2_btree_iter_set_pos(iter, pos);
1717 if (!btree_path_node(iter->path, iter->path->level))
1720 iter->advanced = true;
1725 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1727 struct bpos pos = bkey_start_pos(&iter->k);
1728 bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1729 ? bpos_cmp(pos, POS_MIN)
1730 : bkey_cmp(pos, POS_MIN)) != 0;
1732 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1733 pos = bkey_predecessor(iter, pos);
1734 bch2_btree_iter_set_pos(iter, pos);
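/*
 * Typical forward iteration built on these helpers (sketch; error and
 * restart handling abbreviated):
 *
 *	for (k = bch2_btree_iter_peek(&iter);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(&iter)) {
 *		...
 *	}
 */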
1738 static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
1739 enum btree_id btree_id,
1742 struct btree_insert_entry *i;
1743 struct bkey_i *ret = NULL;
1745 trans_for_each_update(trans, i) {
1746 if (i->btree_id < btree_id)
1748 if (i->btree_id > btree_id)
1750 if (bpos_cmp(i->k->k.p, pos) < 0)
1752 if (i->key_cache_already_flushed)
1754 if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
1761 struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
1762 struct btree_iter *iter,
1763 struct bpos start_pos,
1764 struct bpos end_pos)
1768 if (bpos_cmp(start_pos, iter->journal_pos) < 0)
1769 iter->journal_idx = 0;
1771 k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id, 0,
1773 &iter->journal_idx);
1775 iter->journal_pos = k ? k->k.p : end_pos;
1779 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *trans,
1780 struct btree_iter *iter,
1783 return bch2_btree_journal_peek(trans, iter, pos, pos);
1787 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
1788 struct btree_iter *iter,
1791 struct bkey_i *next_journal =
1792 bch2_btree_journal_peek(trans, iter, iter->path->pos,
1793 k.k ? k.k->p : iter->path->l[0].b->key.k.p);
1796 iter->k = next_journal->k;
1797 k = bkey_i_to_s_c(next_journal);
1804 * Checks btree key cache for key at iter->pos and returns it if present, or bkey_s_c_null:
1808 struct bkey_s_c __btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
1810 struct btree_trans *trans = iter->trans;
1811 struct bch_fs *c = trans->c;
1815 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
1816 return bkey_s_c_null;
1818 if (!iter->key_cache_path)
1819 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
1820 iter->flags & BTREE_ITER_INTENT, 0,
1821 iter->flags|BTREE_ITER_CACHED,
1824 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
1825 iter->flags & BTREE_ITER_INTENT,
1826 btree_iter_ip_allocated(iter));
1828 ret = bch2_btree_path_traverse(trans, iter->key_cache_path, iter->flags|BTREE_ITER_CACHED);
1830 return bkey_s_c_err(ret);
1832 btree_path_set_should_be_locked(iter->key_cache_path);
1834 return bch2_btree_path_peek_slot(iter->key_cache_path, &u);
1838 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
1840 struct bkey_s_c ret = __btree_trans_peek_key_cache(iter, pos);
1841 int err = bkey_err(ret) ?: bch2_btree_path_relock(iter->trans, iter->path, _THIS_IP_);
1843 return err ? bkey_s_c_err(err) : ret;
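/*
 * e.g. (sketch): with BTREE_ITER_WITH_KEY_CACHE, a dirty key sitting in
 * the key cache shadows the stale version in the btree proper - when a
 * cached key exists at the position peek found, the cached version is
 * the one returned.
 */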
1846 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
1848 struct btree_trans *trans = iter->trans;
1849 struct bkey_i *next_update;
1850 struct bkey_s_c k, k2;
1853 EBUG_ON(iter->path->cached || iter->path->level);
1854 bch2_btree_iter_verify(iter);
1857 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
1858 iter->flags & BTREE_ITER_INTENT,
1859 btree_iter_ip_allocated(iter));
1861 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1862 if (unlikely(ret)) {
1863 /* ensure that iter->k is consistent with iter->pos: */
1864 bch2_btree_iter_set_pos(iter, iter->pos);
1865 k = bkey_s_c_err(ret);
1869 btree_path_set_should_be_locked(iter->path);
1871 k = btree_path_level_peek_all(trans->c, &iter->path->l[0], &iter->k);
1873 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
1875 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
1879 bch2_btree_iter_set_pos(iter, iter->pos);
1884 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
1885 k = btree_trans_peek_journal(trans, iter, k);
1887 next_update = iter->flags & BTREE_ITER_WITH_UPDATES
1888 ? btree_trans_peek_updates(trans, iter->btree_id, search_key)
1891 bpos_cmp(next_update->k.p,
1892 k.k ? k.k->p : iter->path->l[0].b->key.k.p) <= 0) {
1893 iter->k = next_update->k;
1894 k = bkey_i_to_s_c(next_update);
1897 if (k.k && bkey_deleted(k.k)) {
1899 * If we've got a whiteout, and it's after the search
1900 * key, advance the search key to the whiteout instead
1901 * of just after the whiteout - it might be a btree
1902 * whiteout, with a real key at the same position, since
1903 * in the btree deleted keys sort before non-deleted.
1905 search_key = bpos_cmp(search_key, k.k->p)
1907 : bpos_successor(k.k->p);
1913 } else if (likely(bpos_cmp(iter->path->l[0].b->key.k.p, SPOS_MAX))) {
1914 /* Advance to next leaf node: */
1915 search_key = bpos_successor(iter->path->l[0].b->key.k.p);
1918 bch2_btree_iter_set_pos(iter, SPOS_MAX);
1924 bch2_btree_iter_verify(iter);
1930 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's current position
1933 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
1935 struct btree_trans *trans = iter->trans;
1936 struct bpos search_key = btree_iter_search_key(iter);
1938 struct bpos iter_pos;
1941 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
1943 if (iter->update_path) {
1944 bch2_path_put(trans, iter->update_path,
1945 iter->flags & BTREE_ITER_INTENT);
1946 iter->update_path = NULL;
1949 bch2_btree_iter_verify_entry_exit(iter);
1952 k = __bch2_btree_iter_peek(iter, search_key);
1953 if (!k.k || bkey_err(k))
1957 * iter->pos should be monotonically increasing, and always be
1958 * equal to the key we just returned - except extents can
1959 * straddle iter->pos:
1961 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
1963 else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
1964 iter_pos = bkey_start_pos(k.k);
1966 iter_pos = iter->pos;
1968 if (bkey_cmp(iter_pos, end) > 0) {
1969 bch2_btree_iter_set_pos(iter, end);
1974 if (iter->update_path &&
1975 bkey_cmp(iter->update_path->pos, k.k->p)) {
1976 bch2_path_put(trans, iter->update_path,
1977 iter->flags & BTREE_ITER_INTENT);
1978 iter->update_path = NULL;
1981 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
1982 (iter->flags & BTREE_ITER_INTENT) &&
1983 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
1984 !iter->update_path) {
1985 struct bpos pos = k.k->p;
1987 if (pos.snapshot < iter->snapshot) {
1988 search_key = bpos_successor(k.k->p);
1992 pos.snapshot = iter->snapshot;
1995 * advance, same as on exit for iter->path, but only up to snapshot
1998 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
1999 iter->update_path = iter->path;
2001 iter->update_path = bch2_btree_path_set_pos(trans,
2002 iter->update_path, pos,
2003 iter->flags & BTREE_ITER_INTENT,
2008 * We can never have a key in a leaf node at POS_MAX, so
2009 * we don't have to check these successor() calls:
2011 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2012 !bch2_snapshot_is_ancestor(trans->c,
2015 search_key = bpos_successor(k.k->p);
2019 if (bkey_whiteout(k.k) &&
2020 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2021 search_key = bkey_successor(iter, k.k->p);
2028 iter->pos = iter_pos;
2030 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2031 iter->flags & BTREE_ITER_INTENT,
2032 btree_iter_ip_allocated(iter));
2034 btree_path_set_should_be_locked(iter->path);
2036 if (iter->update_path) {
2037 if (iter->update_path->uptodate &&
2038 (ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_)))
2039 k = bkey_s_c_err(ret);
2041 btree_path_set_should_be_locked(iter->update_path);
2044 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2045 iter->pos.snapshot = iter->snapshot;
2047 ret = bch2_btree_iter_verify_ret(iter, k);
2048 if (unlikely(ret)) {
2049 bch2_btree_iter_set_pos(iter, iter->pos);
2050 k = bkey_s_c_err(ret);
2053 bch2_btree_iter_verify_entry_exit(iter);
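/*
 * Example usage (illustrative sketch; inum is hypothetical, error
 * handling abbreviated - on a transaction restart the caller retries
 * from bch2_trans_begin()):
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
 *			     POS(inum, 0), 0);
 *	while ((k = bch2_btree_iter_peek_upto(&iter, POS(inum, U64_MAX))).k &&
 *	       !(ret = bkey_err(k))) {
 *		...
 *		bch2_btree_iter_advance(&iter);
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */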
2059 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2060 * to iterator's current position, returning keys from every level of the btree.
2061 * For keys at different levels of the btree that compare equal, the key from
2062 * the lower level (leaf) is returned first.
2064 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
2066 struct btree_trans *trans = iter->trans;
2070 EBUG_ON(iter->path->cached);
2071 bch2_btree_iter_verify(iter);
2072 BUG_ON(iter->path->level < iter->min_depth);
2073 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2074 EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
2077 iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
2078 iter->flags & BTREE_ITER_INTENT,
2079 btree_iter_ip_allocated(iter));
2081 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2082 if (unlikely(ret)) {
2083 /* ensure that iter->k is consistent with iter->pos: */
2084 bch2_btree_iter_set_pos(iter, iter->pos);
2085 k = bkey_s_c_err(ret);
2089 /* Already at end? */
2090 if (!btree_path_node(iter->path, iter->path->level)) {
2095 k = btree_path_level_peek_all(trans->c,
2096 &iter->path->l[iter->path->level], &iter->k);
2098 /* Check if we should go up to the parent node: */
2101 !bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
2102 iter->pos = path_l(iter->path)->b->key.k.p;
2103 btree_path_set_level_up(trans, iter->path);
2104 iter->advanced = false;
2109 * Check if we should go back down to a leaf:
2110 * If we're not in a leaf node, we only return the current key
2111 * if it exactly matches iter->pos - otherwise we first have to
2112 * go back to the leaf:
2114 if (iter->path->level != iter->min_depth &&
2117 bpos_cmp(iter->pos, k.k->p))) {
2118 btree_path_set_level_down(trans, iter->path, iter->min_depth);
2119 iter->pos = bpos_successor(iter->pos);
2120 iter->advanced = false;
2124 /* Check if we should go to the next key: */
2125 if (iter->path->level == iter->min_depth &&
2128 !bpos_cmp(iter->pos, k.k->p)) {
2129 iter->pos = bpos_successor(iter->pos);
2130 iter->advanced = false;
2134 if (iter->advanced &&
2135 iter->path->level == iter->min_depth &&
2136 bpos_cmp(k.k->p, iter->pos))
2137 iter->advanced = false;
2139 BUG_ON(iter->advanced);
2145 btree_path_set_should_be_locked(iter->path);
2147 bch2_btree_iter_verify(iter);
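/*
 * e.g. (sketch): if both a leaf and its parent contain a key at
 * POS(1, 0), the leaf's copy is returned first; iter->min_depth bounds
 * how far back down the iterator descends after visiting the parent.
 */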
2153 * bch2_btree_iter_next: returns first key greater than iterator's current position
2156 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2158 if (!bch2_btree_iter_advance(iter))
2159 return bkey_s_c_null;
2161 return bch2_btree_iter_peek(iter);
2165 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2166 * iterator's current position
2168 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2170 struct btree_trans *trans = iter->trans;
2171 struct bpos search_key = iter->pos;
2172 struct btree_path *saved_path = NULL;
2174 struct bkey saved_k;
2175 const struct bch_val *saved_v;
2178 EBUG_ON(iter->path->cached || iter->path->level);
2179 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2181 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2182 return bkey_s_c_err(-EIO);
2184 bch2_btree_iter_verify(iter);
2185 bch2_btree_iter_verify_entry_exit(iter);
2187 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2188 search_key.snapshot = U32_MAX;
2191 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2192 iter->flags & BTREE_ITER_INTENT,
2193 btree_iter_ip_allocated(iter));
2195 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2196 if (unlikely(ret)) {
2197 /* ensure that iter->k is consistent with iter->pos: */
2198 bch2_btree_iter_set_pos(iter, iter->pos);
2199 k = bkey_s_c_err(ret);
2203 k = btree_path_level_peek(trans, iter->path,
2204 &iter->path->l[0], &iter->k);
2206 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2207 ? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
2208 : bpos_cmp(k.k->p, search_key) > 0))
2209 k = btree_path_level_prev(trans, iter->path,
2210 &iter->path->l[0], &iter->k);
2212 bch2_btree_path_check_sort(trans, iter->path, 0);
2215 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2216 if (k.k->p.snapshot == iter->snapshot)
2220 * If we have a saved candidate, and we're no
2221 * longer at the same _key_ (not pos), return it:
2224 if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
2225 bch2_path_put(trans, iter->path,
2226 iter->flags & BTREE_ITER_INTENT);
2227 iter->path = saved_path;
2234 if (bch2_snapshot_is_ancestor(iter->trans->c,
2238 bch2_path_put(trans, saved_path,
2239 iter->flags & BTREE_ITER_INTENT);
2240 saved_path = btree_path_clone(trans, iter->path,
2241 iter->flags & BTREE_ITER_INTENT);
2246 search_key = bpos_predecessor(k.k->p);
2250 if (bkey_whiteout(k.k) &&
2251 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2252 search_key = bkey_predecessor(iter, k.k->p);
2253 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2254 search_key.snapshot = U32_MAX;
2259 } else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
2260 /* Advance to previous leaf node: */
2261 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2263 /* Start of btree: */
2264 bch2_btree_iter_set_pos(iter, POS_MIN);
2270 EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
2272 /* Extents can straddle iter->pos: */
2273 if (bkey_cmp(k.k->p, iter->pos) < 0)
2276 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2277 iter->pos.snapshot = iter->snapshot;
2279 btree_path_set_should_be_locked(iter->path);
2282 bch2_path_put(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2284 bch2_btree_iter_verify_entry_exit(iter);
2285 bch2_btree_iter_verify(iter);
2291 * bch2_btree_iter_prev: returns first key less than iterator's current position
2294 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2296 if (!bch2_btree_iter_rewind(iter))
2297 return bkey_s_c_null;
2299 return bch2_btree_iter_peek_prev(iter);
2302 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2304 struct btree_trans *trans = iter->trans;
2305 struct bpos search_key;
2309 bch2_btree_iter_verify(iter);
2310 bch2_btree_iter_verify_entry_exit(iter);
2311 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2312 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2314 /* extents can't span inode numbers: */
2315 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2316 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2317 if (iter->pos.inode == KEY_INODE_MAX)
2318 return bkey_s_c_null;
2320 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2323 search_key = btree_iter_search_key(iter);
2324 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2325 iter->flags & BTREE_ITER_INTENT,
2326 btree_iter_ip_allocated(iter));
2328 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2329 if (unlikely(ret)) {
2330 k = bkey_s_c_err(ret);
2334 if ((iter->flags & BTREE_ITER_CACHED) ||
2335 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2336 struct bkey_i *next_update;
2338 if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2339 (next_update = btree_trans_peek_updates(trans,
2340 iter->btree_id, search_key)) &&
2341 !bpos_cmp(next_update->k.p, iter->pos)) {
2342 iter->k = next_update->k;
2343 k = bkey_i_to_s_c(next_update);
2347 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2348 (next_update = bch2_btree_journal_peek_slot(trans,
2349 iter, iter->pos))) {
2350 iter->k = next_update->k;
2351 k = bkey_i_to_s_c(next_update);
2355 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2356 (k = __btree_trans_peek_key_cache(iter, iter->pos)).k) {
2359 /* We're not returning a key from iter->path: */
2363 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2367 EBUG_ON(iter->path->level);
2369 if (iter->flags & BTREE_ITER_INTENT) {
2370 struct btree_iter iter2;
2371 struct bpos end = iter->pos;
2373 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2374 end.offset = U64_MAX;
2376 bch2_trans_copy_iter(&iter2, iter);
2377 k = bch2_btree_iter_peek_upto(&iter2, end);
2379 if (k.k && !bkey_err(k)) {
2383 bch2_trans_iter_exit(trans, &iter2);
2385 struct bpos pos = iter->pos;
2387 k = bch2_btree_iter_peek(iter);
2388 if (unlikely(bkey_err(k)))
2389 bch2_btree_iter_set_pos(iter, pos);
2394 if (unlikely(bkey_err(k)))
2397 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2399 if (bkey_cmp(iter->pos, next) < 0) {
2400 bkey_init(&iter->k);
2401 iter->k.p = iter->pos;
2403 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2404 bch2_key_resize(&iter->k,
2405 min_t(u64, KEY_SIZE_MAX,
2406 (next.inode == iter->pos.inode
2410 EBUG_ON(!iter->k.size);
2413 k = (struct bkey_s_c) { &iter->k, NULL };
2417 btree_path_set_should_be_locked(iter->path);
2419 bch2_btree_iter_verify_entry_exit(iter);
2420 bch2_btree_iter_verify(iter);
2421 ret = bch2_btree_iter_verify_ret(iter, k);
2423 return bkey_s_c_err(ret);
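/*
 * Example (sketch; handle_hole_at() is hypothetical): peek_slot returns
 * a key for every position - holes come back as synthesized
 * KEY_TYPE_deleted keys (sized to cover the hole, for extents), letting
 * callers distinguish "nothing here" from an error:
 *
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret && bkey_deleted(k.k))
 *		handle_hole_at(iter.pos);
 */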
2428 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2430 if (!bch2_btree_iter_advance(iter))
2431 return bkey_s_c_null;
2433 return bch2_btree_iter_peek_slot(iter);
2436 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2438 if (!bch2_btree_iter_rewind(iter))
2439 return bkey_s_c_null;
2441 return bch2_btree_iter_peek_slot(iter);
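/*
 * Added note (illustrative): next_slot/prev_slot are just "advance (or
 * rewind), then peek_slot", so a dense scan that visits empty positions as
 * well as live keys can be written as:
 *
 *	for (k = bch2_btree_iter_peek_slot(&iter);
 *	     k.k && !bkey_err(k) && bkey_cmp(iter.pos, end) < 0;
 *	     k = bch2_btree_iter_next_slot(&iter))
 *		...
 *
 * where @end is a caller-chosen bound (a placeholder here).
 */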
2444 /* new transactional stuff: */
2446 static inline void btree_path_verify_sorted_ref(struct btree_trans *trans,
2447 struct btree_path *path)
2449 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2450 EBUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2451 EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
2454 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2456 #ifdef CONFIG_BCACHEFS_DEBUG
2457 unsigned i;
2459 for (i = 0; i < trans->nr_sorted; i++)
2460 btree_path_verify_sorted_ref(trans, trans->paths + trans->sorted[i]);
2461 #endif
2464 static void btree_trans_verify_sorted(struct btree_trans *trans)
2466 #ifdef CONFIG_BCACHEFS_DEBUG
2467 struct btree_path *path, *prev = NULL;
2468 unsigned i;
2470 if (!bch2_debug_check_iterators)
2471 return;
2473 trans_for_each_path_inorder(trans, path, i) {
2474 if (prev && btree_path_cmp(prev, path) > 0) {
2475 bch2_dump_trans_paths_updates(trans);
2476 panic("trans paths out of order!\n");
2478 prev = path;
2480 #endif
2483 static inline void btree_path_swap(struct btree_trans *trans,
2484 struct btree_path *l, struct btree_path *r)
2486 swap(l->sorted_idx, r->sorted_idx);
2487 swap(trans->sorted[l->sorted_idx],
2488 trans->sorted[r->sorted_idx]);
2490 btree_path_verify_sorted_ref(trans, l);
2491 btree_path_verify_sorted_ref(trans, r);
2494 inline void bch2_btree_path_check_sort(struct btree_trans *trans, struct btree_path *path,
2495 int cmp)
2497 struct btree_path *n;
2499 if (cmp <= 0) {
2500 n = prev_btree_path(trans, path);
2501 if (n && btree_path_cmp(n, path) > 0) {
2502 do {
2503 btree_path_swap(trans, n, path);
2504 n = prev_btree_path(trans, path);
2505 } while (n && btree_path_cmp(n, path) > 0);
2511 if (cmp >= 0) {
2512 n = next_btree_path(trans, path);
2513 if (n && btree_path_cmp(path, n) > 0) {
2514 do {
2515 btree_path_swap(trans, path, n);
2516 n = next_btree_path(trans, path);
2517 } while (n && btree_path_cmp(path, n) > 0);
2521 btree_trans_verify_sorted(trans);
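/*
 * Added note: bch2_btree_path_check_sort() repairs trans->sorted with
 * adjacent swaps - an insertion-sort step, bubbling @path backwards when
 * cmp <= 0 and forwards when cmp >= 0. This is cheap because a path's
 * position only ever drifts a little relative to the already-sorted array,
 * and the order maintained here is the same one btree_path_cmp() (and thus
 * the lock ordering) requires.
 */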
2524 static inline void btree_path_list_remove(struct btree_trans *trans,
2525 struct btree_path *path)
2527 unsigned i;
2529 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2531 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2533 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2534 trans->paths[trans->sorted[i]].sorted_idx = i;
2536 path->sorted_idx = U8_MAX;
2538 btree_trans_verify_sorted_refs(trans);
2541 static inline void btree_path_list_add(struct btree_trans *trans,
2542 struct btree_path *pos,
2543 struct btree_path *path)
2545 unsigned i;
2547 btree_trans_verify_sorted_refs(trans);
2549 path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
2551 if (trans->in_traverse_all &&
2552 trans->traverse_all_idx != U8_MAX &&
2553 trans->traverse_all_idx >= path->sorted_idx)
2554 trans->traverse_all_idx++;
2556 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2558 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2559 trans->paths[trans->sorted[i]].sorted_idx = i;
2561 btree_trans_verify_sorted_refs(trans);
2564 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2566 if (iter->path)
2567 bch2_path_put(trans, iter->path,
2568 iter->flags & BTREE_ITER_INTENT);
2569 if (iter->update_path)
2570 bch2_path_put(trans, iter->update_path,
2571 iter->flags & BTREE_ITER_INTENT);
2572 if (iter->key_cache_path)
2573 bch2_path_put(trans, iter->key_cache_path,
2574 iter->flags & BTREE_ITER_INTENT);
2575 iter->path = NULL;
2576 iter->update_path = NULL;
2577 iter->key_cache_path = NULL;
2580 static inline void __bch2_trans_iter_init(struct btree_trans *trans,
2581 struct btree_iter *iter,
2582 unsigned btree_id, struct bpos pos,
2583 unsigned locks_want,
2584 unsigned depth,
2585 unsigned flags,
2586 unsigned long ip)
2588 if (trans->restarted)
2589 panic("bch2_trans_iter_init(): in transaction restart, %s by %pS\n",
2590 bch2_err_str(trans->restarted),
2591 (void *) trans->last_restarted_ip);
2593 if (flags & BTREE_ITER_ALL_LEVELS)
2594 flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
2596 if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
2597 btree_node_type_is_extents(btree_id))
2598 flags |= BTREE_ITER_IS_EXTENTS;
2600 if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
2601 !btree_type_has_snapshots(btree_id))
2602 flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
2604 if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
2605 btree_type_has_snapshots(btree_id))
2606 flags |= BTREE_ITER_FILTER_SNAPSHOTS;
2608 if (!test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags))
2609 flags |= BTREE_ITER_WITH_JOURNAL;
2611 iter->trans = trans;
2613 iter->update_path = NULL;
2614 iter->key_cache_path = NULL;
2615 iter->btree_id = btree_id;
2616 iter->min_depth = depth;
2617 iter->flags = flags;
2618 iter->snapshot = pos.snapshot;
2619 iter->pos = pos;
2620 iter->k.type = KEY_TYPE_deleted;
2621 iter->k.p = pos;
2622 iter->k.size = 0;
2623 iter->journal_idx = 0;
2624 iter->journal_pos = POS_MIN;
2625 #ifdef CONFIG_BCACHEFS_DEBUG
2626 iter->ip_allocated = ip;
2627 #endif
2629 iter->path = bch2_path_get(trans, btree_id, iter->pos,
2630 locks_want, depth, flags, ip);
2633 void bch2_trans_iter_init(struct btree_trans *trans,
2634 struct btree_iter *iter,
2635 unsigned btree_id, struct bpos pos,
2636 unsigned flags)
2638 if (!btree_id_cached(trans->c, btree_id)) {
2639 flags &= ~BTREE_ITER_CACHED;
2640 flags &= ~BTREE_ITER_WITH_KEY_CACHE;
2641 } else if (!(flags & BTREE_ITER_CACHED))
2642 flags |= BTREE_ITER_WITH_KEY_CACHE;
2644 __bch2_trans_iter_init(trans, iter, btree_id, pos,
2645 0, 0, flags, _RET_IP_);
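/*
 * Added usage sketch (illustrative): the canonical pairing of
 * bch2_trans_iter_init() with bch2_trans_iter_exit() for a forward scan.
 * The btree id is arbitrary, and a real caller would wrap this in a
 * transaction-restart retry loop (see bch2_trans_begin() below):
 */
static int __maybe_unused example_forward_scan(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH);

	for (k = bch2_btree_iter_peek(&iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_next(&iter))
		; /* process k */

	bch2_trans_iter_exit(trans, &iter);
	return ret;
}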
2648 void bch2_trans_node_iter_init(struct btree_trans *trans,
2649 struct btree_iter *iter,
2650 enum btree_id btree_id,
2651 struct bpos pos,
2652 unsigned locks_want,
2653 unsigned depth,
2654 unsigned flags)
2656 __bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
2657 BTREE_ITER_NOT_EXTENTS|
2658 __BTREE_ITER_ALL_SNAPSHOTS|
2659 BTREE_ITER_ALL_SNAPSHOTS|
2660 flags, _RET_IP_);
2661 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2662 BUG_ON(iter->path->level != depth);
2663 BUG_ON(iter->min_depth != depth);
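/*
 * Added usage sketch (illustrative): a node iterator visits btree _nodes_
 * rather than keys; with depth 0 it walks the leaves. Flags here are 0 for
 * brevity - node_iter_init() already forces NOT_EXTENTS/ALL_SNAPSHOTS:
 */
static void __maybe_unused example_walk_nodes(struct btree_trans *trans)
{
	struct btree_iter iter;
	struct btree *b;

	bch2_trans_node_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
				  0, 0, 0);

	for (b = bch2_btree_iter_peek_node(&iter);
	     b && !IS_ERR(b);
	     b = bch2_btree_iter_next_node(&iter))
		; /* inspect b, e.g. b->key, b->c.level */

	bch2_trans_iter_exit(trans, &iter);
}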
2666 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2668 *dst = *src;
2670 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
2671 if (src->update_path)
2672 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
2673 dst->key_cache_path = NULL;
2676 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2678 unsigned new_top = trans->mem_top + size;
2679 size_t old_bytes = trans->mem_bytes;
2680 size_t new_bytes = roundup_pow_of_two(new_top);
2681 void *new_mem;
2682 void *p;
2684 trans->mem_max = max(trans->mem_max, new_top);
2686 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2688 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2689 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2690 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2691 new_bytes = BTREE_TRANS_MEM_MAX;
2695 if (!new_mem)
2696 return ERR_PTR(-ENOMEM);
2698 trans->mem = new_mem;
2699 trans->mem_bytes = new_bytes;
2701 if (old_bytes) {
2702 trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2703 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2706 p = trans->mem + trans->mem_top;
2707 trans->mem_top += size;
2708 memset(p, 0, size);
2709 return p;
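/*
 * Added usage sketch (illustrative): memory from the bch2_trans_kmalloc()
 * fast path (and from this slow path) lives until the transaction is reset
 * or exits, which makes it the right allocator for a bkey_i that a pending
 * update will point at. Note the ERR_PTR convention - the allocation itself
 * can demand a transaction restart:
 */
static struct bkey_i * __maybe_unused example_alloc_key(struct btree_trans *trans)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));

	if (IS_ERR(k))
		return k; /* possibly transaction_restart_mem_realloced */

	bkey_init(&k->k);
	return k;
}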
2713 * bch2_trans_begin() - reset a transaction after an interrupted attempt
2714 * @trans: transaction to reset
2716 * While iterating over nodes or updating nodes, an attempt to lock a btree node
2717 * may return BCH_ERR_transaction_restart when the trylock fails. When this
2718 * occurs bch2_trans_begin() should be called and the transaction retried.
2720 u32 bch2_trans_begin(struct btree_trans *trans)
2722 struct btree_path *path;
2724 bch2_trans_reset_updates(trans);
2726 trans->restart_count++;
2729 if (trans->fs_usage_deltas) {
2730 trans->fs_usage_deltas->used = 0;
2731 memset((void *) trans->fs_usage_deltas +
2732 offsetof(struct replicas_delta_list, memset_start), 0,
2733 (void *) &trans->fs_usage_deltas->memset_end -
2734 (void *) &trans->fs_usage_deltas->memset_start);
2737 trans_for_each_path(trans, path) {
2738 path->should_be_locked = false;
2741 * If the transaction wasn't restarted, we're presuming to be
2742 * doing something new: don't keep iterators except the ones that
2743 * are in use - except for the subvolumes btree:
2745 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
2746 path->preserve = false;
2749 * XXX: we probably shouldn't be doing this if the transaction
2750 * was restarted, but currently we still overflow transaction
2751 * iterators if we do that
2753 if (!path->ref && !path->preserve)
2754 __bch2_path_free(trans, path);
2755 else
2756 path->preserve = false;
2759 if (!trans->restarted &&
2760 (need_resched() ||
2761 ktime_get_ns() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
2762 bch2_trans_unlock(trans);
2763 cond_resched();
2764 bch2_trans_relock(trans);
2767 trans->last_restarted_ip = _RET_IP_;
2768 if (trans->restarted)
2769 bch2_btree_path_traverse_all(trans);
2771 trans->last_begin_time = ktime_get_ns();
2772 return trans->restart_count;
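/*
 * Added usage sketch (illustrative): the retry loop the comment above
 * describes. Any BCH_ERR_transaction_restart error means "reset with
 * bch2_trans_begin() and rerun the whole sequence"; the tree's
 * lockrestart_do() macro wraps the same pattern:
 */
static int __maybe_unused example_retry_loop(struct btree_trans *trans)
{
	int ret;

	do {
		bch2_trans_begin(trans);

		ret = 0; /* ... btree operations that may fail with a restart ... */
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

	return ret;
}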
2775 void bch2_trans_verify_not_restarted(struct btree_trans *trans, u32 restart_count)
2777 if (trans_was_restarted(trans, restart_count))
2778 panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
2779 trans->restart_count, restart_count,
2780 (void *) trans->last_restarted_ip);
2783 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
2785 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
2786 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2787 void *p = NULL;
2789 BUG_ON(trans->used_mempool);
2792 p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
2794 if (!p)
2795 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
2797 trans->paths = p; p += paths_bytes;
2798 trans->updates = p; p += updates_bytes;
2801 static inline unsigned bch2_trans_get_fn_idx(struct btree_trans *trans, struct bch_fs *c,
2802 const char *fn)
2804 unsigned i;
2806 for (i = 0; i < ARRAY_SIZE(c->btree_transaction_fns); i++)
2807 if (!c->btree_transaction_fns[i] ||
2808 c->btree_transaction_fns[i] == fn) {
2809 c->btree_transaction_fns[i] = fn;
2810 return i;
2813 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
2814 return i;
2817 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, const char *fn)
2818 __acquires(&c->btree_trans_barrier)
2820 struct btree_transaction_stats *s;
2821 struct btree_trans *pos;
2823 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
2825 memset(trans, 0, sizeof(*trans));
2828 trans->last_begin_time = ktime_get_ns();
2829 trans->fn_idx = bch2_trans_get_fn_idx(trans, c, fn);
2830 trans->locking_wait.task = current;
2831 closure_init_stack(&trans->ref);
2833 bch2_trans_alloc_paths(trans, c);
2835 s = btree_trans_stats(trans);
2836 if (s && s->max_mem) {
2837 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
2839 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
2841 if (unlikely(!trans->mem)) {
2842 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2843 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2844 } else {
2845 trans->mem_bytes = expected_mem_bytes;
2848 trans->nr_max_paths = s->nr_max_paths;
2851 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2853 mutex_lock(&c->btree_trans_lock);
2854 list_for_each_entry(pos, &c->btree_trans_list, list) {
2855 if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
2856 list_add_tail(&trans->list, &pos->list);
2857 goto list_add_done;
2860 list_add_tail(&trans->list, &c->btree_trans_list);
2861 list_add_done:
2862 mutex_unlock(&c->btree_trans_lock);
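/*
 * Added usage sketch (illustrative): callers reach __bch2_trans_init()
 * through the bch2_trans_init() wrapper, which supplies the caller's
 * function name for the per-function stats tables; every init must be
 * paired with bch2_trans_exit():
 */
static void __maybe_unused example_trans_lifecycle(struct bch_fs *c)
{
	struct btree_trans trans;

	bch2_trans_init(&trans, c, 0, 0);

	/* bch2_trans_begin(), iterators and updates go here */

	bch2_trans_exit(&trans);
}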
2865 static void check_btree_paths_leaked(struct btree_trans *trans)
2867 #ifdef CONFIG_BCACHEFS_DEBUG
2868 struct bch_fs *c = trans->c;
2869 struct btree_path *path;
2871 trans_for_each_path(trans, path)
2872 if (path->ref)
2873 goto leaked;
2874 return;
2875 leaked:
2876 bch_err(c, "btree paths leaked from %s!", trans->fn);
2877 trans_for_each_path(trans, path)
2878 if (path->ref)
2879 printk(KERN_ERR " btree %s %pS\n",
2880 bch2_btree_ids[path->btree_id],
2881 (void *) path->ip_allocated);
2882 /* Be noisy about this: */
2883 bch2_fatal_error(c);
2884 #endif
2887 void bch2_trans_exit(struct btree_trans *trans)
2888 __releases(&c->btree_trans_barrier)
2890 struct btree_insert_entry *i;
2891 struct bch_fs *c = trans->c;
2892 struct btree_transaction_stats *s = btree_trans_stats(trans);
2894 bch2_trans_unlock(trans);
2896 closure_sync(&trans->ref);
2899 s->max_mem = max(s->max_mem, trans->mem_max);
2901 trans_for_each_update(trans, i)
2902 __btree_path_put(i->path, true);
2903 trans->nr_updates = 0;
2905 check_btree_paths_leaked(trans);
2907 mutex_lock(&c->btree_trans_lock);
2908 list_del(&trans->list);
2909 mutex_unlock(&c->btree_trans_lock);
2911 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2913 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
2915 kfree(trans->extra_journal_entries.data);
2917 if (trans->fs_usage_deltas) {
2918 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
2919 REPLICAS_DELTA_LIST_MAX)
2920 mempool_free(trans->fs_usage_deltas,
2921 &c->replicas_delta_pool);
2922 else
2923 kfree(trans->fs_usage_deltas);
2926 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
2927 mempool_free(trans->mem, &c->btree_trans_mem_pool);
2928 else
2929 kfree(trans->mem);
2932 /*
2933 * Userspace doesn't have a real percpu implementation:
2934 */
2935 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
2938 if (trans->paths)
2939 mempool_free(trans->paths, &c->btree_paths_pool);
2941 trans->mem = (void *) 0x1;
2942 trans->paths = (void *) 0x1;
2945 static void __maybe_unused
2946 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
2947 struct btree_bkey_cached_common *b)
2949 struct six_lock_count c = six_lock_counts(&b->lock);
2950 struct task_struct *owner;
2953 rcu_read_lock();
2954 owner = READ_ONCE(b->lock.owner);
2955 pid = owner ? owner->pid : 0;
2956 rcu_read_unlock();
2959 prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
2960 b->level, bch2_btree_ids[b->btree_id]);
2961 bch2_bpos_to_text(out, btree_node_pos(b));
2964 prt_printf(out, " locks %u:%u:%u held by pid %u",
2965 c.n[0], c.n[1], c.n[2], pid);
2968 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
2970 struct btree_path *path;
2971 struct btree_bkey_cached_common *b;
2972 static char lock_types[] = { 'r', 'i', 'w' };
2973 unsigned l;
2975 if (!out->nr_tabstops) {
2976 printbuf_tabstop_push(out, 16);
2977 printbuf_tabstop_push(out, 32);
2980 prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);
2982 trans_for_each_path(trans, path) {
2983 if (!path->nodes_locked)
2986 prt_printf(out, " path %u %c l=%u %s:",
2987 path->idx,
2988 path->cached ? 'c' : 'b',
2989 path->level,
2990 bch2_btree_ids[path->btree_id]);
2991 bch2_bpos_to_text(out, path->pos);
2994 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
2995 if (btree_node_locked(path, l) &&
2996 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
2997 prt_printf(out, " %c l=%u ",
2998 lock_types[btree_node_locked_type(path, l)], l);
2999 bch2_btree_bkey_cached_common_to_text(out, b);
3005 b = READ_ONCE(trans->locking);
3006 if (b) {
3007 prt_str(out, " want");
3009 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3010 bch2_btree_bkey_cached_common_to_text(out, b);
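/*
 * Added usage sketch (illustrative): dumping a transaction's held and
 * wanted locks, e.g. from a debugfs handler or a deadlock report:
 */
static void __maybe_unused example_dump_trans_locks(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_btree_trans_to_text(&buf, trans);
	printk(KERN_ERR "%s", buf.buf);
	printbuf_exit(&buf);
}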
3015 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3017 if (c->btree_trans_barrier_initialized)
3018 cleanup_srcu_struct(&c->btree_trans_barrier);
3019 mempool_exit(&c->btree_trans_mem_pool);
3020 mempool_exit(&c->btree_paths_pool);
3023 int bch2_fs_btree_iter_init(struct bch_fs *c)
3025 unsigned i, nr = BTREE_ITER_MAX;
3026 int ret;
3028 for (i = 0; i < ARRAY_SIZE(c->btree_transaction_stats); i++)
3029 mutex_init(&c->btree_transaction_stats[i].lock);
3031 INIT_LIST_HEAD(&c->btree_trans_list);
3032 mutex_init(&c->btree_trans_lock);
3034 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3035 sizeof(struct btree_path) * nr +
3036 sizeof(struct btree_insert_entry) * nr) ?:
3037 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3038 BTREE_TRANS_MEM_MAX) ?:
3039 init_srcu_struct(&c->btree_trans_barrier);
3040 if (!ret)
3041 c->btree_trans_barrier_initialized = true;
3042 return ret;