// SPDX-License-Identifier: GPL-2.0

#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "subvolume.h"

#include <linux/prandom.h>
#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
				       struct btree_path *);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline int bch2_trans_cond_resched(struct btree_trans *trans)
	if (need_resched() || race_fault()) {
		bch2_trans_unlock(trans);
		return bch2_trans_relock(trans);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id, r_btree_id) ?:
		 cmp_int((int) l->cached, (int) r_cached) ?:
		 bpos_cmp(l->pos, r_pos) ?:
		-cmp_int(l->level, r_level);

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
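
/*
 * Illustrative note (not from the original source): with the comparison
 * above, paths sort by btree, then cached vs. btree node paths, then
 * position, then *descending* level - so a parent node's path sorts before
 * its child's at the same position, matching the top-down order in which
 * node locks must be taken.
 */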

static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
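	/*
	 * Extents are indexed by their end position: searching from the
	 * successor of iter->pos finds the extent covering pos, where a
	 * search at pos itself would stop at an extent ending exactly
	 * there.
	 */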

static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
	return bpos_lt(path->pos, b->data->min_key);

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
	return bpos_gt(path->pos, b->key.k.p);

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	btree_node_unlock(trans, path, 0);

static void bch2_btree_path_verify_level(struct btree_trans *trans,
					 struct btree_path *path, unsigned level)
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;

	if (!bch2_debug_check_iterators)

	locked = btree_node_locked(path, level);

	bch2_btree_path_verify_cached(trans, path);

	if (!btree_path_node(path, level))

	if (!bch2_btree_node_relock_notrace(trans, path, level))

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {

	btree_node_unlock(trans, path, level);

	bch2_bpos_to_text(&buf1, path->pos);

		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);

		prt_printf(&buf2, "(none)");

		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);

		prt_printf(&buf3, "(none)");

	panic("path should be %s key at level %u:\n"
	      msg, level, buf1.buf, buf2.buf, buf3.buf);

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
	struct bch_fs *c = trans->c;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		BUG_ON(!path->cached &&
		       c->btree_roots[path->btree_id].b->c.level > i);

		bch2_btree_path_verify_level(trans, path, i);

	bch2_btree_path_verify_locks(path);

void bch2_trans_verify_paths(struct btree_trans *trans)
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify(trans, path);

static void bch2_btree_iter_verify(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, iter->update_path);
	bch2_btree_path_verify(trans, iter->path);

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
	BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));

static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;

	if (!bch2_debug_check_iterators)

	if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))

	if (bkey_err(k) || !k.k)

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_NOPRESERVE|
			     BTREE_ITER_ALL_SNAPSHOTS);
	prev = bch2_btree_iter_prev(&copy);

	ret = bkey_err(prev);

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"

	bch2_trans_iter_exit(trans, &copy);

void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
	struct btree_path *path;
	struct printbuf buf = PRINTBUF;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, idx) {
		int cmp = cmp_int(path->btree_id, id) ?:
			cmp_int(path->cached, key_cache);

		if (!btree_node_locked(path, 0) ||
		    !path->should_be_locked)

		if (bkey_ge(pos, path->l[0].b->data->min_key) &&
		    bkey_le(pos, path->l[0].b->key.k.p))

		if (bkey_eq(pos, path->pos))

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_ids[id], buf.buf,
	      key_cache ? " cached" : "");

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);

static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;
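	/*
	 * The key at 'offset' grew or shrank by 'shift' u64s: every iterator
	 * position in this bset at or past the change has to be adjusted,
	 * and an iterator position inside the clobbered range means the
	 * iterator was pointing at the key being modified.
	 */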
	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)

	/* didn't find the bset in the iterator - might have to re-add it: */
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);

		/* Iterator is after key that changed */

	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;

	bch2_btree_node_iter_sort(node_iter, b);

	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {

			btree_node_iter_set_set_pos(node_iter,

void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);

	trans_for_each_path_with_node(trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;

	return bkey_disassemble(l->b, k, u);

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
	struct bkey_packed *k;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)

		bch2_btree_node_iter_advance(&l->iter, l->b);

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
		bch2_btree_node_iter_peek(&l->iter, l->b);

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b)
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	path->l[b->c.level].lock_seq = b->c.lock.state.seq;
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);

/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
	struct bch_fs *c = trans->c;
	struct btree_insert_entry *i;

	trans_for_each_update(trans, i)
		    i->level == b->c.level &&
		    i->btree_id == b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				bch2_journal_keys_peek_slot(c, i->btree_id, i->level,

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->uptodate == BTREE_ITER_UPTODATE &&
		    btree_path_pos_in_node(path, b)) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);
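
			/*
			 * The new node is already locked by our caller:
			 * transfer the path's lock on the old node to the new
			 * one by taking an extra reference on the new node's
			 * lock, rather than unlocking and re-locking.
			 */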
			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(trans, path, b->c.level, t);

			bch2_btree_path_level_init(trans, path, b);

	bch2_trans_revalidate_updates_in_node(trans, b);

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);

/* Btree path: traverse, set_pos: */

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
	enum six_lock_type lock_type;

	EBUG_ON(path->nodes_locked);

	b = READ_ONCE(*rootp);
	path->level = READ_ONCE(b->c.level);

	if (unlikely(path->level < depth_want)) {
		/*
		 * the root is at a lower depth than the depth we want:
		 * got to the end of the btree, or we're walking nodes
		 * greater than some depth and there are no nodes >=
		 * that depth
		 */
		path->level = depth_want;
		for (i = path->level; i < BTREE_MAX_DEPTH; i++)

	lock_type = __btree_lock_want(path, path->level);
	ret = btree_node_lock(trans, path, &b->c,
			      path->level, lock_type, trace_ip);
	if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

	if (likely(b == READ_ONCE(*rootp) &&
		   b->c.level == path->level &&
		for (i = 0; i < path->level; i++)
			path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
		path->l[path->level].b = b;
		for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)

		mark_btree_node_locked(trans, path, path->level, lock_type);
		bch2_btree_path_level_init(trans, path, b);

	six_unlock_type(&b->c.lock, lock_type);
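
	/*
	 * If the root changed while we were taking the lock (checked against
	 * *rootp above), drop the lock and retry from the new root: this
	 * recheck is what makes the lockless READ_ONCE() of the root pointer
	 * safe.
	 */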

static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
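	/*
	 * Prefetch sizing: once the filesystem is fully started we prefetch
	 * at most two leaf-level children and no interior nodes; before that
	 * (journal replay, fsck) we prefetch much more aggressively, since
	 * those passes sweep large parts of the tree.
	 */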
	bool was_locked = btree_node_locked(path, path->level);

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);

	btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
	struct bch_fs *c = trans->c;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);

	btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	btree_node_unlock(trans, path, plevel);

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;

	__bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if (flags & BTREE_ITER_PREFETCH)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);

static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		bch2_bkey_buf_unpack(&tmp, c, l->b,
				     bch2_btree_node_iter_peek(&l->iter, l->b));

		if (flags & BTREE_ITER_PREFETCH) {
			ret = btree_path_prefetch(trans, path);

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level, lock_type);

	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);

	bch2_bkey_buf_exit(&tmp, c);

static int bch2_btree_path_traverse_all(struct btree_trans *trans)
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
	trans->restarted = 0;
	trans->last_restarted_ip = 0;

	trans_for_each_path(trans, path)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	bch2_trans_unlock(trans);

	if (unlikely(trans->memory_allocation_failure)) {
		closure_init_stack(&cl);

		ret = bch2_btree_cache_cannibalize_lock(c, &cl);

	/* Now, redo traversals in correct order: */
	while (i < trans->nr_sorted) {
		path = trans->paths + trans->sorted[i];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (path->uptodate) {
			__btree_path_get(path, false);
			ret = bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_);
			__btree_path_put(path, false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||

	/*
	 * We used to assert that all paths had been traversed here
	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
	 * path->should_be_locked is not set yet, we might have unlocked and
	 * then failed to relock a path - that's fine.
	 */
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);

static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);

static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
							 int check_pos)
	unsigned i, l = path->level;

	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i)) {
			__btree_path_set_level_up(trans, path, l++);

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		? path->level
		: __btree_path_up_until_good_node(trans, path, check_pos);

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned flags,
				 unsigned long trace_ip)
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);

		ret = bch2_btree_path_traverse_cached(trans, path, flags);

	if (unlikely(path->level >= BTREE_MAX_DEPTH))

	path->level = btree_path_up_until_good_node(trans, path, 0);

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			/*
			 * No nodes at this level - got to the end of
			 * the btree:
			 */
			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);

	path->uptodate = BTREE_ITER_UPTODATE;

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",
		      bch2_err_str(ret), ret,
		      bch2_err_str(trans->restarted), trans->restarted);
	bch2_btree_path_verify(trans, path);
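
/*
 * A minimal usage sketch (illustrative, not code from this file): callers
 * drive the traversal state machine above through the iterator interface,
 * looping on transaction restarts:
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, pos, 0);
 * retry:
 *	bch2_trans_begin(&trans);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 *		goto retry;
 *	bch2_trans_iter_exit(&trans, &iter);
 *	bch2_trans_exit(&trans);
 */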

static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
				   struct btree_path *src)
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);
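
	/*
	 * dst now references the same btree nodes as src: take a new
	 * reference on every lock src holds, so the two paths can be
	 * unlocked (and freed) independently.
	 */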
	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);

static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
					   bool intent)
	struct btree_path *new = btree_path_alloc(trans, src);

	btree_path_copy(trans, new, src);
	__btree_path_get(new, intent);

struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
					      struct btree_path *path, bool intent,
					      unsigned long ip)
	__btree_path_put(path, intent);
	path = btree_path_clone(trans, path, intent);
	path->preserve = false;

struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  struct btree_path *path, struct bpos new_pos,
			  bool intent, unsigned long ip, int cmp)
	unsigned level = path->level;

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!path->ref);

	path = bch2_btree_path_make_mut(trans, path, intent, ip);

	path->pos = new_pos;
	trans->paths_sorted = false;

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

	level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

		/*
		 * Iterators to interior nodes should always be pointed at the first non
		 * whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);

	bch2_btree_path_verify(trans, path);

/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)

static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
	__bch2_btree_path_unlock(trans, path);
	btree_path_list_remove(trans, path);
	trans->paths_allocated &= ~(1ULL << path->idx);

void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
	struct btree_path *dup;

	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))

	if (path->should_be_locked &&
	    !trans->restarted &&
	    (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))

		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;

	__bch2_path_free(trans, path);

static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path,
				 bool intent)
	EBUG_ON(trans->paths + path->idx != path);
	EBUG_ON(!path->ref);

	if (!__btree_path_put(path, intent))

	__bch2_path_free(trans, path);

void bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);

void bch2_trans_in_restart_error(struct btree_trans *trans)
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);

void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
	struct btree_insert_entry *i;
	struct btree_write_buffered_key *wb;

	prt_printf(buf, "transaction updates for %s journal seq %llu",
		   trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS",
			   bch2_btree_ids[i->btree_id],
			   i->cached,
			   (void *) i->ip_allocated);

		prt_printf(buf, " old ");
		bch2_bkey_val_to_text(buf, trans->c, old);

		prt_printf(buf, " new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));

	trans_for_each_wb_update(trans, wb) {
		prt_printf(buf, "update: btree=%s wb=1 %pS",
			   bch2_btree_ids[wb->btree],
			   (void *) i->ip_allocated);

		prt_printf(buf, " new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(&wb->k));

	printbuf_indent_sub(buf, 2);

void bch2_dump_trans_updates(struct btree_trans *trans)
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
	prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
		   path->idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   bch2_btree_ids[path->btree_id],
	bch2_bpos_to_text(out, path->pos);

	prt_printf(out, " locks %u", path->nodes_locked);
#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);

void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
	struct btree_path *path;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, idx)
		bch2_btree_path_to_text(out, path);

void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
	__bch2_trans_paths_to_text(out, trans, false);

void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
	struct printbuf buf = PRINTBUF;

	__bch2_trans_paths_to_text(&buf, trans, nosort);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);

void bch2_dump_trans_paths_updates(struct btree_trans *trans)
	__bch2_dump_trans_paths_updates(trans, false);

static void bch2_trans_update_max_paths(struct btree_trans *trans)
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
			s->nr_max_paths = trans->nr_max_paths =
				hweight64(trans->paths_allocated);
			swap(s->max_paths_text, buf.buf);
		mutex_unlock(&s->lock);

	printbuf_exit(&buf);

	trans->nr_max_paths = hweight64(trans->paths_allocated);

static noinline void btree_path_overflow(struct btree_trans *trans)
	bch2_dump_trans_paths_updates(trans);
	panic("trans path overflow\n");

static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
						  struct btree_path *pos)
	struct btree_path *path;

	if (unlikely(trans->paths_allocated ==
		     ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
		btree_path_overflow(trans);

	idx = __ffs64(~trans->paths_allocated);
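
	/*
	 * Path slots are tracked in the trans->paths_allocated bitmap (one
	 * bit per slot, so at most BTREE_ITER_MAX paths per transaction);
	 * allocating a slot is just finding the first clear bit.
	 */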
	/*
	 * Do this before marking the new path as allocated, since it won't be
	 * initialized yet:
	 */
	if (unlikely(idx > trans->nr_max_paths))
		bch2_trans_update_max_paths(trans);

	trans->paths_allocated |= 1ULL << idx;

	path = &trans->paths[idx];
	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, path);
	trans->paths_sorted = false;

struct btree_path *bch2_path_get(struct btree_trans *trans,
				 enum btree_id btree_id, struct bpos pos,
				 unsigned locks_want, unsigned level,
				 unsigned flags, unsigned long ip)
	struct btree_path *path, *path_pos = NULL;
	bool cached = flags & BTREE_ITER_CACHED;
	bool intent = flags & BTREE_ITER_INTENT;

	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_locks(trans);

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, i) {
		if (__btree_path_cmp(path,

	    path_pos->cached	== cached &&
	    path_pos->btree_id	== btree_id &&
	    path_pos->level	== level) {
		__btree_path_get(path_pos, intent);
		path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = btree_path_alloc(trans, path_pos);

		__btree_path_get(path, intent);
		path->btree_id		= btree_id;
		path->cached		= cached;
		path->uptodate		= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked	= false;
		path->level		= level;
		path->locks_want	= locks_want;
		path->nodes_locked	= 0;
		for (i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b	= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef TRACK_PATH_ALLOCATED
		path->ip_allocated	= ip;
		trans->paths_sorted	= false;

	if (!(flags & BTREE_ITER_NOPRESERVE))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */
	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));

		if (!k.k || !bpos_eq(path->pos, k.k->p))
		struct bkey_cached *ck = (void *) path->l[0].b;

			 (path->btree_id != ck->key.btree_id ||
			  !bkey_eq(path->pos, ck->key.pos)));
		if (!ck || !ck->valid)
			return bkey_s_c_null;

		k = bkey_i_to_s_c(ck->k);

	return (struct bkey_s_c) { u, NULL };

/* Btree iterators: */

__bch2_btree_iter_traverse(struct btree_iter *iter)
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);

bch2_btree_iter_traverse(struct btree_iter *iter)
	iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);

	btree_path_set_should_be_locked(iter->path);

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);

	b = btree_path_node(iter->path, iter->path->level);

	BUG_ON(bpos_lt(b->key.k.p, iter->pos));

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct btree_path *path = iter->path;
	struct btree *b = NULL;

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))

	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);

	b = btree_path_node(path, path->level + 1);

	if (bpos_eq(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, path, iter->flags);

		b = path->l[path->level].b;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(iter->path);
	BUG_ON(iter->path->uptodate);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
	if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
		struct bpos pos = iter->k.p;
		bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
			     ? bpos_eq(pos, SPOS_MAX)
			     : bkey_eq(pos, SPOS_MAX));

		if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
			pos = bkey_successor(iter, pos);
		bch2_btree_iter_set_pos(iter, pos);

		if (!btree_path_node(iter->path, iter->path->level))

		iter->advanced = true;

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		     ? bpos_eq(pos, POS_MIN)
		     : bkey_eq(pos, POS_MIN));

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);

struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
	struct btree_insert_entry *i;
	struct bkey_i *ret = NULL;

	trans_for_each_update(iter->trans, i) {
		if (i->btree_id < iter->btree_id)
		if (i->btree_id > iter->btree_id)
		if (bpos_lt(i->k->k.p, iter->path->pos))
		if (i->key_cache_already_flushed)
		if (!ret || bpos_lt(i->k->k.p, ret->k.p))

static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
	return iter->flags & BTREE_ITER_WITH_UPDATES
		? __bch2_btree_trans_peek_updates(iter)
		: NULL;
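
/*
 * Overlaying pending updates here is what lets reads within a transaction
 * see that same transaction's uncommitted writes.
 */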

struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
				       struct btree_iter *iter,
				       struct bpos end_pos)
	if (bpos_lt(iter->path->pos, iter->journal_pos))
		iter->journal_idx = 0;

	k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
					&iter->journal_idx);

	iter->journal_pos = k ? k->k.p : end_pos;

struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
					      struct btree_iter *iter)
	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos);

		return bkey_i_to_s_c(k);

	return bkey_s_c_null;

struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bkey_s_c k)
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter,
				k.k ? k.k->p : path_l(iter->path)->b->key.k.p);

		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);

/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;

	if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
	    bpos_eq(iter->pos, pos))
		return bkey_s_c_null;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_INTENT, 0,
						     iter->flags|BTREE_ITER_CACHED|
						     BTREE_ITER_CACHED_NOFILL,

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
					 iter->flags|BTREE_ITER_CACHED) ?:
		bch2_btree_path_relock(trans, iter->path, _THIS_IP_);
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(iter->key_cache_path);

	k = bch2_btree_path_peek_slot(iter->key_cache_path, &u);
	if (k.k && !bkey_err(k)) {

static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
	struct btree_trans *trans = iter->trans;
	struct bkey_i *next_update;
	struct bkey_s_c k, k2;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);

		struct btree_path_level *l;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		l = path_l(iter->path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);

		btree_path_set_should_be_locked(iter->path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
				bch2_btree_iter_set_pos(iter, iter->pos);

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
			k = btree_trans_peek_journal(trans, iter, k);

		next_update = btree_trans_peek_updates(iter);

		    bpos_le(next_update->k.p,
			    k.k ? k.k->p : l->b->key.k.p)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non-deleted.
			 */
			search_key = !bpos_eq(search_key, k.k->p)
				? k.k->p
				: bpos_successor(k.k->p);

		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);
			bch2_btree_iter_set_pos(iter, SPOS_MAX);

	bch2_btree_iter_verify(iter);
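
/*
 * A minimal iteration sketch (illustrative, not code from this file) - the
 * for_each_btree_key() macro drives the peek and advance helpers in this
 * file:
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
 *			   BTREE_ITER_PREFETCH, k, ret) {
 *		(process k)
 *	}
 *	bch2_trans_iter_exit(&trans, &iter);
 */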

/**
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = btree_iter_search_key(iter);
	struct bpos iter_pos;

	EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
	EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));

	if (iter->update_path) {
		bch2_path_put_nokeep(trans, iter->update_path,
				     iter->flags & BTREE_ITER_INTENT);
		iter->update_path = NULL;

	bch2_btree_iter_verify_entry_exit(iter);

		k = __bch2_btree_iter_peek(iter, search_key);

		if (unlikely(bkey_err(k)))

		/*
		 * iter->pos should be monotonically increasing, and always be
		 * equal to the key we just returned - except extents can
		 * straddle iter->pos:
		 */
		if (!(iter->flags & BTREE_ITER_IS_EXTENTS))

			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));

		if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
			     ? bkey_gt(iter_pos, end)
			     : bkey_ge(iter_pos, end)))

		if (iter->update_path &&
		    !bkey_eq(iter->update_path->pos, k.k->p)) {
			bch2_path_put_nokeep(trans, iter->update_path,
					     iter->flags & BTREE_ITER_INTENT);
			iter->update_path = NULL;

		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    (iter->flags & BTREE_ITER_INTENT) &&
		    !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
		    !iter->update_path) {
			struct bpos pos = k.k->p;

			if (pos.snapshot < iter->snapshot) {
				search_key = bpos_successor(k.k->p);

			pos.snapshot = iter->snapshot;

			/*
			 * advance, same as on exit for iter->path, but only up
			 * to snapshot
			 */
			__btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
			iter->update_path = iter->path;

			iter->update_path = bch2_btree_path_set_pos(trans,
						iter->update_path, pos,
						iter->flags & BTREE_ITER_INTENT,
			ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
			if (unlikely(ret)) {
				k = bkey_s_c_err(ret);

		/*
		 * We can never have a key in a leaf node at POS_MAX, so
		 * we don't have to check these successor() calls:
		 */
		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    !bch2_snapshot_is_ancestor(trans->c,
			search_key = bpos_successor(k.k->p);

		if (bkey_whiteout(k.k) &&
		    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
			search_key = bkey_successor(iter, k.k->p);

	iter->pos = iter_pos;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
				iter->flags & BTREE_ITER_INTENT,
				btree_iter_ip_allocated(iter));

	btree_path_set_should_be_locked(iter->path);

	if (iter->update_path) {
		ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
			k = bkey_s_c_err(ret);
		btree_path_set_should_be_locked(iter->update_path);

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		iter->pos.snapshot = iter->snapshot;

	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret)) {
		bch2_btree_iter_set_pos(iter, iter->pos);
		k = bkey_s_c_err(ret);

	bch2_btree_iter_verify_entry_exit(iter);

	bch2_btree_iter_set_pos(iter, end);

/**
 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
 * to iterator's current position, returning keys from every level of the btree.
 * For keys at different levels of the btree that compare equal, the key from
 * the lower level (leaf) is returned first.
 */
struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;

	EBUG_ON(iter->path->cached);
	bch2_btree_iter_verify(iter);
	BUG_ON(iter->path->level < iter->min_depth);
	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
	EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));

		iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		/* Already at end? */
		if (!btree_path_node(iter->path, iter->path->level)) {

		k = btree_path_level_peek_all(trans->c,
					      &iter->path->l[iter->path->level], &iter->k);

		/* Check if we should go up to the parent node: */
		    bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
			iter->pos = path_l(iter->path)->b->key.k.p;
			btree_path_set_level_up(trans, iter->path);
			iter->advanced = false;

		/*
		 * Check if we should go back down to a leaf:
		 * If we're not in a leaf node, we only return the current key
		 * if it exactly matches iter->pos - otherwise we first have to
		 * go back to the leaf:
		 */
		if (iter->path->level != iter->min_depth &&
		    !bpos_eq(iter->pos, k.k->p))) {
			btree_path_set_level_down(trans, iter->path, iter->min_depth);
			iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;

		/* Check if we should go to the next key: */
		if (iter->path->level == iter->min_depth &&
		    bpos_eq(iter->pos, k.k->p)) {
			iter->pos = bpos_successor(iter->pos);
			iter->advanced = false;

		if (iter->advanced &&
		    iter->path->level == iter->min_depth &&
		    !bpos_eq(k.k->p, iter->pos))
			iter->advanced = false;

	BUG_ON(iter->advanced);

	btree_path_set_should_be_locked(iter->path);

	bch2_btree_iter_verify(iter);

/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);

/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = iter->pos;
	struct btree_path *saved_path = NULL;
	struct bkey saved_k;
	const struct bch_val *saved_v;

	EBUG_ON(iter->path->cached || iter->path->level);
	EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);

	if (iter->flags & BTREE_ITER_WITH_JOURNAL)
		return bkey_s_c_err(-EIO);

	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		search_key.snapshot = U32_MAX;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);

		k = btree_path_level_peek(trans, iter->path,
					  &iter->path->l[0], &iter->k);
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bpos_ge(bkey_start_pos(k.k), search_key)
		     : bpos_gt(k.k->p, search_key)))
			k = btree_path_level_prev(trans, iter->path,
						  &iter->path->l[0], &iter->k);

			if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
				if (k.k->p.snapshot == iter->snapshot)

				/*
				 * If we have a saved candidate, and we're no
				 * longer at the same _key_ (not pos), return
				 * that candidate
				 */
				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
					bch2_path_put_nokeep(trans, iter->path,
						      iter->flags & BTREE_ITER_INTENT);
					iter->path = saved_path;

				if (bch2_snapshot_is_ancestor(iter->trans->c,
					bch2_path_put_nokeep(trans, saved_path,
						      iter->flags & BTREE_ITER_INTENT);
					saved_path = btree_path_clone(trans, iter->path,
						      iter->flags & BTREE_ITER_INTENT);

			search_key = bpos_predecessor(k.k->p);

			if (bkey_whiteout(k.k) &&
			    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
				search_key = bkey_predecessor(iter, k.k->p);
				if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
					search_key.snapshot = U32_MAX;

		} else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
			/* Advance to previous leaf node: */
			search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
			/* Start of btree: */
			bch2_btree_iter_set_pos(iter, POS_MIN);

	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));

	/* Extents can straddle iter->pos: */
	if (bkey_lt(k.k->p, iter->pos))

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		iter->pos.snapshot = iter->snapshot;

	btree_path_set_should_be_locked(iter->path);

	bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_prev(iter);
2433 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2435 struct btree_trans *trans = iter->trans;
2436 struct bpos search_key;
2440 bch2_btree_iter_verify(iter);
2441 bch2_btree_iter_verify_entry_exit(iter);
2442 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2443 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2445 /* extents can't span inode numbers: */
2446 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2447 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2448 if (iter->pos.inode == KEY_INODE_MAX)
2449 return bkey_s_c_null;
2451 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2454 search_key = btree_iter_search_key(iter);
2455 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2456 iter->flags & BTREE_ITER_INTENT,
2457 btree_iter_ip_allocated(iter));
2459 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2460 if (unlikely(ret)) {
2461 k = bkey_s_c_err(ret);
2465 if ((iter->flags & BTREE_ITER_CACHED) ||
2466 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2467 struct bkey_i *next_update;
2469 if ((next_update = btree_trans_peek_updates(iter)) &&
2470 bpos_eq(next_update->k.p, iter->pos)) {
2471 iter->k = next_update->k;
2472 k = bkey_i_to_s_c(next_update);
2473 goto out;
2474 }
2476 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2477 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2478 goto out;
2480 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2481 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2482 if (!bkey_err(k))
2483 iter->k = *k.k;
2484 /* We're not returning a key from iter->path: */
2485 goto out_no_locked;
2486 }
2488 k = bch2_btree_path_peek_slot(iter->path, &iter->k);
2489 if (unlikely(!k.k))
2490 goto out_no_locked;
2491 } else {
2492 struct bpos next;
2493 struct bpos end = iter->pos;
2495 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2496 end.offset = U64_MAX;
2498 EBUG_ON(iter->path->level);
2500 if (iter->flags & BTREE_ITER_INTENT) {
2501 struct btree_iter iter2;
2503 bch2_trans_copy_iter(&iter2, iter);
2504 k = bch2_btree_iter_peek_upto(&iter2, end);
2506 if (k.k && !bkey_err(k)) {
2507 iter->k = iter2.k;
2508 k.k = &iter->k;
2509 }
2510 bch2_trans_iter_exit(trans, &iter2);
2511 } else {
2512 struct bpos pos = iter->pos;
2514 k = bch2_btree_iter_peek_upto(iter, end);
2515 if (unlikely(bkey_err(k)))
2516 bch2_btree_iter_set_pos(iter, pos);
2521 if (unlikely(bkey_err(k)))
2522 goto out_no_locked;
2524 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2526 if (bkey_lt(iter->pos, next)) {
2527 bkey_init(&iter->k);
2528 iter->k.p = iter->pos;
2530 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2531 bch2_key_resize(&iter->k,
2532 min_t(u64, KEY_SIZE_MAX,
2533 (next.inode == iter->pos.inode
2534 ? next.offset
2535 : KEY_OFFSET_MAX) -
2536 iter->pos.offset));
2537 EBUG_ON(!iter->k.size);
2540 k = (struct bkey_s_c) { &iter->k, NULL };
2543 out:
2544 btree_path_set_should_be_locked(iter->path);
2545 out_no_locked:
2546 bch2_btree_iter_verify_entry_exit(iter);
2547 bch2_btree_iter_verify(iter);
2548 ret = bch2_btree_iter_verify_ret(iter, k);
2549 if (unlikely(ret))
2550 return bkey_s_c_err(ret);
2552 return k;
2555 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2557 if (!bch2_btree_iter_advance(iter))
2558 return bkey_s_c_null;
2560 return bch2_btree_iter_peek_slot(iter);
2563 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2565 if (!bch2_btree_iter_rewind(iter))
2566 return bkey_s_c_null;
2568 return bch2_btree_iter_peek_slot(iter);
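/*
 * Example (editor's sketch): the slots interface returns a key for every
 * position - where nothing is stored, peek_slot() synthesizes a deleted key
 * (sized to cover the hole on extents btrees). A minimal bounded scan:
 */
static int __maybe_unused example_scan_slots(struct btree_iter *iter, unsigned nr)
{
	struct bkey_s_c k;
	unsigned i;
	int ret = 0;

	for (i = 0; i < nr; i++) {
		k = i ? bch2_btree_iter_next_slot(iter)
		      : bch2_btree_iter_peek_slot(iter);
		ret = bkey_err(k);
		if (ret || !k.k)
			break;
		/* k is either a real key or a synthesized hole at iter->pos */
	}

	return ret;
}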
2571 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2573 struct bkey_s_c k;
2575 while (btree_trans_too_many_iters(iter->trans) ||
2576 (k = bch2_btree_iter_peek_type(iter, iter->flags),
2577 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2578 bch2_trans_begin(iter->trans);
2580 return k;
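/*
 * Editor's note: the helper above folds the retry into the peek itself -
 * bch2_trans_begin() is called both when the transaction has accumulated too
 * many paths and when the peek returns a transaction restart error - so on
 * return, any pointers obtained before the call may have been invalidated by
 * a restart.
 */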
2583 /* new transactional stuff: */
2585 #ifdef CONFIG_BCACHEFS_DEBUG
2586 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2588 struct btree_path *path;
2589 unsigned i;
2591 BUG_ON(trans->nr_sorted != hweight64(trans->paths_allocated));
2593 trans_for_each_path(trans, path) {
2594 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2595 BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
2598 for (i = 0; i < trans->nr_sorted; i++) {
2599 unsigned idx = trans->sorted[i];
2601 EBUG_ON(!(trans->paths_allocated & (1ULL << idx)));
2602 BUG_ON(trans->paths[idx].sorted_idx != i);
2606 static void btree_trans_verify_sorted(struct btree_trans *trans)
2608 struct btree_path *path, *prev = NULL;
2609 unsigned i;
2611 if (!bch2_debug_check_iterators)
2612 return;
2614 trans_for_each_path_inorder(trans, path, i) {
2615 if (prev && btree_path_cmp(prev, path) > 0) {
2616 __bch2_dump_trans_paths_updates(trans, true);
2617 panic("trans paths out of order!\n");
2618 }
2619 prev = path;
2622 #else
2623 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2624 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2625 #endif
2627 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2629 int i, l = 0, r = trans->nr_sorted, inc = 1;
2630 bool swapped;
2632 btree_trans_verify_sorted_refs(trans);
2634 if (trans->paths_sorted)
2635 goto out;
2637 /*
2638 * Cocktail shaker sort: this is efficient because iterators will be
2639 * mostly sorted.
2640 */
2641 do {
2642 swapped = false;
2644 for (i = inc > 0 ? l : r - 2;
2645 i + 1 < r && i >= l;
2646 i += inc) {
2647 if (btree_path_cmp(trans->paths + trans->sorted[i],
2648 trans->paths + trans->sorted[i + 1]) > 0) {
2649 swap(trans->sorted[i], trans->sorted[i + 1]);
2650 trans->paths[trans->sorted[i]].sorted_idx = i;
2651 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2652 swapped = true;
2653 }
2654 }
2656 if (inc > 0)
2657 --r;
2658 else
2659 ++l;
2660 inc = -inc;
2661 } while (swapped);
2663 trans->paths_sorted = true;
2664 out:
2665 btree_trans_verify_sorted(trans);
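/*
 * Editor's illustration of the cocktail shaker sort pattern used above, on a
 * plain int array: alternate forward and backward passes, shrinking the
 * unsorted window from the end a pass has just cleaned. Nearly-sorted input
 * (the common case for trans->sorted) finishes in one or two passes.
 */
static void __maybe_unused example_shaker_sort(int *a, int n)
{
	int l = 0, r = n, inc = 1, i;
	bool swapped;

	do {
		swapped = false;
		for (i = inc > 0 ? l : r - 2; i + 1 < r && i >= l; i += inc)
			if (a[i] > a[i + 1]) {
				swap(a[i], a[i + 1]);
				swapped = true;
			}
		if (inc > 0)
			--r;
		else
			++l;
		inc = -inc;
	} while (swapped);
}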
2668 static inline void btree_path_list_remove(struct btree_trans *trans,
2669 struct btree_path *path)
2671 unsigned i;
2673 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2674 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
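/*
 * Editor's note: trans->sorted[] holds u8 path indices, so the number of
 * entries being shifted equals the number of bytes; memmove_u64s_down_small()
 * (and _up_small() below) copy in whole u64 words, hence the
 * DIV_ROUND_UP(..., 8). This assumes the array is declared with enough slack
 * that copying up to seven extra bytes past the end is harmless.
 */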
2675 trans->nr_sorted--;
2676 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2677 trans->sorted + path->sorted_idx + 1,
2678 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
2679 #else
2680 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2681 #endif
2682 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2683 trans->paths[trans->sorted[i]].sorted_idx = i;
2685 path->sorted_idx = U8_MAX;
2688 static inline void btree_path_list_add(struct btree_trans *trans,
2689 struct btree_path *pos,
2690 struct btree_path *path)
2692 unsigned i;
2694 path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;
2696 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2697 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2698 trans->sorted + path->sorted_idx,
2699 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
2700 trans->nr_sorted++;
2701 trans->sorted[path->sorted_idx] = path->idx;
2702 #else
2703 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
2704 #endif
2706 for (i = path->sorted_idx; i < trans->nr_sorted; i++)
2707 trans->paths[trans->sorted[i]].sorted_idx = i;
2709 btree_trans_verify_sorted_refs(trans);
2712 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2714 if (iter->path)
2715 bch2_path_put(trans, iter->path,
2716 iter->flags & BTREE_ITER_INTENT);
2717 if (iter->update_path)
2718 bch2_path_put_nokeep(trans, iter->update_path,
2719 iter->flags & BTREE_ITER_INTENT);
2720 if (iter->key_cache_path)
2721 bch2_path_put(trans, iter->key_cache_path,
2722 iter->flags & BTREE_ITER_INTENT);
2723 iter->path = NULL;
2724 iter->update_path = NULL;
2725 iter->key_cache_path = NULL;
2728 static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
2729 struct btree_iter *iter,
2730 unsigned btree_id, struct bpos pos,
2731 unsigned flags)
2733 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2734 bch2_btree_iter_flags(trans, btree_id, flags),
2735 _RET_IP_);
2738 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2739 struct btree_iter *iter,
2740 unsigned btree_id, struct bpos pos,
2741 unsigned flags)
2743 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2744 bch2_btree_iter_flags(trans, btree_id, flags),
2745 _RET_IP_);
2748 void bch2_trans_node_iter_init(struct btree_trans *trans,
2749 struct btree_iter *iter,
2750 enum btree_id btree_id,
2751 struct bpos pos,
2752 unsigned locks_want,
2753 unsigned depth,
2754 unsigned flags)
2756 flags |= BTREE_ITER_NOT_EXTENTS;
2757 flags |= __BTREE_ITER_ALL_SNAPSHOTS;
2758 flags |= BTREE_ITER_ALL_SNAPSHOTS;
2760 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2761 __bch2_btree_iter_flags(trans, btree_id, flags),
2762 _RET_IP_);
2764 iter->min_depth = depth;
2766 BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2767 BUG_ON(iter->path->level != depth);
2768 BUG_ON(iter->min_depth != depth);
2771 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2773 *dst = *src;
2775 __btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
2776 if (src->update_path)
2777 __btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
2778 dst->key_cache_path = NULL;
2781 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2783 unsigned new_top = trans->mem_top + size;
2784 size_t old_bytes = trans->mem_bytes;
2785 size_t new_bytes = roundup_pow_of_two(new_top);
2786 void *new_mem;
2787 void *p;
2789 trans->mem_max = max(trans->mem_max, new_top);
2791 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2793 new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);
2794 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2795 new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
2796 new_bytes = BTREE_TRANS_MEM_MAX;
2797 }
2800 if (!new_mem)
2801 return ERR_PTR(-ENOMEM);
2803 trans->mem = new_mem;
2804 trans->mem_bytes = new_bytes;
2806 if (old_bytes) {
2807 trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2808 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2811 p = trans->mem + trans->mem_top;
2812 trans->mem_top += size;
2814 return p;
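/*
 * Example (editor's sketch): memory from the transaction bump allocator is
 * valid until the transaction is reset. Callers must check for the
 * transaction-restart error that a reallocation forces:
 */
static int __maybe_unused example_trans_alloc(struct btree_trans *trans)
{
	struct bkey_i *update = __bch2_trans_kmalloc(trans, sizeof(*update));

	if (IS_ERR(update))
		return PTR_ERR(update); /* may be transaction_restart_mem_realloced */

	bkey_init(&update->k);
	return 0;
}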
2817 static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
2819 struct bch_fs *c = trans->c;
2820 struct btree_path *path;
2822 trans_for_each_path(trans, path)
2823 if (path->cached && !btree_node_locked(path, 0))
2824 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
2826 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2827 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2828 trans->srcu_lock_time = jiffies;
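/*
 * Editor's note: dropping and retaking the SRCU read lock above lets the
 * btree_trans_barrier grace period advance when a transaction runs for a
 * long time; cached-path node pointers are first poisoned with
 * -BCH_ERR_no_btree_node_srcu_reset so they are re-validated rather than
 * dereferenced after the old SRCU period ends.
 */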
2831 /**
2832 * bch2_trans_begin() - reset a transaction after an interrupted attempt
2833 * @trans: transaction to reset
2834 *
2835 * While iterating over nodes or updating nodes, an attempt to lock a btree
2836 * node may return BCH_ERR_transaction_restart when the trylock fails. When
2837 * this occurs, bch2_trans_begin() should be called and the transaction retried.
2838 */
2839 u32 bch2_trans_begin(struct btree_trans *trans)
2841 struct btree_path *path;
2843 bch2_trans_reset_updates(trans);
2845 trans->restart_count++;
2846 trans->mem_top = 0;
2848 trans_for_each_path(trans, path) {
2849 path->should_be_locked = false;
2851 /*
2852 * If the transaction wasn't restarted, we're presuming to be
2853 * doing something new: don't keep iterators except the ones that
2854 * are in use - except for the subvolumes btree:
2855 */
2856 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
2857 path->preserve = false;
2859 /*
2860 * XXX: we probably shouldn't be doing this if the transaction
2861 * was restarted, but currently we still overflow transaction
2862 * iterators if we do that
2863 */
2864 if (!path->ref && !path->preserve)
2865 __bch2_path_free(trans, path);
2866 else
2867 path->preserve = false;
2870 if (!trans->restarted &&
2871 (need_resched() ||
2872 local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
2873 bch2_trans_unlock(trans);
2874 cond_resched();
2875 bch2_trans_relock(trans);
2878 if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
2879 bch2_trans_reset_srcu_lock(trans);
2881 trans->last_begin_ip = _RET_IP_;
2882 if (trans->restarted) {
2883 bch2_btree_path_traverse_all(trans);
2884 trans->notrace_relock_fail = false;
2887 trans->last_begin_time = local_clock();
2888 return trans->restart_count;
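/*
 * Example (editor's sketch, not part of the original source): the canonical
 * retry loop around a transaction body. Any error matching
 * BCH_ERR_transaction_restart means "call bch2_trans_begin() and retry";
 * do_thing() is a hypothetical transactional helper.
 */
static int __maybe_unused example_retry_loop(struct btree_trans *trans,
					     int (*do_thing)(struct btree_trans *))
{
	int ret;

	do {
		bch2_trans_begin(trans);
		ret = do_thing(trans);
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

	return ret;
}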
2891 static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
2893 size_t paths_bytes = sizeof(struct btree_path) * BTREE_ITER_MAX;
2894 size_t updates_bytes = sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
2895 void *p = NULL;
2897 BUG_ON(trans->used_mempool);
2899 #ifdef __KERNEL__
2900 p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
2901 #endif
2902 if (!p)
2903 p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
2905 trans->paths = p; p += paths_bytes;
2906 trans->updates = p; p += updates_bytes;
2909 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
2911 unsigned bch2_trans_get_fn_idx(const char *fn)
2913 unsigned i;
2915 for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
2916 if (!bch2_btree_transaction_fns[i] ||
2917 bch2_btree_transaction_fns[i] == fn) {
2918 bch2_btree_transaction_fns[i] = fn;
2919 return i;
2920 }
2922 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
2923 return 0;
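/*
 * Example (editor's sketch): the intended calling pattern is a lazily
 * initialized, per-callsite index, along the lines of the bch2_trans_init()
 * wrapper macro (assumed here, not shown in this file):
 *
 *	static unsigned trans_fn_idx;
 *
 *	if (unlikely(!trans_fn_idx))
 *		trans_fn_idx = bch2_trans_get_fn_idx(__func__);
 *	__bch2_trans_init(trans, c, trans_fn_idx);
 */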
2926 void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx)
2927 __acquires(&c->btree_trans_barrier)
2929 struct btree_transaction_stats *s;
2931 BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
2933 memset(trans, 0, sizeof(*trans));
2934 trans->c = c;
2935 trans->fn = fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
2936 ? bch2_btree_transaction_fns[fn_idx] : NULL;
2937 trans->last_begin_time = local_clock();
2938 trans->fn_idx = fn_idx;
2939 trans->locking_wait.task = current;
2940 trans->journal_replay_not_finished =
2941 !test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
2942 closure_init_stack(&trans->ref);
2944 bch2_trans_alloc_paths(trans, c);
2946 s = btree_trans_stats(trans);
2947 if (s && s->max_mem) {
2948 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
2950 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
2952 if (unlikely(!trans->mem)) {
2953 trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2954 trans->mem_bytes = BTREE_TRANS_MEM_MAX;
2955 } else {
2956 trans->mem_bytes = expected_mem_bytes;
2960 if (s) {
2961 trans->nr_max_paths = s->nr_max_paths;
2962 trans->wb_updates_size = s->wb_updates_size;
2965 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
2966 trans->srcu_lock_time = jiffies;
2968 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
2969 struct btree_trans *pos;
2971 mutex_lock(&c->btree_trans_lock);
2972 list_for_each_entry(pos, &c->btree_trans_list, list) {
2973 /*
2974 * We'd much prefer to be stricter here and completely
2975 * disallow multiple btree_trans in the same thread -
2976 * but the data move path calls bch2_write when we
2977 * already have a btree_trans initialized.
2978 */
2979 BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid &&
2980 bch2_trans_locked(pos));
2982 if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
2983 list_add_tail(&trans->list, &pos->list);
2984 goto list_add_done;
2985 }
2986 }
2987 list_add_tail(&trans->list, &c->btree_trans_list);
2988 list_add_done:
2989 mutex_unlock(&c->btree_trans_lock);
2993 static void check_btree_paths_leaked(struct btree_trans *trans)
2995 #ifdef CONFIG_BCACHEFS_DEBUG
2996 struct bch_fs *c = trans->c;
2997 struct btree_path *path;
2999 trans_for_each_path(trans, path)
3000 if (path->ref)
3001 goto leaked;
3002 return;
3003 leaked:
3004 bch_err(c, "btree paths leaked from %s!", trans->fn);
3005 trans_for_each_path(trans, path)
3006 if (path->ref)
3007 printk(KERN_ERR " btree %s %pS\n",
3008 bch2_btree_ids[path->btree_id],
3009 (void *) path->ip_allocated);
3010 /* Be noisy about this: */
3011 bch2_fatal_error(c);
3013 #endif
3015 void bch2_trans_exit(struct btree_trans *trans)
3016 __releases(&c->btree_trans_barrier)
3018 struct btree_insert_entry *i;
3019 struct bch_fs *c = trans->c;
3020 struct btree_transaction_stats *s = btree_trans_stats(trans);
3022 bch2_trans_unlock(trans);
3024 closure_sync(&trans->ref);
3026 if (s)
3027 s->max_mem = max(s->max_mem, trans->mem_max);
3029 trans_for_each_update(trans, i)
3030 __btree_path_put(i->path, true);
3031 trans->nr_updates = 0;
3033 check_btree_paths_leaked(trans);
3035 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
3036 mutex_lock(&c->btree_trans_lock);
3037 list_del(&trans->list);
3038 mutex_unlock(&c->btree_trans_lock);
3041 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3043 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
3045 kfree(trans->extra_journal_entries.data);
3047 if (trans->fs_usage_deltas) {
3048 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3049 REPLICAS_DELTA_LIST_MAX)
3050 mempool_free(trans->fs_usage_deltas,
3051 &c->replicas_delta_pool);
3052 else
3053 kfree(trans->fs_usage_deltas);
3056 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3057 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3058 else
3059 kfree(trans->mem);
3061 #ifdef __KERNEL__
3062 /*
3063 * Userspace doesn't have a real percpu implementation:
3064 */
3065 trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
3066 #endif
3068 if (trans->paths)
3069 mempool_free(trans->paths, &c->btree_paths_pool);
3071 trans->mem = (void *) 0x1;
3072 trans->paths = (void *) 0x1;
3075 static void __maybe_unused
3076 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3077 struct btree_bkey_cached_common *b)
3079 struct six_lock_count c = six_lock_counts(&b->lock);
3080 struct task_struct *owner;
3081 pid_t pid;
3083 rcu_read_lock();
3084 owner = READ_ONCE(b->lock.owner);
3085 pid = owner ? owner->pid : 0;
3086 rcu_read_unlock();
3089 prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3090 b->level, bch2_btree_ids[b->btree_id]);
3091 bch2_bpos_to_text(out, btree_node_pos(b));
3094 prt_printf(out, " locks %u:%u:%u held by pid %u",
3095 c.n[0], c.n[1], c.n[2], pid);
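/*
 * Editor's note: the three counters printed above are the SIX-lock hold
 * counts, in the order read:intent:write - the same three states as the
 * 'r'/'i'/'w' lock_types table in bch2_btree_trans_to_text() below.
 */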
3098 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3100 struct btree_path *path;
3101 struct btree_bkey_cached_common *b;
3102 static char lock_types[] = { 'r', 'i', 'w' };
3103 unsigned l;
3105 if (!out->nr_tabstops) {
3106 printbuf_tabstop_push(out, 16);
3107 printbuf_tabstop_push(out, 32);
3110 prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);
3112 trans_for_each_path(trans, path) {
3113 if (!path->nodes_locked)
3114 continue;
3116 prt_printf(out, " path %u %c l=%u %s:",
3117 path->idx,
3118 path->cached ? 'c' : 'b',
3119 path->level,
3120 bch2_btree_ids[path->btree_id]);
3121 bch2_bpos_to_text(out, path->pos);
3122 prt_newline(out);
3124 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3125 if (btree_node_locked(path, l) &&
3126 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3127 prt_printf(out, " %c l=%u ",
3128 lock_types[btree_node_locked_type(path, l)], l);
3129 bch2_btree_bkey_cached_common_to_text(out, b);
3130 prt_newline(out);
3135 b = READ_ONCE(trans->locking);
3136 if (b) {
3137 prt_printf(out, " blocked for %lluus on",
3138 div_u64(local_clock() - trans->locking_wait.start_time,
3139 1000));
3140 prt_newline(out);
3141 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3142 bch2_btree_bkey_cached_common_to_text(out, b);
3147 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3149 struct btree_transaction_stats *s;
3151 for (s = c->btree_transaction_stats;
3152 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3153 s++) {
3154 kfree(s->max_paths_text);
3155 bch2_time_stats_exit(&s->lock_hold_times);
3158 if (c->btree_trans_barrier_initialized)
3159 cleanup_srcu_struct(&c->btree_trans_barrier);
3160 mempool_exit(&c->btree_trans_mem_pool);
3161 mempool_exit(&c->btree_paths_pool);
3164 int bch2_fs_btree_iter_init(struct bch_fs *c)
3166 struct btree_transaction_stats *s;
3167 unsigned nr = BTREE_ITER_MAX;
3168 int ret;
3170 for (s = c->btree_transaction_stats;
3171 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3172 s++) {
3173 bch2_time_stats_init(&s->lock_hold_times);
3174 mutex_init(&s->lock);
3177 INIT_LIST_HEAD(&c->btree_trans_list);
3178 mutex_init(&c->btree_trans_lock);
3180 ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
3181 sizeof(struct btree_path) * nr +
3182 sizeof(struct btree_insert_entry) * nr) ?:
3183 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3184 BTREE_TRANS_MEM_MAX) ?:
3185 init_srcu_struct(&c->btree_trans_barrier);
3186 if (!ret)
3187 c->btree_trans_barrier_initialized = true;
3188 return ret;
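/*
 * Example (editor's sketch): the expected lifecycle. bch2_fs_btree_iter_init()
 * runs once at filesystem startup; each thread then stack-allocates a
 * btree_trans around its btree work, registering the callsite lazily as in
 * the sketch after bch2_trans_get_fn_idx() above.
 */
static int __maybe_unused example_lifecycle(struct bch_fs *c)
{
	struct btree_trans trans;
	static unsigned fn_idx;
	int ret = 0;

	if (unlikely(!fn_idx))
		fn_idx = bch2_trans_get_fn_idx(__func__);

	__bch2_trans_init(&trans, c, fn_idx);
	/* ... transactional btree work goes here ... */
	bch2_trans_exit(&trans);
	return ret;
}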