// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "extents.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

#define BTREE_ITER_NO_NODE_GET_LOCKS	((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP		((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT	((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP		((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN		((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT		((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR	((struct btree *) 7)
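
/*
 * These sentinels work because is_btree_node() below treats any iter->l[].b
 * pointer below 128 as "not a real node" - so each NO_NODE value records why
 * a level has no node, without needing a separate field.
 */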

static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[l].b >= 128;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(pos);

	return pos;
}
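
/*
 * Illustrative example (not from the original source): extents are indexed by
 * their end position, so an extents iterator at pos 1:10 searches for
 * bkey_successor(1:10) == 1:11 - an extent covering offset 10 ends at 11 or
 * later, and would be missed if we searched for 1:10 itself.
 */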

static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
					      struct btree *b)
{
	return bkey_cmp(btree_iter_search_key(iter), b->data->min_key) < 0;
}

static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
{
	return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->btree_id &&
		!btree_iter_pos_before_node(iter, b) &&
		!btree_iter_pos_after_node(iter, b);
}

/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	bch2_btree_node_unlock_write_inlined(b, iter);
}

void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(!btree_node_intent_locked(iter, b->level));

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[b->level].b == b &&
		    btree_node_read_locked(linked, b->level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	atomic64_sub(__SIX_VAL(read_lock, readers),
		     &b->lock.state.counter);
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
	atomic64_add(__SIX_VAL(read_lock, readers),
		     &b->lock.state.counter);
}
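
/*
 * A note on the atomics above (descriptive only, assuming the six-lock state
 * layout from six.h): the transaction's own read references are subtracted
 * from the reader count so six_lock_write() doesn't wait on ourselves, then
 * added back once the write lock is held. The intent lock we already hold
 * guarantees no other thread can take the write lock in between.
 */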

bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
	     btree_node_lock_increment(iter, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	}

	return false;
}

static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
{
	struct btree *b = iter->l[level].b;

	EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(iter, level))
		return false;

	if (btree_node_intent_locked(iter, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(iter, level)
	    ? six_lock_tryupgrade(&b->lock)
	    : six_relock_type(&b->lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(iter, b, level) &&
	    btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(iter, level);
	return true;
}

static inline bool btree_iter_get_locks(struct btree_iter *iter,
					bool upgrade, bool trace)
{
	unsigned l = iter->level;
	int fail_idx = -1;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(iter, l)
		      : bch2_btree_node_relock(iter, l))) {
			if (trace)
				(upgrade
				 ? trace_node_upgrade_fail
				 : trace_node_relock_fail)(l, iter->l[l].lock_seq,
						is_btree_node(iter, l)
						? 0
						: (unsigned long) iter->l[l].b,
						is_btree_node(iter, l)
						? iter->l[l].b->lock.state.seq
						: 0);

			fail_idx = l;
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		}

		l++;
	} while (l < iter->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_iter_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	while (fail_idx >= 0) {
		btree_node_unlock(iter, fail_idx);
		iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
		--fail_idx;
	}

	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
		iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);

	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
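
/*
 * The return comparison above relies on the ordering of the uptodate states
 * (UPTODATE < NEED_PEEK < NEED_RELOCK < NEED_TRAVERSE, per btree_types.h):
 * "uptodate < NEED_RELOCK" means the iterator holds all the locks it needs.
 */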

bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level,
			    struct btree_iter *iter,
			    enum six_lock_type type)
{
	struct btree_iter *linked;
	bool ret = true;

	/* Check if it's safe to block: */
	trans_for_each_iter(iter->trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/* Must lock btree nodes in key order: */
		if ((cmp_int(iter->btree_id, linked->btree_id) ?:
		     bkey_cmp(pos, linked->pos)) < 0)
			ret = false;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			if (!(iter->trans->nounlock)) {
				linked->locks_want = max_t(unsigned,
						linked->locks_want,
						__fls(linked->nodes_locked) + 1);
				btree_iter_get_locks(linked, true, false);
			}
			ret = false;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (linked->btree_id == iter->btree_id &&
		    level > __fls(linked->nodes_locked)) {
			if (!(iter->trans->nounlock)) {
				linked->locks_want =
					max(level + 1, max_t(unsigned,
					    linked->locks_want,
					    iter->locks_want));
				btree_iter_get_locks(linked, true, false);
			}
			ret = false;
		}
	}

	if (unlikely(!ret)) {
		trace_trans_restart_would_deadlock(iter->trans->ip);
		return false;
	}

	__btree_node_lock_type(iter->trans->c, b, type);
	return true;
}
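
/*
 * Worked example of the key-order rule (illustrative, not from the original
 * source): if iterator A holds a node at 5:0 and iterator B, holding 9:0, now
 * asks to block on 3:0, B must not block - A may next ask for 9:0 and wait on
 * B, while B waits on A's 5:0. Failing here and restarting the transaction
 * (-EINTR via trace_trans_restart_would_deadlock) breaks the cycle.
 */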

/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
	unsigned l;

	for (l = 0; btree_iter_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
			continue;

		BUG_ON(btree_lock_want(iter, l) !=
		       btree_node_locked_type(iter, l));
	}
}

void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_verify_locks(iter);
}
#else
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif

static bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
{
	return btree_iter_get_locks(iter, false, trace);
}

bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
			       unsigned new_locks_want)
{
	struct btree_iter *linked;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	if (btree_iter_get_locks(iter, true, true))
		return true;

	/*
	 * Ancestor nodes must be locked before child nodes, so set locks_want
	 * on iterators that might lock ancestors before us to avoid getting
	 * -EINTR later:
	 */
	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_iter_get_locks(linked, true, false);
		}

	return false;
}

bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
					unsigned new_locks_want)
{
	unsigned l = iter->level;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!bch2_btree_node_upgrade(iter, l)) {
			iter->locks_want = l;
			return false;
		}

		l++;
	} while (l < iter->locks_want);

	return true;
}

void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				 unsigned downgrade_to)
{
	struct btree_iter *linked;
	unsigned l;

	/*
	 * We downgrade linked iterators as well because btree_iter_upgrade
	 * might have had to modify locks_want on linked iterators due to lock
	 * ordering:
	 */
	trans_for_each_iter(iter->trans, linked) {
		unsigned new_locks_want = downgrade_to ?:
			(linked->flags & BTREE_ITER_INTENT ? 1 : 0);

		if (linked->locks_want <= new_locks_want)
			continue;

		linked->locks_want = new_locks_want;

		while (linked->nodes_locked &&
		       (l = __fls(linked->nodes_locked)) >= linked->locks_want) {
			if (l > linked->level) {
				btree_node_unlock(linked, l);
			} else {
				if (btree_node_intent_locked(linked, l)) {
					six_lock_downgrade(&linked->l[l].b->lock);
					linked->nodes_intent_locked ^= 1 << l;
				}
				break;
			}
		}
	}

	bch2_btree_trans_verify_locks(iter->trans);
}

/* Btree transaction locking: */

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_iter *iter;
	bool ret = true;

	trans_for_each_iter(trans, iter)
		if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
			ret &= bch2_btree_iter_relock(iter, true);

	return ret;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		__bch2_btree_iter_unlock(iter);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_iter_verify_level(struct btree_iter *iter,
					 unsigned level)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct btree_iter_level *l = &iter->l[level];
	struct btree_node_iter tmp = l->iter;
	bool locked = btree_node_locked(iter, level);
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100];
	const char *msg;

	if (!debug_check_iterators(iter->trans->c))
		return;

	BUG_ON(iter->level < iter->min_depth);

	if (!btree_iter_node(iter, level))
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	/*
	 * Ideally this invariant would always be true, and hopefully in the
	 * future it will be, but for now set_pos_same_leaf() breaks it:
	 */
	BUG_ON(iter->uptodate < BTREE_ITER_NEED_TRAVERSE &&
	       !btree_iter_pos_in_node(iter, l->b));

	/*
	 * node iterators don't use leaf node iterator:
	 */
	if (btree_iter_type(iter) == BTREE_ITER_NODES &&
	    level <= iter->min_depth)
		goto unlock;

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 *
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
	p = level || btree_node_type_is_extents(iter->btree_id)
		? bch2_btree_node_iter_prev_filter(&tmp, l->b, KEY_TYPE_discard)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
		msg = "after";
		goto err;
	}
unlock:
	if (!locked)
		btree_node_unlock(iter, level);
	return;
err:
	strcpy(buf1, "(none)");
	strcpy(buf2, "(none)");

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&PBUF(buf1), &uk);
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
	}

	panic("iterator should be %s key at level %u:\n"
	      "iter pos %s %llu:%llu\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level,
	      iter->flags & BTREE_ITER_IS_EXTENTS ? ">" : "=>",
	      iter->pos.inode, iter->pos.offset,
	      buf1, buf2);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	unsigned i;

	bch2_btree_trans_verify_locks(iter->trans);

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		bch2_btree_iter_verify_level(iter, i);
}

void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
{
	struct btree_iter *iter;

	if (!debug_check_iterators(trans->c))
		return;

	trans_for_each_iter_with_node(trans, b, iter)
		bch2_btree_iter_verify_level(iter, b->level);
}

#else

static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}

#endif

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_iter_level *l = &iter->l[b->level];
	struct bpos pos = btree_iter_search_key(iter);

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_iter_fix_key_modified(linked, b, where);
		bch2_btree_iter_verify_level(linked, b->level);
	}
}

static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;
	struct bpos iter_pos = btree_iter_search_key(iter);

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    (b->level ||
	     btree_node_type_is_extents(iter->btree_id))) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}

	if (!b->level &&
	    node_iter == &iter->l[0].iter &&
	    iter_current_key_modified)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_iter *linked;

	if (node_iter != &iter->l[b->level].iter) {
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (debug_check_iterators(iter->trans->c))
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_iter_verify_level(linked, b->level);
	}
}

static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
						  struct btree_iter_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	if (debug_check_bkeys(iter->trans->c))
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}

/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
						    struct btree_iter_level *l,
						    struct bkey *u)
{
	return __btree_iter_unpack(iter, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
						struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));
}

static inline struct bkey_s_c __btree_iter_prev(struct btree_iter *iter,
						struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_prev(&l->iter, l->b));
}

static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
					     struct btree_iter_level *l,
					     int max_advance)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->level + 1;
	if (!btree_iter_node(iter, plevel))
		return;

	parent_locked = btree_node_locked(iter, plevel);

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	l = &iter->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(&PBUF(buf), &uk);
		panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
		      buf, b->key.k.p.inode, b->key.k.p.offset);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->level + 1);
}

static inline void __btree_iter_init(struct btree_iter *iter,
				     unsigned level)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct btree_iter_level *l = &iter->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &pos);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->lock.state.seq & 1);

	iter->l[b->level].lock_seq = b->lock.state.seq;
	iter->l[b->level].b = b;
	__btree_iter_init(iter, b->level);
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	enum btree_node_locked_type t;
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->level));

			t = btree_lock_want(linked, b->level);
			if (t != BTREE_NODE_UNLOCKED) {
				six_lock_increment(&b->lock, t);
				mark_btree_node_locked(linked, b->level, t);
			}

			btree_iter_node_set(linked, b);
		}
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->level;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
			__btree_node_unlock(linked, level);
			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__btree_iter_init(linked, b->level);
}

static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want)
{
	struct bch_fs *c = iter->trans->c;
	struct btree *b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(c->btree_roots[iter->btree_id].b);
		iter->level = READ_ONCE(b->level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
					      iter, lock_type)))
			return -EINTR;

		if (likely(b == c->btree_roots[iter->btree_id].b &&
			   b->level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			iter->l[iter->level].b = b;
			for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->lock, lock_type);
	}
}

static void btree_iter_prefetch(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	BKEY_PADDED(k) tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);

	while (nr--) {
		if (!bch2_btree_node_relock(iter, iter->level))
			return;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_unpack(l->b, &tmp.k, k);
		bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level);
}

static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
					    unsigned plevel, struct btree *b)
{
	struct btree_iter_level *l = &iter->l[plevel];
	bool locked = btree_node_locked(iter, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(iter, plevel);
}

static __always_inline int btree_iter_down(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
	BKEY_PADDED(k) tmp;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	bch2_bkey_unpack(l->b, &tmp.k,
			 bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type);
	if (unlikely(IS_ERR(b)))
		return PTR_ERR(b);

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(&tmp.k)))
		btree_node_mem_ptr_set(iter, level + 1, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);

	iter->level = level;

	return 0;
}

static void btree_iter_up(struct btree_iter *iter)
{
	btree_node_unlock(iter, iter->level++);
}

static int btree_iter_traverse_one(struct btree_iter *);

static int __btree_iter_traverse_all(struct btree_trans *trans,
				     struct btree_iter *orig_iter, int ret)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	u8 sorted[BTREE_ITER_MAX];
	unsigned i, nr_sorted = 0;

	trans_for_each_iter(trans, iter)
		sorted[nr_sorted++] = iter - trans->iters;

#define btree_iter_cmp_by_idx(_l, _r)				\
		btree_iter_cmp(&trans->iters[_l], &trans->iters[_r])

	bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx

retry_all:
	bch2_trans_unlock(trans);

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		if (orig_iter) {
			orig_iter->flags |= BTREE_ITER_ERROR;
			orig_iter->l[orig_iter->level].b =
				BTREE_ITER_NO_NODE_ERROR;
		}
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	for (i = 0; i < nr_sorted; i++) {
		iter = &trans->iters[sorted[i]];

		ret = btree_iter_traverse_one(iter);
		if (ret)
			goto retry_all;
	}

	if (hweight64(trans->iters_live) > 1)
		ret = -EINTR;
	else
		trans_for_each_iter(trans, iter)
			if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
				ret = -EINTR;
				break;
			}
out:
	bch2_btree_cache_cannibalize_unlock(c);
	return ret;
}
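
/*
 * Why sort first (descriptive - see the key-order rule in
 * __bch2_btree_node_lock()): redoing the traversals in btree id/position
 * order means every lock is reacquired in the globally agreed order, so a
 * full restart cannot itself deadlock.
 */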

int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
	return __btree_iter_traverse_all(trans, NULL, 0);
}

static inline bool btree_iter_good_node(struct btree_iter *iter,
					unsigned l, int check_pos)
{
	if (!is_btree_node(iter, l) ||
	    !bch2_btree_node_relock(iter, l))
		return false;

	if (check_pos <= 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
		return false;
	if (check_pos >= 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
		return false;
	return true;
}

static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
						     int check_pos)
{
	unsigned l = iter->level;

	while (btree_iter_node(iter, l) &&
	       !btree_iter_good_node(iter, l, check_pos)) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
	}

	return l;
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_iter_traverse_one(struct btree_iter *iter)
{
	unsigned depth_want = iter->level;

	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		return 0;

	/*
	 * if we need interior nodes locked, call btree_iter_relock() to make
	 * sure we walk back up enough that we lock them:
	 */
	if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
	    iter->locks_want > 1)
		bch2_btree_iter_relock(iter, false);

	if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	/*
	 * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
	 * here unnecessary
	 */
	iter->level = btree_iter_up_until_good_node(iter, 0);

	/*
	 * If we've got a btree node locked (i.e. we aren't about to relock the
	 * root) - advance its node iterator if necessary:
	 *
	 * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
	 */
	if (btree_iter_node(iter, iter->level)) {
		BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b));

		btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
	}

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		int ret = btree_iter_node(iter, iter->level)
			? btree_iter_down(iter)
			: btree_iter_lock_root(iter, depth_want);
		if (unlikely(ret)) {
			if (ret == 1)
				return 0;

			iter->level = depth_want;
			iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN;
			return ret;
		}
	}

	iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_iter_verify(iter);
	return 0;
}

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	ret =   bch2_trans_cond_resched(iter->trans) ?:
		btree_iter_traverse_one(iter);
	if (unlikely(ret))
		ret = __btree_iter_traverse_all(iter->trans, iter, ret);

	return ret;
}

static inline void bch2_btree_iter_checks(struct btree_iter *iter,
					  enum btree_iter_type type)
{
	EBUG_ON(iter->btree_id >= BTREE_ID_NR);
	EBUG_ON(btree_iter_type(iter) != type);

	BUG_ON(type == BTREE_ITER_KEYS &&
	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
		bkey_cmp(iter->pos, iter->k.p) > 0));

	bch2_btree_iter_verify_locks(iter);
	bch2_btree_iter_verify_level(iter, iter->level);
}

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_NODES);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return iter->l[iter->level].b;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);

	iter->pos = b->key.k.p;
	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify(iter);

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_NODES);

	/* already got to end? */
	if (!btree_iter_node(iter, iter->level))
		return NULL;

	bch2_trans_cond_resched(iter->trans);

	btree_iter_up(iter);

	if (!bch2_btree_node_relock(iter, iter->level))
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	/* got to end? */
	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */

		/*
		 * We don't really want to be unlocking here except we can't
		 * directly tell btree_iter_traverse() "traverse to this level"
		 * except by setting iter->level, so we have to unlock so we
		 * don't screw up our lock invariants:
		 */
		if (btree_node_read_locked(iter, iter->level))
			btree_node_unlock(iter, iter->level);

		iter->pos	= bkey_successor(iter->pos);
		iter->level	= iter->min_depth;

		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->l[iter->level].b;
	}

	iter->pos = b->key.k.p;
	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify(iter);

	return b;
}

/* Iterate across keys (in leaf nodes only) */

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_iter_level *l = &iter->l[0];

	EBUG_ON(iter->level != 0);
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
	EBUG_ON(!btree_node_locked(iter, 0));
	EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);

	bkey_init(&iter->k);
	iter->k.p = iter->pos = new_pos;
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	btree_iter_advance_to_pos(iter, l, -1);

	/*
	 * XXX:
	 * keeping a node locked that's outside (even just outside) iter->pos
	 * breaks __bch2_btree_node_lock(). This seems to only affect
	 * bch2_btree_node_get_sibling so for now it's fixed there, but we
	 * should try to get rid of this corner case.
	 *
	 * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK)
	 */

	if (bch2_btree_node_iter_end(&l->iter) &&
	    btree_iter_pos_after_node(iter, l->b))
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}

static void btree_iter_pos_changed(struct btree_iter *iter, int cmp)
{
	unsigned l = iter->level;

	if (!cmp)
		goto out;

	l = btree_iter_up_until_good_node(iter, cmp);

	if (btree_iter_node(iter, l)) {
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
			__btree_iter_init(iter, l);

		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(iter, l);
	}
out:
	if (l != iter->level)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	else
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos,
			       bool strictly_greater)
{
	struct bpos old = btree_iter_search_key(iter);
	int cmp;

	iter->flags &= ~BTREE_ITER_IS_EXTENTS;
	iter->flags |= strictly_greater ? BTREE_ITER_IS_EXTENTS : 0;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = new_pos;

	cmp = bkey_cmp(btree_iter_search_key(iter), old);

	btree_iter_pos_changed(iter, cmp);
}

void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	int cmp = bkey_cmp(new_pos, iter->pos);

	bkey_init(&iter->k);
	iter->k.p = iter->pos = new_pos;

	btree_iter_pos_changed(iter, cmp);
}

static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	bool ret;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = l->b->key.k.p;

	ret = bkey_cmp(iter->pos, POS_MAX) != 0;
	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		iter->k.p = iter->pos = bkey_successor(iter->pos);

	btree_iter_pos_changed(iter, 1);
	return ret;
}

static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	bool ret;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = l->b->data->min_key;
	iter->uptodate = BTREE_ITER_NEED_TRAVERSE;

	ret = bkey_cmp(iter->pos, POS_MIN) != 0;
	if (ret) {
		iter->k.p = iter->pos = bkey_predecessor(iter->pos);

		if (iter->flags & BTREE_ITER_IS_EXTENTS)
			iter->k.p = iter->pos = bkey_predecessor(iter->pos);
	}

	btree_iter_pos_changed(iter, -1);
	return ret;
}

/**
 * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
 * it currently points to
 */
static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c ret = { .k = &iter->k };

	if (!bkey_deleted(&iter->k)) {
		struct bkey_packed *_k =
			__bch2_btree_node_iter_peek_all(&l->iter, l->b);

		ret.v = bkeyp_val(&l->b->format, _k);

		if (debug_check_iterators(iter->trans->c)) {
			struct bkey k = bkey_unpack_key(l->b, _k);

			BUG_ON(memcmp(&k, &iter->k, sizeof(k)));
		}

		if (debug_check_bkeys(iter->trans->c))
			bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
	}

	return ret;
}

/**
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	if (iter->uptodate == BTREE_ITER_UPTODATE &&
	    !bkey_deleted(&iter->k))
		return btree_iter_peek_uptodate(iter);

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = __btree_iter_peek(iter, l);
		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should always be equal to the key we just
	 * returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
	    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify_level(iter, 0);
	return k;
}

/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter,
		(iter->flags & BTREE_ITER_IS_EXTENTS)
		? iter->k.p
		: bkey_successor(iter->k.p));

	return bch2_btree_iter_peek(iter);
}
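
/*
 * Note on the ternary above (descriptive): for extents, iter->k.p is the end
 * position of the key just returned and is itself the next position to search
 * from - the next extent begins where this one ended - while for non-extents
 * the next distinct key starts at bkey_successor().
 */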

static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct btree_trans *trans = iter->trans;
	struct btree_insert_entry *i;

	trans_for_each_update2(trans, i)
		if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
		     bkey_cmp(pos, i->k->k.p)) <= 0)
			break;

	return i < trans->updates2 + trans->nr_updates2 &&
		iter->btree_id == i->iter->btree_id
		? bkey_i_to_s_c(i->k)
		: bkey_s_c_null;
}

static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k = __btree_iter_peek(iter, l);
	struct bkey_s_c u = __btree_trans_updates_peek(iter);

	if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0))
		return k;
	if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) {
		iter->k = *u.k;
		return u;
	}
	return bkey_s_c_null;
}

struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
{
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = __bch2_btree_iter_peek_with_updates(iter);

		if (k.k && bkey_deleted(k.k)) {
			bch2_btree_iter_set_pos(iter,
				(iter->flags & BTREE_ITER_IS_EXTENTS)
				? iter->k.p
				: bkey_successor(iter->k.p));
			continue;
		}

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should always be equal to the key we just
	 * returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
	    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	iter->uptodate = BTREE_ITER_UPTODATE;
	return k;
}

struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
{
	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter,
		(iter->flags & BTREE_ITER_IS_EXTENTS)
		? iter->k.p
		: bkey_successor(iter->k.p));

	return bch2_btree_iter_peek_with_updates(iter);
}

/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	if (iter->uptodate == BTREE_ITER_UPTODATE &&
	    !bkey_deleted(&iter->k))
		return btree_iter_peek_uptodate(iter);

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = __btree_iter_peek(iter, l);
		if (!k.k || bkey_cmp(bkey_start_pos(k.k), pos) > 0)
			k = __btree_iter_prev(iter, l);

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_prev_leaf(iter))
			return bkey_s_c_null;
	}

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), pos) > 0);
	iter->pos	= bkey_start_pos(k.k);
	iter->uptodate	= BTREE_ITER_UPTODATE;
	return k;
}

/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	if (unlikely(!bkey_cmp(pos, POS_MIN)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter, bkey_predecessor(pos));

	return bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c
__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter;
	struct bkey_s_c k;
	struct bkey n;
	int ret;

	/* keys & holes can't span inode numbers: */
	if (iter->pos.offset == KEY_OFFSET_MAX) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bkey_successor(iter->pos));

		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);
	}

	/*
	 * iterator is now at the correct position for inserting at iter->pos,
	 * but we need to keep iterating until we find the first non whiteout so
	 * we know how big a hole we have, if any:
	 */

	node_iter = l->iter;
	k = __btree_iter_unpack(iter, l, &iter->k,
		bch2_btree_node_iter_peek(&node_iter, l->b));

	if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
		/*
		 * We're not setting iter->uptodate because the node iterator
		 * doesn't necessarily point at the key we're returning:
		 */

		EBUG_ON(bkey_cmp(k.k->p, iter->pos) <= 0);
		bch2_btree_iter_verify_level(iter, 0);
		return k;
	}

	/* hole */

	if (!k.k)
		k.k = &l->b->key.k;

	bkey_init(&n);
	n.p = iter->pos;
	bch2_key_resize(&n,
			min_t(u64, KEY_SIZE_MAX,
			      (k.k->p.inode == n.p.inode
			       ? bkey_start_offset(k.k)
			       : KEY_OFFSET_MAX) -
			      n.p.offset));

	EBUG_ON(!n.size);

	iter->k	= n;
	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify_level(iter, 0);
	return (struct bkey_s_c) { &iter->k, NULL };
}

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return btree_iter_peek_uptodate(iter);

	ret = bch2_btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return __bch2_btree_iter_peek_slot_extents(iter);

	k = __btree_iter_peek_all(iter, l, &iter->k);

	EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);

	if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
		/* hole */
		bkey_init(&iter->k);
		iter->k.p = iter->pos;
		k = (struct bkey_s_c) { &iter->k, NULL };
	}

	iter->uptodate = BTREE_ITER_UPTODATE;
	bch2_btree_iter_verify_level(iter, 0);
	return k;
}

struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter,
		(iter->flags & BTREE_ITER_IS_EXTENTS)
		? iter->k.p
		: bkey_successor(iter->k.p));

	return bch2_btree_iter_peek_slot(iter);
}

static inline void bch2_btree_iter_init(struct btree_trans *trans,
			struct btree_iter *iter, enum btree_id btree_id,
			struct bpos pos, unsigned flags)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	if (btree_node_type_is_extents(btree_id) &&
	    !(flags & BTREE_ITER_NODES))
		flags |= BTREE_ITER_IS_EXTENTS;

	iter->trans			= trans;
	iter->pos			= pos;
	bkey_init(&iter->k);
	iter->k.p			= pos;
	iter->flags			= flags;
	iter->uptodate			= BTREE_ITER_NEED_TRAVERSE;
	iter->btree_id			= btree_id;
	iter->level			= 0;
	iter->min_depth			= 0;
	iter->locks_want		= flags & BTREE_ITER_INTENT ? 1 : 0;
	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= BTREE_ITER_NO_NODE_INIT;

	prefetch(c->btree_roots[btree_id].b);
}

/* new transactional stuff: */

static inline void __bch2_trans_iter_free(struct btree_trans *trans,
					  unsigned idx)
{
	__bch2_btree_iter_unlock(&trans->iters[idx]);
	trans->iters_linked		&= ~(1ULL << idx);
	trans->iters_live		&= ~(1ULL << idx);
	trans->iters_touched		&= ~(1ULL << idx);
}

int bch2_trans_iter_put(struct btree_trans *trans,
			struct btree_iter *iter)
{
	int ret;

	if (IS_ERR_OR_NULL(iter))
		return 0;

	BUG_ON(trans->iters + iter->idx != iter);

	ret = btree_iter_err(iter);

	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
	    !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
		__bch2_trans_iter_free(trans, iter->idx);

	trans->iters_live &= ~(1ULL << iter->idx);
	return ret;
}

int bch2_trans_iter_free(struct btree_trans *trans,
			 struct btree_iter *iter)
{
	if (IS_ERR_OR_NULL(iter))
		return 0;

	trans->iters_touched &= ~(1ULL << iter->idx);

	return bch2_trans_iter_put(trans, iter);
}

static int bch2_trans_realloc_iters(struct btree_trans *trans,
				    unsigned new_size)
{
	void *p, *new_iters, *new_updates, *new_updates2;
	size_t iters_bytes;
	size_t updates_bytes;

	new_size = roundup_pow_of_two(new_size);

	BUG_ON(new_size > BTREE_ITER_MAX);

	if (new_size <= trans->size)
		return 0;

	BUG_ON(trans->used_mempool);

	bch2_trans_unlock(trans);

	iters_bytes	= sizeof(struct btree_iter) * new_size;
	updates_bytes	= sizeof(struct btree_insert_entry) * new_size;

	p = kmalloc(iters_bytes +
		    updates_bytes +
		    updates_bytes, GFP_NOFS);
	if (p)
		goto success;

	p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
	new_size = BTREE_ITER_MAX;

	trans->used_mempool = true;
success:
	new_iters	= p; p += iters_bytes;
	new_updates	= p; p += updates_bytes;
	new_updates2	= p; p += updates_bytes;

	memcpy(new_iters, trans->iters,
	       sizeof(struct btree_iter) * trans->nr_iters);
	memcpy(new_updates, trans->updates,
	       sizeof(struct btree_insert_entry) * trans->nr_updates);
	memcpy(new_updates2, trans->updates2,
	       sizeof(struct btree_insert_entry) * trans->nr_updates2);

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		memset(trans->iters, POISON_FREE,
		       sizeof(struct btree_iter) * trans->nr_iters +
		       sizeof(struct btree_insert_entry) * trans->nr_iters);

	if (trans->iters != trans->iters_onstack)
		kfree(trans->iters);

	trans->iters		= new_iters;
	trans->updates		= new_updates;
	trans->updates2		= new_updates2;
	trans->size		= new_size;

	if (trans->iters_live) {
		trace_trans_restart_iters_realloced(trans->ip, trans->size);
		return -EINTR;
	}

	return 0;
}

static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
{
	unsigned idx = __ffs64(~trans->iters_linked);

	if (idx < trans->nr_iters)
		goto got_slot;

	if (trans->nr_iters == trans->size) {
		int ret;

		if (trans->nr_iters >= BTREE_ITER_MAX) {
			struct btree_iter *iter;

			trans_for_each_iter(trans, iter) {
				pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
				       bch2_btree_ids[iter->btree_id],
				       iter->pos.inode,
				       iter->pos.offset,
				       (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
				       (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
				       iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
				       (void *) iter->ip_allocated);
			}

			panic("trans iter overflow\n");
		}

		ret = bch2_trans_realloc_iters(trans, trans->size * 2);
		if (ret)
			return ERR_PTR(ret);
	}

	idx = trans->nr_iters++;
	BUG_ON(trans->nr_iters > trans->size);

	trans->iters[idx].idx = idx;
got_slot:
	BUG_ON(trans->iters_linked & (1ULL << idx));
	trans->iters_linked |= 1ULL << idx;
	trans->iters[idx].flags = 0;
	return &trans->iters[idx];
}

static inline void btree_iter_copy(struct btree_iter *dst,
				   struct btree_iter *src)
{
	unsigned i, idx = dst->idx;

	*dst = *src;
	dst->idx = idx;

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(dst, i))
			six_lock_increment(&dst->l[i].b->lock,
					   __btree_lock_want(dst, i));

	dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
	dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
}

static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
{
	if (bkey_cmp(l, r) > 0)
		swap(l, r);

	return POS(r.inode - l.inode, r.offset - l.offset);
}
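
/*
 * E.g. bpos_diff(POS(1, 100), POS(1, 40)) == POS(0, 60): a field-wise
 * "distance" used below to pick the existing iterator closest to the
 * requested position.
 */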

static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags)
{
	struct btree_iter *iter, *best = NULL;

	BUG_ON(trans->nr_iters > BTREE_ITER_MAX);

	trans_for_each_iter(trans, iter) {
		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
			continue;

		if (iter->btree_id != btree_id)
			continue;

		if (best &&
		    bkey_cmp(bpos_diff(best->pos, pos),
			     bpos_diff(iter->pos, pos)) < 0)
			continue;

		best = iter;
	}

	if (!best) {
		iter = btree_trans_iter_alloc(trans);
		if (IS_ERR(iter))
			return iter;

		bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
	} else if ((trans->iters_live & (1ULL << best->idx)) ||
		   (best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) {
		iter = btree_trans_iter_alloc(trans);
		if (IS_ERR(iter))
			return iter;

		btree_iter_copy(iter, best);
	} else {
		iter = best;
	}

	iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
	iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);

	if (iter->flags & BTREE_ITER_INTENT)
		bch2_btree_iter_upgrade(iter, 1);
	else
		bch2_btree_iter_downgrade(iter);

	BUG_ON(iter->btree_id != btree_id);
	BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
	BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
	BUG_ON(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT);
	BUG_ON(trans->iters_live & (1ULL << iter->idx));

	trans->iters_live	|= 1ULL << iter->idx;
	trans->iters_touched	|= 1ULL << iter->idx;

	return iter;
}
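
/*
 * Typical use (a sketch, assuming the bch2_trans_get_iter() wrapper from
 * btree_iter.h that callers normally go through):
 *
 *	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, pos,
 *				   BTREE_ITER_SLOTS);
 *	k = bch2_btree_iter_peek_slot(iter);
 *	bch2_trans_iter_put(&trans, iter);
 */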

struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 enum btree_id btree_id,
					 struct bpos pos, unsigned flags)
{
	struct btree_iter *iter =
		__btree_trans_get_iter(trans, btree_id, pos, flags);

	if (!IS_ERR(iter))
		__bch2_btree_iter_set_pos(iter, pos,
			btree_node_type_is_extents(btree_id));

	return iter;
}

struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos pos,
					    unsigned locks_want,
					    unsigned depth,
					    unsigned flags)
{
	struct btree_iter *iter =
		__btree_trans_get_iter(trans, btree_id, pos,
				       flags|BTREE_ITER_NODES);
	unsigned i;

	BUG_ON(IS_ERR(iter));
	BUG_ON(bkey_cmp(iter->pos, pos));

	iter->locks_want = locks_want;
	iter->level	= depth;
	iter->min_depth	= depth;

	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= NULL;
	iter->l[iter->level].b		= BTREE_ITER_NO_NODE_INIT;

	return iter;
}

struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
					  struct btree_iter *src)
{
	struct btree_iter *iter;

	iter = btree_trans_iter_alloc(trans);
	if (IS_ERR(iter))
		return iter;

	btree_iter_copy(iter, src);

	trans->iters_live |= 1ULL << iter->idx;

	/*
	 * We don't need to preserve this iter since it's cheap to copy it
	 * again - this will cause trans_iter_put() to free it right away:
	 */
	trans->iters_touched &= ~(1ULL << iter->idx);

	return iter;
}

static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
{
	if (size > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(size);
		void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);

		if (!new_mem)
			return -ENOMEM;

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, new_bytes);
			return -EINTR;
		}
	}

	return 0;
}

void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	void *p;
	int ret;

	ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
	if (ret)
		return ERR_PTR(ret);

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
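
/*
 * Callers should be prepared for ERR_PTR(-EINTR) here: if the arena had to be
 * grown mid-transaction (see bch2_trans_preload_mem() above), krealloc() may
 * have moved it, so previously returned pointers could dangle and the
 * transaction must be restarted.
 */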

inline void bch2_trans_unlink_iters(struct btree_trans *trans)
{
	u64 iters = trans->iters_linked &
		~trans->iters_touched &
		~trans->iters_live;

	while (iters) {
		unsigned idx = __ffs64(iters);

		iters &= ~(1ULL << idx);
		__bch2_trans_iter_free(trans, idx);
	}
}

void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
				 BTREE_ITER_SET_POS_AFTER_COMMIT);

	bch2_trans_unlink_iters(trans);

	trans->iters_touched &= trans->iters_live;

	trans->need_reset		= 0;
	trans->nr_updates		= 0;
	trans->nr_updates2		= 0;
	trans->mem_top			= 0;

	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	if (!(flags & TRANS_RESET_NOTRAVERSE))
		bch2_btree_iter_traverse_all(trans);
}

void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
{
	memset(trans, 0, offsetof(struct btree_trans, iters_onstack));

	/*
	 * reallocating iterators currently completely breaks
	 * bch2_trans_iter_put():
	 */
	expected_nr_iters = BTREE_ITER_MAX;

	trans->c	= c;
	trans->ip	= _RET_IP_;
	trans->size	= ARRAY_SIZE(trans->iters_onstack);
	trans->iters	= trans->iters_onstack;
	trans->updates	= trans->updates_onstack;
	trans->updates2	= trans->updates2_onstack;
	trans->fs_usage_deltas = NULL;

	if (expected_nr_iters > trans->size)
		bch2_trans_realloc_iters(trans, expected_nr_iters);

	if (expected_mem_bytes)
		bch2_trans_preload_mem(trans, expected_mem_bytes);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}

int bch2_trans_exit(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	mutex_lock(&trans->c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&trans->c->btree_trans_lock);
#endif

	kfree(trans->fs_usage_deltas);
	kfree(trans->mem);
	if (trans->used_mempool)
		mempool_free(trans->iters, &trans->c->btree_iters_pool);
	else if (trans->iters != trans->iters_onstack)
		kfree(trans->iters);
	trans->mem	= (void *) 0x1;
	trans->iters	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}

void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_iter(trans, iter) {
			if (!iter->nodes_locked)
				continue;

			pr_buf(out, "  iter %s:", bch2_btree_ids[iter->btree_id]);
			bch2_bpos_to_text(out, iter->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(iter, l)) {
					b = iter->l[l].b;

					pr_buf(out, "    %p l=%u %s ",
					       b, l, btree_node_intent_locked(iter, l) ? "i" : "r");
					bch2_bpos_to_text(out, b->key.k.p);
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			pr_buf(out, "  locking %px l=%u %s:",
			       b, b->level,
			       bch2_btree_ids[b->btree_id]);
			bch2_bpos_to_text(out, b->key.k.p);
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_iters_pool);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return  mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
			sizeof(struct btree_iter) * nr +
			sizeof(struct btree_insert_entry) * nr +
			sizeof(struct btree_insert_entry) * nr);
}
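
/*
 * The pool element size above mirrors the layout bch2_trans_realloc_iters()
 * carves out of a single allocation: BTREE_ITER_MAX iterators plus two arrays
 * of btree_insert_entry (updates and updates2).
 */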