#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>

#define BTREE_ITER_NOT_END ((struct btree *) 1)

static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
}

/* Btree node locking: */

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;

	EBUG_ON(iter->nodes[b->level] != b);
	EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);

	for_each_linked_btree_node(iter, b, linked)
		linked->lock_seq[b->level] += 2;

	iter->lock_seq[b->level] += 2;

	six_unlock_write(&b->lock);
}

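/*
 * Take a write lock on a node we already have intent locked; read locks held
 * on this node by linked iterators are dropped around six_lock_write() (and
 * restored afterwards) so that we aren't blocked on our own readers.
 */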
void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(iter->nodes[b->level] != b);
	EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);

	if (six_trylock_write(&b->lock))
		return;

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[b->level] == b &&
		    btree_node_read_locked(linked, b->level))
			readers++;

	if (likely(!readers)) {
		six_lock_write(&b->lock);
	} else {
		/*
		 * Must drop our read locks before calling six_lock_write() -
		 * six_unlock() won't do wakeups until the reader count
		 * goes to 0, and it's safe because we have the node intent
		 * locked:
		 */
		atomic64_sub(__SIX_VAL(read_lock, readers),
			     &b->lock.state.counter);
		six_lock_write(&b->lock);
		atomic64_add(__SIX_VAL(read_lock, readers),
			     &b->lock.state.counter);
	}
}

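/*
 * Re-take a lock that was previously dropped: succeeds if the node hasn't been
 * modified since (checked against the saved lock sequence number), or if a
 * linked iterator still holds the lock type we want.
 */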
bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree_iter *linked;
	struct btree *b = iter->nodes[level];
	enum btree_node_locked_type want = btree_lock_want(iter, level);
	enum btree_node_locked_type have = btree_node_locked_type(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (have != BTREE_NODE_UNLOCKED
	    ? six_trylock_convert(&b->lock, have, want)
	    : six_relock_type(&b->lock, want, iter->lock_seq[level]))
		goto success;

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[level] == b &&
		    btree_node_locked_type(linked, level) == want &&
		    iter->lock_seq[level] == b->lock.state.seq) {
			btree_node_unlock(iter, level);
			six_lock_increment(&b->lock, want);
			goto success;
		}

	return false;
success:
	mark_btree_node_unlocked(iter, level);
	mark_btree_node_locked(iter, level, want);
	return true;
}

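/*
 * Slowpath for taking a node lock: enforces lock ordering against every linked
 * iterator (ancestors before descendants, nodes in key order) and fails
 * instead of blocking when taking the lock could deadlock.
 */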
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level,
			    struct btree_iter *iter,
			    enum six_lock_type type)
{
	struct btree_iter *linked;

	/* Can't have children locked before ancestors: */
	EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));

	/*
	 * Can't hold any read locks while we block taking an intent lock - see
	 * below for reasoning, and we should have already dropped any read
	 * locks in the current iterator
	 */
	EBUG_ON(type == SIX_LOCK_intent &&
		iter->nodes_locked != iter->nodes_intent_locked);

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[level] == b &&
		    btree_node_locked_type(linked, level) == type) {
			six_lock_increment(&b->lock, type);
			return true;
		}

	/*
	 * Must lock btree nodes in key order - this case happens when locking
	 * the prev sibling in btree node merging:
	 */
	if (iter->nodes_locked &&
	    __ffs(iter->nodes_locked) == level &&
	    __btree_iter_cmp(iter->btree_id, pos, iter))
		return false;

	for_each_linked_btree_iter(iter, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			linked->locks_want = max(linked->locks_want,
						 iter->locks_want);
			return false;
		}

		/* We have to lock btree nodes in key order: */
		if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
			return false;

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (linked->btree_id == iter->btree_id &&
		    level > __fls(linked->nodes_locked)) {
			linked->locks_want = max(linked->locks_want,
						 iter->locks_want);
			return false;
		}
	}

	six_lock_type(&b->lock, type);
	return true;
}

/* Btree iterator locking: */

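/*
 * Drop locks above locks_want: locks above the iterator's current level are
 * released outright; at or below the level, intent locks are only downgraded
 * to read locks.
 */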
static void btree_iter_drop_extra_locks(struct btree_iter *iter)
{
	unsigned l;

	while (iter->nodes_locked &&
	       (l = __fls(iter->nodes_locked)) > iter->locks_want) {
		if (!btree_node_locked(iter, l))
			panic("l %u nodes_locked %u\n", l, iter->nodes_locked);

		if (l > iter->level) {
			btree_node_unlock(iter, l);
		} else if (btree_node_intent_locked(iter, l)) {
			six_lock_downgrade(&iter->nodes[l]->lock);
			iter->nodes_intent_locked ^= 1 << l;
		}
	}
}

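/*
 * Change how many levels this iterator wants locked, dropping or upgrading
 * locks to match; returns false if the needed locks couldn't be retaken and
 * the caller must re-traverse.
 */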
bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
				      unsigned new_locks_want)
{
	struct btree_iter *linked;
	unsigned l;

	/* Drop locks we don't want anymore: */
	if (new_locks_want < iter->locks_want)
		for_each_linked_btree_iter(iter, linked)
			if (linked->locks_want > new_locks_want) {
				linked->locks_want = max_t(unsigned, 1,
							   new_locks_want);
				btree_iter_drop_extra_locks(linked);
			}

	iter->locks_want = new_locks_want;
	btree_iter_drop_extra_locks(iter);

	for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
		if (!bch2_btree_node_relock(iter, l))
			goto fail;

	return true;
fail:
	/*
	 * Just an optimization: ancestor nodes must be locked before child
	 * nodes, so set locks_want on iterators that might lock ancestors
	 * before us to avoid getting -EINTR later:
	 */
	for_each_linked_btree_iter(iter, linked)
		if (linked->btree_id == iter->btree_id &&
		    btree_iter_cmp(linked, iter) <= 0)
			linked->locks_want = max_t(unsigned, linked->locks_want,
						   new_locks_want);
	return false;
}

static int __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	BUG_ON(iter->error == -EINTR);

	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));

	return iter->error;
}

int bch2_btree_iter_unlock(struct btree_iter *iter)
{
	struct btree_iter *linked;

	for_each_linked_btree_iter(iter, linked)
		__bch2_btree_iter_unlock(linked);
	return __bch2_btree_iter_unlock(iter);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void __bch2_btree_iter_verify(struct btree_iter *iter,
				     struct btree *b)
{
	struct btree_node_iter *node_iter = &iter->node_iters[b->level];
	struct btree_node_iter tmp = *node_iter;
	struct bkey_packed *k;

	bch2_btree_node_iter_verify(node_iter, b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 */
	k = b->level
		? bch2_btree_node_iter_prev(&tmp, b)
		: bch2_btree_node_iter_prev_all(&tmp, b);
	if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
					   iter->is_extents)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(buf, sizeof(buf), &uk);
		panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
		      buf, iter->pos.inode, iter->pos.offset);
	}

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
					    iter->is_extents)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(buf, sizeof(buf), &uk);
		panic("next key should be after iter pos:\n%llu:%llu\n%s\n",
		      iter->pos.inode, iter->pos.offset, buf);
	}
}

void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	if (iter->nodes[b->level] == b)
		__bch2_btree_iter_verify(iter, b);

	for_each_linked_btree_node(iter, b, linked)
		__bch2_btree_iter_verify(linked, b);
}

#endif

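/*
 * Adjust a single node iterator after a key taking @new_u64s u64s was written
 * at @where, replacing @clobber_u64s u64s: shifts the iterator's offsets into
 * the modified bset and re-sorts or re-adds the bset as needed.
 */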
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
				      iter->is_extents))
		bch2_btree_node_iter_push(node_iter, b, where, end);
	return;
found:
	set->end = (int) set->end + shift;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
				      iter->is_extents)) {
		set->k = offset;
		bch2_btree_node_iter_sort(node_iter, b);
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			*set = node_iter->data[--node_iter->used];
		bch2_btree_node_iter_sort(node_iter, b);
	} else {
		set->k = (int) set->k + shift;
	}

	/*
	 * Interior nodes are special because iterators for interior nodes don't
	 * obey the usual invariants regarding the iterator position:
	 *
	 * We may have whiteouts that compare greater than the iterator
	 * position, and logically should be in the iterator, but that we
	 * skipped past to find the first live key greater than the iterator
	 * position. This becomes an issue when we insert a new key that is
	 * greater than the current iterator position, but smaller than the
	 * whiteouts we've already skipped past - this happens in the course of
	 * a btree split.
	 *
	 * We have to rewind the iterator past to before those whiteouts here,
	 * else bkey_node_iter_prev() is not going to work and who knows what
	 * else would happen. And we have to do it manually, because here we've
	 * already done the insert and the iterator is currently inconsistent:
	 *
	 * We've got multiple competing invariants, here - we have to be careful
	 * about rewinding iterators for interior nodes, because they should
	 * always point to the key for the child node the btree iterator points
	 * to.
	 */
	if (b->level && new_u64s && !bkey_deleted(where) &&
	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
				      iter->is_extents)) {
		struct bset_tree *t;
		struct bkey_packed *k;

		for_each_bset(b, t) {
			if (bch2_bkey_to_bset(b, where) == t)
				continue;

			k = bch2_bkey_prev_all(b, t,
				bch2_btree_node_iter_bset_pos(node_iter, b, t));
			if (k &&
			    __btree_node_iter_cmp(node_iter, b,
						  k, where) > 0) {
				struct btree_node_iter_set *set;
				unsigned offset =
					__btree_node_key_to_offset(b, bkey_next(k));

				btree_node_iter_for_each(node_iter, set)
					if (set->k == offset) {
						set->k = __btree_node_key_to_offset(b, k);
						bch2_btree_node_iter_sort(node_iter, b);
						goto next_bset;
					}

				bch2_btree_node_iter_push(node_iter, b, k,
						btree_bkey_last(b, t));
			}
next_bset:
			;
		}
	}
}

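/*
 * A key was inserted or overwritten in @b: fix up the node iterators of @iter
 * and of every linked iterator currently pointing into this node.
 */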
void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bset_tree *t,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct btree_iter *linked;

	if (node_iter != &iter->node_iters[b->level])
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

	if (iter->nodes[b->level] == b)
		__bch2_btree_node_iter_fix(iter, b,
					   &iter->node_iters[b->level], t,
					   where, clobber_u64s, new_u64s);

	for_each_linked_btree_node(iter, b, linked)
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->node_iters[b->level], t,
					   where, clobber_u64s, new_u64s);

	/* interior node iterators are... special... */
	if (!b->level)
		bch2_btree_iter_verify(iter, b);
}

/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
{
	struct btree *b = iter->nodes[iter->level];
	struct bkey_packed *k =
		bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
	struct bkey_s_c ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	if (!k)
		return bkey_s_c_null;

	ret = bkey_disassemble(b, k, &iter->k);

	if (debug_check_bkeys(iter->c))
		bch2_bkey_debugcheck(iter->c, b, ret);

	return ret;
}

static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
{
	struct btree *b = iter->nodes[iter->level];
	struct bkey_packed *k =
		bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
	struct bkey_s_c ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	if (!k)
		return bkey_s_c_null;

	ret = bkey_disassemble(b, k, &iter->k);

	if (debug_check_bkeys(iter->c))
		bch2_bkey_debugcheck(iter->c, b, ret);

	return ret;
}

static inline void __btree_iter_advance(struct btree_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
				     iter->nodes[iter->level]);
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
	    !iter->nodes[b->level + 1])
		return;

	parent_locked = btree_node_locked(iter, b->level + 1);

	if (!bch2_btree_node_relock(iter, b->level + 1))
		return;

	k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
					  iter->nodes[b->level + 1]);
	if (!k ||
	    bkey_cmp_left_packed(iter->nodes[b->level + 1],
				 k, &b->key.k.p)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(buf, sizeof(buf), &uk);
		panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
		      buf, b->key.k.p.inode, b->key.k.p.offset);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->level + 1);
}

static inline void __btree_iter_init(struct btree_iter *iter,
				     struct btree *b)
{
	bch2_btree_node_iter_init(&iter->node_iters[b->level], b,
				  iter->pos, iter->is_extents,
				  btree_node_is_extents(b));

	/* Skip to first non whiteout: */
	if (b->level)
		bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->btree_id &&
		bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
		btree_iter_pos_cmp(iter->pos, &b->key.k, iter->is_extents);
}

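/*
 * Point the iterator at @b: save the six lock sequence number so the lock can
 * be retaken later, and initialize the node iterator for the new node.
 */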
static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->lock.state.seq & 1);

	iter->lock_seq[b->level] = b->lock.state.seq;
	iter->nodes[b->level] = b;
	__btree_iter_init(iter, b);
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	for_each_linked_btree_iter(iter, linked)
		if (btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->level));

			/*
			 * If @linked wants this node read locked, we don't want
			 * to actually take the read lock now because it's not
			 * legal to hold read locks on other nodes while we take
			 * write locks, so the journal can make forward
			 * progress.
			 *
			 * Instead, btree_iter_node_set() sets things up so
			 * bch2_btree_node_relock() will succeed:
			 */
			if (btree_want_intent(linked, b->level)) {
				six_lock_increment(&b->lock, SIX_LOCK_intent);
				mark_btree_node_intent_locked(linked, b->level);
			}

			btree_iter_node_set(linked, b);
		}

	if (!btree_iter_pos_in_node(iter, b)) {
		six_unlock_intent(&b->lock);
		return false;
	}

	mark_btree_node_intent_locked(iter, b->level);
	btree_iter_node_set(iter, b);
	return true;
}

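/*
 * Node @b is going away: unlock it in every linked iterator and replace the
 * pointer with the BTREE_ITER_NOT_END sentinel, forcing those iterators to
 * re-traverse before using that level again.
 */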
void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->level;

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[level] == b) {
			btree_node_unlock(linked, level);
			linked->nodes[level] = BTREE_ITER_NOT_END;
		}
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	unsigned level = b->level;

	if (iter->nodes[level] == b) {
		BUG_ON(b->lock.state.intent_lock != 1);
		btree_node_unlock(iter, level);
		iter->nodes[level] = BTREE_ITER_NOT_END;
	}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	for_each_linked_btree_node(iter, b, linked)
		__btree_iter_init(linked, b);
	__btree_iter_init(iter, b);
}

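/*
 * Lock the root node: loops because the root may be freed or replaced while we
 * wait for the lock, in which case we unlock and retry against the new root.
 */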
static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want)
{
	struct bch_fs *c = iter->c;
	struct btree *b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(c->btree_roots[iter->btree_id].b);
		iter->level = READ_ONCE(b->level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			iter->nodes[iter->level] = NULL;
			return 0;
		}

		lock_type = btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
					      iter, lock_type)))
			return -EINTR;

		if (likely(b == c->btree_roots[iter->btree_id].b &&
			   b->level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->nodes[i] = BTREE_ITER_NOT_END;
			iter->nodes[iter->level] = b;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->lock, lock_type);
	}
}

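/*
 * Walk down one level: the key the node iterator currently points at is the
 * pointer to the child node - look it up in the btree node cache and lock it.
 */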
static inline int btree_iter_down(struct btree_iter *iter)
{
	struct btree *b;
	struct bkey_s_c k = __btree_iter_peek(iter);
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = btree_lock_want(iter, level);
	BKEY_PADDED(k) tmp;

	bkey_reassemble(&tmp.k, k);

	b = bch2_btree_node_get(iter, &tmp.k, level, lock_type);
	if (unlikely(IS_ERR(b)))
		return PTR_ERR(b);

	iter->level = level;
	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);
	return 0;
}

static void btree_iter_up(struct btree_iter *iter)
{
	btree_node_unlock(iter, iter->level++);
}

int __must_check __bch2_btree_iter_traverse(struct btree_iter *);

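/*
 * Error handling for traversal: on -ENOMEM, take the btree cache cannibalize
 * lock and retry; on -EINTR (a lock ordering conflict), sort this iterator and
 * all linked iterators by position and redo the traversals in that order, so
 * locks are acquired in an order that cannot deadlock.
 */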
static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
{
	struct bch_fs *c = iter->c;
	struct btree_iter *linked, *sorted_iters, **i;

	bch2_btree_iter_unlock(iter);

	if (ret != -ENOMEM && ret != -EINTR)
		goto io_error;

	if (ret == -ENOMEM) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_node_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/*
	 * Linked iters are normally a circular singly linked list - break cycle
	 * while we sort them:
	 */
	linked = iter->next;
	iter->next = NULL;
	sorted_iters = NULL;

	while (linked) {
		iter = linked;
		linked = linked->next;

		i = &sorted_iters;
		while (*i && btree_iter_cmp(iter, *i) > 0)
			i = &(*i)->next;

		iter->next = *i;
		*i = iter;
	}

	/* Make list circular again: */
	iter = sorted_iters;
	while (iter->next)
		iter = iter->next;
	iter->next = sorted_iters;

	/* Now, redo traversals in correct order: */
	iter = sorted_iters;
	do {
		ret = __bch2_btree_iter_traverse(iter);
		if (ret)
			goto out;

		iter = iter->next;
	} while (iter != sorted_iters);

	ret = btree_iter_linked(iter) ? -EINTR : 0;
out:
	bch2_btree_node_cannibalize_unlock(c);
	return ret;

io_error:
	iter->error = ret;
	iter->nodes[iter->level] = NULL;
	return ret;
}

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_btree_iter_unlock().
 */
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	unsigned depth_want = iter->level;

	/* make sure we have all the intent locks we need - ugh */
	if (unlikely(iter->nodes[iter->level] &&
		     iter->level + 1 < iter->locks_want)) {
		unsigned i;

		for (i = iter->level + 1;
		     i < iter->locks_want && iter->nodes[i];
		     i++)
			if (!bch2_btree_node_relock(iter, i)) {
				while (iter->nodes[iter->level] &&
				       iter->level + 1 < iter->locks_want)
					btree_iter_up(iter);
				break;
			}
	}

	/*
	 * If the current node isn't locked, go up until we have a locked node
	 * or run out of nodes:
	 */
	while (iter->nodes[iter->level] &&
	       !(is_btree_node(iter, iter->level) &&
		 bch2_btree_node_relock(iter, iter->level) &&
		 btree_iter_pos_cmp(iter->pos,
				    &iter->nodes[iter->level]->key.k,
				    iter->is_extents)))
		btree_iter_up(iter);

	/*
	 * If we've got a btree node locked (i.e. we aren't about to relock the
	 * root) - advance its node iterator if necessary:
	 */
	if (iter->nodes[iter->level]) {
		struct bkey_s_c k;

		while ((k = __btree_iter_peek_all(iter)).k &&
		       !btree_iter_pos_cmp(iter->pos, k.k, iter->is_extents))
			__btree_iter_advance(iter);
	}

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		int ret = iter->nodes[iter->level]
			? btree_iter_down(iter)
			: btree_iter_lock_root(iter, depth_want);
		if (unlikely(ret)) {
			iter->level = depth_want;
			return ret;
		}
	}

	return 0;
}

int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	if (unlikely(!iter->nodes[iter->level]))
		return 0;

	iter->at_end_of_leaf = false;

	ret = __bch2_btree_iter_traverse(iter);
	if (unlikely(ret))
		ret = btree_iter_traverse_error(iter, ret);

	return ret;
}

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(iter->is_extents);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = iter->nodes[iter->level];

	if (b) {
		EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
		iter->pos = b->key.k.p;
	}

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
{
	struct btree *b;
	int ret;

	EBUG_ON(iter->is_extents);

	btree_iter_up(iter);

	if (!iter->nodes[iter->level])
		return NULL;

	/* parent node usually won't be locked: redo traversal if necessary */
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = iter->nodes[iter->level];
	if (!b)
		return NULL;

	if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
		/* Haven't gotten to the end of the parent node: */

		iter->pos = iter->btree_id == BTREE_ID_INODES
			? btree_type_successor(iter->btree_id, iter->pos)
			: bkey_successor(iter->pos);
		iter->level = depth;

		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->nodes[iter->level];
	}

	iter->pos = b->key.k.p;
	return b;
}

/* Iterate across keys (in leaf nodes only) */

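/*
 * Advance the iterator's position within the leaf node it currently points at,
 * without re-traversing; the new position must not be past the end of the leaf.
 */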
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree *b = iter->nodes[0];
	struct btree_node_iter *node_iter = &iter->node_iters[0];
	struct bkey_packed *k;

	EBUG_ON(iter->level != 0);
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
	EBUG_ON(!btree_node_locked(iter, 0));
	EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);

	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
	       !btree_iter_pos_cmp_packed(b, &new_pos, k,
					  iter->is_extents))
		bch2_btree_node_iter_advance(node_iter, b);

	if (!k &&
	    !btree_iter_pos_cmp(new_pos, &b->key.k, iter->is_extents))
		iter->at_end_of_leaf = true;

	iter->pos = new_pos;
}

void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
	iter->pos = new_pos;
}

void bch2_btree_iter_advance_pos(struct btree_iter *iter)
{
	/*
	 * We use iter->k instead of iter->pos for extents: iter->pos will be
	 * equal to the start of the extent we returned, but we need to advance
	 * to the end of the extent we returned.
	 */
	bch2_btree_iter_set_pos(iter,
		btree_type_successor(iter->btree_id, iter->k.p));
}

void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
	/* incapable of rewinding across nodes: */
	BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);

	iter->pos = pos;
	__btree_iter_init(iter, iter->nodes[iter->level]);
}

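/*
 * Return the first key at or after iter->pos, advancing to the next leaf (and
 * re-traversing) as needed; returns bkey_s_c_null at the end of the btree.
 */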
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct bkey_s_c k;
	int ret;

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret)) {
			iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
			return bkey_s_c_err(ret);
		}

		k = __btree_iter_peek(iter);
		if (likely(k.k)) {
			/*
			 * iter->pos should always be equal to the key we just
			 * returned - except extents can straddle iter->pos:
			 */
			if (!iter->is_extents ||
			    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
				bch2_btree_iter_set_pos(iter, bkey_start_pos(k.k));
			return k;
		}

		iter->pos = iter->nodes[0]->key.k.p;

		if (!bkey_cmp(iter->pos, POS_MAX)) {
			iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
			bch2_btree_iter_unlock(iter);
			return bkey_s_c_null;
		}

		iter->pos = btree_type_successor(iter->btree_id, iter->pos);
	}
}

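/*
 * Like bch2_btree_iter_peek(), but when there is no key at iter->pos a
 * synthesized deleted key is returned instead - for extents, sized to cover
 * the hole up to the next key.
 */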
struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
{
	struct bkey_s_c k;
	struct bkey n;
	int ret;

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret)) {
			iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
			return bkey_s_c_err(ret);
		}

		k = __btree_iter_peek_all(iter);

		if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
			bkey_init(&n);
			n.p = iter->pos;

			if (iter->is_extents) {
				if (n.p.offset == KEY_OFFSET_MAX) {
					iter->pos = bkey_successor(iter->pos);
					continue;
				}

				if (!k.k)
					k.k = &iter->nodes[0]->key.k;

				bch2_key_resize(&n,
					min_t(u64, KEY_SIZE_MAX,
					      (k.k->p.inode == n.p.inode
					       ? bkey_start_offset(k.k)
					       : KEY_OFFSET_MAX) -
					      n.p.offset));
			}

			iter->k = n;
			return (struct bkey_s_c) { &iter->k, NULL };
		} else if (!bkey_deleted(k.k)) {
			return k;
		} else {
			__btree_iter_advance(iter);
		}
	}
}

void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
			    enum btree_id btree_id, struct bpos pos,
			    unsigned locks_want, unsigned depth)
{
	iter->level = depth;
	/* bch2_bkey_ops isn't used much, this would be a cache miss */
	/* iter->is_extents = bch2_bkey_ops[btree_id]->is_extents; */
	iter->is_extents = btree_id == BTREE_ID_EXTENTS;
	iter->nodes_locked = 0;
	iter->nodes_intent_locked = 0;
	iter->locks_want = min(locks_want, BTREE_MAX_DEPTH);
	iter->btree_id = btree_id;
	iter->at_end_of_leaf = 0;
	iter->error = 0;
	iter->c = c;
	iter->pos = pos;
	memset(iter->nodes, 0, sizeof(iter->nodes));
	iter->nodes[iter->level] = BTREE_ITER_NOT_END;
	iter->next = iter;

	prefetch(c->btree_roots[btree_id].b);
}

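/*
 * Link @new into @iter's circular list of iterators, so the locking code can
 * see every iterator held by this operation.
 */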
void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
{
	BUG_ON(btree_iter_linked(new));

	new->next = iter->next;
	iter->next = new;

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		unsigned nr_iters = 1;

		for_each_linked_btree_iter(iter, new)
			nr_iters++;

		BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
	}
}

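/*
 * Copy @src's position and node pointers into @dst, which keeps its own place
 * in the linked list; @dst starts out holding no locks.
 */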
void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
	bch2_btree_iter_unlock(dst);
	memcpy(dst, src, offsetof(struct btree_iter, next));
	dst->nodes_locked = dst->nodes_intent_locked = 0;
}