3 #include "bkey_methods.h"
4 #include "btree_cache.h"
5 #include "btree_iter.h"
6 #include "btree_locking.h"
10 #include <linux/prefetch.h>
11 #include <trace/events/bcachefs.h>
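/*
 * Sentinel stored in iter->nodes[]: marks a level whose node pointer is no
 * longer valid and must be retraversed/relocked, as opposed to NULL, which the
 * traversal code uses to mean we got to the end of the btree:
 */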
#define BTREE_ITER_NOT_END ((struct btree *) 1)

static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return iter->nodes[l] && iter->nodes[l] != BTREE_ITER_NOT_END;
}

/* Btree node locking: */

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;

	EBUG_ON(iter->nodes[b->level] != b);
	EBUG_ON(iter->lock_seq[b->level] + 1 != b->lock.state.seq);

	for_each_linked_btree_node(iter, b, linked)
		linked->lock_seq[b->level] += 2;

	iter->lock_seq[b->level] += 2;

	six_unlock_write(&b->lock);
}

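/*
 * Take the write lock on a node the iterator already has intent locked: read
 * locks that linked iterators hold on the same node are temporarily subtracted
 * from the reader count so that we don't block on ourselves:
 */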
void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(iter->nodes[b->level] != b);
	EBUG_ON(iter->lock_seq[b->level] != b->lock.state.seq);

	if (six_trylock_write(&b->lock))
		return;

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[b->level] == b &&
		    btree_node_read_locked(linked, b->level))
			readers++;

	if (likely(!readers)) {
		six_lock_write(&b->lock);
	} else {
		/*
		 * Must drop our read locks before calling six_lock_write() -
		 * six_unlock() won't do wakeups until the reader count
		 * goes to 0, and it's safe because we have the node intent
		 * locked:
		 */
		atomic64_sub(__SIX_VAL(read_lock, readers),
			     &b->lock.state.counter);
		six_lock_write(&b->lock);
		atomic64_add(__SIX_VAL(read_lock, readers),
			     &b->lock.state.counter);
	}
}

bool bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree_iter *linked;
	struct btree *b = iter->nodes[level];
	enum btree_node_locked_type want = btree_lock_want(iter, level);
	enum btree_node_locked_type have = btree_node_locked_type(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (have != BTREE_NODE_UNLOCKED
	    ? six_trylock_convert(&b->lock, have, want)
	    : six_relock_type(&b->lock, want, iter->lock_seq[level]))
		goto success;

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[level] == b &&
		    btree_node_locked_type(linked, level) == want &&
		    iter->lock_seq[level] == b->lock.state.seq) {
			btree_node_unlock(iter, level);
			six_lock_increment(&b->lock, want);
			goto success;
		}

	return false;
success:
	mark_btree_node_unlocked(iter, level);
	mark_btree_node_locked(iter, level, want);
	return true;
}

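/*
 * Slowpath for taking a node lock: before blocking, check that doing so can't
 * deadlock against linked iterators - nodes must be locked in key order,
 * ancestors before descendants, and we can't block on an intent lock while
 * any iterator holds read locks. If blocking would be unsafe we don't take the
 * lock and the caller has to unwind and retry (typically surfacing as -EINTR):
 */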
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level,
			    struct btree_iter *iter,
			    enum six_lock_type type)
{
	struct btree_iter *linked;

	/* Can't have children locked before ancestors: */
	EBUG_ON(iter->nodes_locked && level > __ffs(iter->nodes_locked));

	/*
	 * Can't hold any read locks while we block taking an intent lock - see
	 * below for reasoning, and we should have already dropped any read
	 * locks in the current iterator
	 */
	EBUG_ON(type == SIX_LOCK_intent &&
		iter->nodes_locked != iter->nodes_intent_locked);

	for_each_linked_btree_iter(iter, linked)
		if (linked->nodes[level] == b &&
		    btree_node_locked_type(linked, level) == type) {
			six_lock_increment(&b->lock, type);
			return true;
		}

	/*
	 * Must lock btree nodes in key order - this case happens when locking
	 * the prev sibling in btree node merging:
	 */
	if (iter->nodes_locked &&
	    __ffs(iter->nodes_locked) == level &&
	    __btree_iter_cmp(iter->btree_id, pos, iter))
		return false;

	for_each_linked_btree_iter(iter, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			linked->locks_want = max_t(unsigned,
						   linked->locks_want,
						   iter->locks_want);
			return false;
		}

		/* We have to lock btree nodes in key order: */
		if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
			return false;

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (linked->btree_id == iter->btree_id &&
		    level > __fls(linked->nodes_locked)) {
			linked->locks_want = max_t(unsigned,
						   linked->locks_want,
						   iter->locks_want);
			return false;
		}
	}

	six_lock_type(&b->lock, type);
	return true;
}

/* Btree iterator locking: */
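
/*
 * Drop or downgrade locks above locks_want: levels above the iterator's
 * current level are unlocked entirely, the rest are downgraded from intent to
 * read locks:
 */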
static void btree_iter_drop_extra_locks(struct btree_iter *iter)
{
	unsigned l;

	while (iter->nodes_locked &&
	       (l = __fls(iter->nodes_locked)) > iter->locks_want) {
		if (!btree_node_locked(iter, l))
			panic("l %u nodes_locked %u\n", l, iter->nodes_locked);

		if (l > iter->level) {
			btree_node_unlock(iter, l);
		} else if (btree_node_intent_locked(iter, l)) {
			six_lock_downgrade(&iter->nodes[l]->lock);
			iter->nodes_intent_locked ^= 1 << l;
		}
	}
}

bool __bch2_btree_iter_set_locks_want(struct btree_iter *iter,
				      unsigned new_locks_want)
{
	struct btree_iter *linked;
	unsigned l;

	/* Drop locks we don't want anymore: */
	if (new_locks_want < iter->locks_want)
		for_each_linked_btree_iter(iter, linked)
			if (linked->locks_want > new_locks_want) {
				linked->locks_want = max_t(unsigned, 1,
							   new_locks_want);
				btree_iter_drop_extra_locks(linked);
			}

	iter->locks_want = new_locks_want;
	btree_iter_drop_extra_locks(iter);

	for (l = iter->level; l < iter->locks_want && iter->nodes[l]; l++)
		if (!bch2_btree_node_relock(iter, l))
			goto fail;

	return true;
fail:
	/*
	 * Just an optimization: ancestor nodes must be locked before child
	 * nodes, so set locks_want on iterators that might lock ancestors
	 * before us to avoid getting -EINTR later:
	 */
	for_each_linked_btree_iter(iter, linked)
		if (linked->btree_id == iter->btree_id &&
		    btree_iter_cmp(linked, iter) <= 0)
			linked->locks_want = max_t(unsigned, linked->locks_want,
						   new_locks_want);
	return false;
}

static void __bch2_btree_iter_unlock(struct btree_iter *iter)
{
	while (iter->nodes_locked)
		btree_node_unlock(iter, __ffs(iter->nodes_locked));

	iter->flags &= ~BTREE_ITER_UPTODATE;
}

int bch2_btree_iter_unlock(struct btree_iter *iter)
{
	struct btree_iter *linked;

	for_each_linked_btree_iter(iter, linked)
		__bch2_btree_iter_unlock(linked);
	__bch2_btree_iter_unlock(iter);

	return iter->flags & BTREE_ITER_ERROR ? -EIO : 0;
}

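/*
 * Illustrative sketch, not part of the original file: how the iterator
 * primitives defined below are typically combined for a simple scan. The init
 * call and its argument order are taken from __bch2_btree_iter_init() at the
 * end of this file; spotting an error key via IS_ERR(k.k) is an assumption
 * based on bkey_s_c_err(), not something this file guarantees.
 */
#if 0
static int example_scan_extents(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	__bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			       1, 0, BTREE_ITER_IS_EXTENTS);

	while ((k = bch2_btree_iter_peek(&iter)).k && !IS_ERR(k.k)) {
		/* inspect k here */
		bch2_btree_iter_advance_pos(&iter);
	}

	/* the error stashed in the iterator (if any) is returned here: */
	return bch2_btree_iter_unlock(&iter);
}
#endif
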
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void __bch2_btree_iter_verify(struct btree_iter *iter,
				     struct btree *b)
{
	struct btree_node_iter *node_iter = &iter->node_iters[b->level];
	struct btree_node_iter tmp = *node_iter;
	struct bkey_packed *k;

	bch2_btree_node_iter_verify(node_iter, b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * whiteouts:
	 */
	k = b->level
		? bch2_btree_node_iter_prev(&tmp, b)
		: bch2_btree_node_iter_prev_all(&tmp, b);
	if (k && btree_iter_pos_cmp_packed(b, &iter->pos, k,
				iter->flags & BTREE_ITER_IS_EXTENTS)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(buf, sizeof(buf), &uk);
		panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
		      buf, iter->pos.inode, iter->pos.offset);
	}

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && !btree_iter_pos_cmp_packed(b, &iter->pos, k,
				iter->flags & BTREE_ITER_IS_EXTENTS)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(buf, sizeof(buf), &uk);
		panic("next key should be after iter pos:\n%llu:%llu\n%s\n",
		      iter->pos.inode, iter->pos.offset, buf);
	}
}

void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	if (iter->nodes[b->level] == b)
		__bch2_btree_iter_verify(iter, b);

	for_each_linked_btree_node(iter, b, linked)
		__bch2_btree_iter_verify(linked, b);
}

#endif

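/*
 * Fix up a btree_node_iter after keys in @b were inserted or removed at
 * @where: @clobber_u64s u64s were overwritten by @new_u64s u64s, so the
 * offsets stored in the node iterator's sets have to be shifted, and the set
 * covering the affected bset may need to be repositioned or re-added:
 */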
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = (int) __btree_node_key_to_offset(b, end) - shift;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to re-add it: */
	if (new_u64s &&
	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
				      iter->flags & BTREE_ITER_IS_EXTENTS))
		bch2_btree_node_iter_push(node_iter, b, where, end);
	return;
found:
	set->end = (int) set->end + shift;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
				      iter->flags & BTREE_ITER_IS_EXTENTS)) {
		set->k = offset;
		bch2_btree_node_iter_sort(node_iter, b);
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			*set = node_iter->data[--node_iter->used];
		bch2_btree_node_iter_sort(node_iter, b);
	} else {
		set->k = (int) set->k + shift;
	}

	/*
	 * Interior nodes are special because iterators for interior nodes don't
	 * obey the usual invariants regarding the iterator position:
	 *
	 * We may have whiteouts that compare greater than the iterator
	 * position, and logically should be in the iterator, but that we
	 * skipped past to find the first live key greater than the iterator
	 * position. This becomes an issue when we insert a new key that is
	 * greater than the current iterator position, but smaller than the
	 * whiteouts we've already skipped past - this happens in the course of
	 * a btree node split.
	 *
	 * We have to rewind the iterator to before those whiteouts here, else
	 * bkey_node_iter_prev() is not going to work and who knows what else
	 * would happen. And we have to do it manually, because here we've
	 * already done the insert and the iterator is currently inconsistent:
	 *
	 * We've got multiple competing invariants here - we have to be careful
	 * about rewinding iterators for interior nodes, because they should
	 * always point to the key for the child node the btree iterator points
	 * to.
	 */
	if (b->level && new_u64s && !bkey_deleted(where) &&
	    btree_iter_pos_cmp_packed(b, &iter->pos, where,
				      iter->flags & BTREE_ITER_IS_EXTENTS)) {
		struct bset_tree *t;
		struct bkey_packed *k;

		for_each_bset(b, t) {
			if (bch2_bkey_to_bset(b, where) == t)
				continue;

			k = bch2_bkey_prev_all(b, t,
				bch2_btree_node_iter_bset_pos(node_iter, b, t));
			if (k &&
			    __btree_node_iter_cmp(node_iter, b,
						  k, where) > 0) {
				struct btree_node_iter_set *set;
				unsigned offset =
					__btree_node_key_to_offset(b, bkey_next(k));

				btree_node_iter_for_each(node_iter, set)
					if (set->k == offset) {
						set->k = __btree_node_key_to_offset(b, k);
						bch2_btree_node_iter_sort(node_iter, b);
						goto next_bset;
					}

				bch2_btree_node_iter_push(node_iter, b, k,
							  btree_bkey_last(b, t));
			}
next_bset:
			;
		}
	}
}

void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bset_tree *t,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct btree_iter *linked;

	if (node_iter != &iter->node_iters[b->level])
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

	if (iter->nodes[b->level] == b)
		__bch2_btree_node_iter_fix(iter, b,
					   &iter->node_iters[b->level], t,
					   where, clobber_u64s, new_u64s);

	for_each_linked_btree_node(iter, b, linked)
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->node_iters[b->level], t,
					   where, clobber_u64s, new_u64s);

	/* interior node iterators are... special... */
	if (!b->level)
		bch2_btree_iter_verify(iter, b);
}

/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter)
{
	struct btree *b = iter->nodes[iter->level];
	struct bkey_packed *k =
		bch2_btree_node_iter_peek_all(&iter->node_iters[iter->level], b);
	struct bkey_s_c ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	if (!k)
		return bkey_s_c_null;

	ret = bkey_disassemble(b, k, &iter->k);

	if (debug_check_bkeys(iter->c))
		bch2_bkey_debugcheck(iter->c, b, ret);

	return ret;
}

static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter)
{
	struct btree *b = iter->nodes[iter->level];
	struct bkey_packed *k =
		bch2_btree_node_iter_peek(&iter->node_iters[iter->level], b);
	struct bkey_s_c ret;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	if (!k)
		return bkey_s_c_null;

	ret = bkey_disassemble(b, k, &iter->k);

	if (debug_check_bkeys(iter->c))
		bch2_bkey_debugcheck(iter->c, b, ret);

	return ret;
}

static inline void __btree_iter_advance(struct btree_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iters[iter->level],
				     iter->nodes[iter->level]);
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
	    !iter->nodes[b->level + 1])
		return;

	parent_locked = btree_node_locked(iter, b->level + 1);

	if (!bch2_btree_node_relock(iter, b->level + 1))
		return;

	k = bch2_btree_node_iter_peek_all(&iter->node_iters[b->level + 1],
					  iter->nodes[b->level + 1]);
	if (!k ||
	    bkey_cmp_left_packed(iter->nodes[b->level + 1],
				 k, &b->key.k.p)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(buf, sizeof(buf), &uk);
		panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
		      buf, b->key.k.p.inode, b->key.k.p.offset);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->level + 1);
}

static inline void __btree_iter_init(struct btree_iter *iter,
				     struct btree *b)
{
	bch2_btree_node_iter_init(&iter->node_iters[b->level], b, iter->pos,
				  iter->flags & BTREE_ITER_IS_EXTENTS,
				  btree_node_is_extents(b));

	/* Skip to first non-whiteout: */
	if (b->level)
		bch2_btree_node_iter_peek(&iter->node_iters[b->level], b);
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->btree_id &&
		bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
		btree_iter_pos_cmp(iter->pos, &b->key.k,
				   iter->flags & BTREE_ITER_IS_EXTENTS);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->lock.state.seq & 1);

	iter->lock_seq[b->level] = b->lock.state.seq;
	iter->nodes[b->level] = b;
	__btree_iter_init(iter, b);
}

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
bool bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	for_each_linked_btree_iter(iter, linked)
		if (btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->level));

			/*
			 * If @linked wants this node read locked, we don't want
			 * to actually take the read lock now because it's not
			 * legal to hold read locks on other nodes while we take
			 * write locks, so the journal can make forward
			 * progress.
			 *
			 * Instead, btree_iter_node_set() sets things up so
			 * bch2_btree_node_relock() will succeed:
			 */
			if (btree_want_intent(linked, b->level)) {
				six_lock_increment(&b->lock, SIX_LOCK_intent);
				mark_btree_node_intent_locked(linked, b->level);
			}

			btree_iter_node_set(linked, b);
		}

	if (!btree_iter_pos_in_node(iter, b)) {
		six_unlock_intent(&b->lock);
		return false;
	}

	mark_btree_node_intent_locked(iter, b->level);
	btree_iter_node_set(iter, b);
	return true;
}

void bch2_btree_iter_node_drop_linked(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	for_each_linked_btree_iter(iter, linked)
		bch2_btree_iter_node_drop(linked, b);
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	unsigned level = b->level;

	if (iter->nodes[level] == b) {
		btree_node_unlock(iter, level);
		iter->nodes[level] = BTREE_ITER_NOT_END;
		iter->flags &= ~BTREE_ITER_UPTODATE;
	}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	for_each_linked_btree_node(iter, b, linked)
		__btree_iter_init(linked, b);
	__btree_iter_init(iter, b);
}

static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want)
{
	struct bch_fs *c = iter->c;
	struct btree *b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(c->btree_roots[iter->btree_id].b);
		iter->level = READ_ONCE(b->level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			iter->nodes[iter->level] = NULL;
			return 0;
		}

		lock_type = btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
					      iter, lock_type)))
			return -EINTR;

		if (likely(b == c->btree_roots[iter->btree_id].b &&
			   b->level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->nodes[i] = BTREE_ITER_NOT_END;
			iter->nodes[iter->level] = b;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->lock, lock_type);
	}
}

static void btree_iter_prefetch(struct btree_iter *iter)
{
	struct btree *b = iter->nodes[iter->level + 1];
	struct btree_node_iter node_iter = iter->node_iters[iter->level + 1];
	struct bkey_packed *k;
	BKEY_PADDED(k) tmp;
	unsigned nr = iter->level ? 1 : 8;
	bool was_locked = btree_node_locked(iter, iter->level + 1);

	if (!bch2_btree_node_relock(iter, iter->level + 1))
		return;

	while (nr--) {
		bch2_btree_node_iter_advance(&node_iter, b);
		k = bch2_btree_node_iter_peek(&node_iter, b);
		if (!k)
			break;

		bch2_bkey_unpack(b, &tmp.k, k);
		bch2_btree_node_prefetch(iter->c, &tmp.k,
					 iter->level, iter->btree_id);
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level + 1);
}

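/*
 * Descend one level: read in the child node pointed at by the current node's
 * iterator, lock it, and make it the iterator's current node:
 */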
static inline int btree_iter_down(struct btree_iter *iter)
{
	struct btree *b;
	struct bkey_s_c k = __btree_iter_peek(iter);
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = btree_lock_want(iter, level);
	BKEY_PADDED(k) tmp;

	bkey_reassemble(&tmp.k, k);

	b = bch2_btree_node_get(iter->c, iter, &tmp.k, level, lock_type);
	if (unlikely(IS_ERR(b)))
		return PTR_ERR(b);

	iter->level = level;
	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);

	return 0;
}

static void btree_iter_up(struct btree_iter *iter)
{
	btree_node_unlock(iter, iter->level++);
}

int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
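
/*
 * Error path for traversal: -EINTR (lock restart) and -ENOMEM are handled by
 * unlocking everything and redoing the traversals of all linked iterators in
 * sorted key order, so the lock ordering rules are respected on the retry;
 * other errors are stashed in the iterator for the caller:
 */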
static int btree_iter_traverse_error(struct btree_iter *iter, int ret)
{
	struct bch_fs *c = iter->c;
	struct btree_iter *linked, *sorted_iters, **i;

	bch2_btree_iter_unlock(iter);

	if (ret != -ENOMEM && ret != -EINTR)

	if (ret == -ENOMEM) {
		struct closure cl;

		closure_init_stack(&cl);

		ret = bch2_btree_node_cannibalize_lock(c, &cl);

	/*
	 * Linked iters are normally a circular singly linked list - break the
	 * cycle while we sort them:
	 */
		linked = linked->next;

		while (*i && btree_iter_cmp(iter, *i) > 0)

	/* Make list circular again: */
	iter->next = sorted_iters;

	/* Now, redo traversals in correct order: */
		ret = __bch2_btree_iter_traverse(iter);

	} while (iter != sorted_iters);

	ret = btree_iter_linked(iter) ? -EINTR : 0;

	bch2_btree_node_cannibalize_unlock(c);

	iter->flags |= BTREE_ITER_ERROR;
	iter->nodes[iter->level] = NULL;

/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth:
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_btree_iter_unlock().
 */
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	unsigned depth_want = iter->level;

	/* make sure we have all the intent locks we need - ugh */
	if (unlikely(iter->nodes[iter->level] &&
		     iter->level + 1 < iter->locks_want)) {
		unsigned i;

		for (i = iter->level + 1;
		     i < iter->locks_want && iter->nodes[i];
		     i++)
			if (!bch2_btree_node_relock(iter, i)) {
				while (iter->nodes[iter->level] &&
				       iter->level + 1 < iter->locks_want)
					btree_iter_up(iter);
				break;
			}
	}

	/*
	 * If the current node isn't locked, go up until we have a locked node
	 * or run out of nodes:
	 */
	while (iter->nodes[iter->level] &&
	       !(is_btree_node(iter, iter->level) &&
		 bch2_btree_node_relock(iter, iter->level) &&
		 btree_iter_pos_cmp(iter->pos,
				    &iter->nodes[iter->level]->key.k,
				    iter->flags & BTREE_ITER_IS_EXTENTS)))
		btree_iter_up(iter);

	/*
	 * If we've got a btree node locked (i.e. we aren't about to relock the
	 * root) - advance its node iterator if necessary:
	 */
	if (iter->nodes[iter->level]) {
		struct bkey_s_c k;

		while ((k = __btree_iter_peek_all(iter)).k &&
		       !btree_iter_pos_cmp(iter->pos, k.k,
					   iter->flags & BTREE_ITER_IS_EXTENTS))
			__btree_iter_advance(iter);
	}

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		int ret = iter->nodes[iter->level]
			? btree_iter_down(iter)
			: btree_iter_lock_root(iter, depth_want);
		if (unlikely(ret)) {
			iter->level = depth_want;
			return ret;
		}
	}

	return 0;
}

int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	iter->flags &= ~BTREE_ITER_UPTODATE;

	if (unlikely(!iter->nodes[iter->level]))
		return 0;

	iter->flags &= ~BTREE_ITER_AT_END_OF_LEAF;

	ret = __bch2_btree_iter_traverse(iter);
	if (unlikely(ret))
		ret = btree_iter_traverse_error(iter, ret);

	return ret;
}

/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = iter->nodes[iter->level];
	if (b) {
		EBUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
		iter->pos = b->key.k.p;
	}

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
{
	struct btree *b;
	int ret;

	EBUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

	btree_iter_up(iter);

	if (!iter->nodes[iter->level])
		return NULL;

	/* parent node usually won't be locked: redo traversal if necessary */
	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = iter->nodes[iter->level];
	if (!b)
		return NULL;

	if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
		/* Haven't gotten to the end of the parent node: */
		iter->pos = iter->btree_id == BTREE_ID_INODES
			? btree_type_successor(iter->btree_id, iter->pos)
			: bkey_successor(iter->pos);
		iter->level = depth;

		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->nodes[iter->level];
	}

	iter->pos = b->key.k.p;
	return b;
}

/* Iterate across keys (in leaf nodes only) */
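
/*
 * Advance the iterator's position within the leaf node it already points to,
 * without redoing the traversal; sets BTREE_ITER_AT_END_OF_LEAF if the walk
 * runs off the end of the node:
 */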
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree *b = iter->nodes[0];
	struct btree_node_iter *node_iter = &iter->node_iters[0];
	struct bkey_packed *k;

	EBUG_ON(iter->level != 0);
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
	EBUG_ON(!btree_node_locked(iter, 0));
	EBUG_ON(bkey_cmp(new_pos, b->key.k.p) > 0);

	while ((k = bch2_btree_node_iter_peek_all(node_iter, b)) &&
	       !btree_iter_pos_cmp_packed(b, &new_pos, k,
					  iter->flags & BTREE_ITER_IS_EXTENTS))
		bch2_btree_node_iter_advance(node_iter, b);

	if (!k &&
	    !btree_iter_pos_cmp(new_pos, &b->key.k,
				iter->flags & BTREE_ITER_IS_EXTENTS))
		iter->flags |= BTREE_ITER_AT_END_OF_LEAF;

	iter->pos = new_pos;
	iter->flags &= ~BTREE_ITER_UPTODATE;
}

void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0); /* XXX handle this */
	iter->pos = new_pos;
	iter->flags &= ~BTREE_ITER_UPTODATE;
}

void bch2_btree_iter_advance_pos(struct btree_iter *iter)
{
	if (iter->flags & BTREE_ITER_UPTODATE &&
	    !(iter->flags & BTREE_ITER_WITH_HOLES)) {
		struct bkey_s_c k;

		__btree_iter_advance(iter);
		k = __btree_iter_peek(iter);
		if (likely(k.k)) {
			iter->pos = bkey_start_pos(k.k);
			return;
		}
	}

	/*
	 * We use iter->k instead of iter->pos for extents: iter->pos will be
	 * equal to the start of the extent we returned, but we need to advance
	 * to the end of the extent we returned.
	 */
	bch2_btree_iter_set_pos(iter,
		btree_type_successor(iter->btree_id, iter->k.p));
}

/* XXX: expensive */
void bch2_btree_iter_rewind(struct btree_iter *iter, struct bpos pos)
{
	/* incapable of rewinding across nodes: */
	BUG_ON(bkey_cmp(pos, iter->nodes[iter->level]->data->min_key) < 0);

	iter->pos = pos;
	iter->flags &= ~BTREE_ITER_UPTODATE;
	__btree_iter_init(iter, iter->nodes[iter->level]);
}

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct bkey_s_c k;
	int ret;

	EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
		(iter->btree_id == BTREE_ID_EXTENTS));

	if (iter->flags & BTREE_ITER_UPTODATE) {
		struct btree *b = iter->nodes[0];
		struct bkey_packed *k =
			__bch2_btree_node_iter_peek_all(&iter->node_iters[0], b);
		struct bkey_s_c ret = {
			.k = &iter->k,
			.v = bkeyp_val(&b->format, k)
		};

		if (debug_check_bkeys(iter->c))
			bch2_bkey_debugcheck(iter->c, b, ret);

		return ret;
	}

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret)) {
			iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
			return bkey_s_c_err(ret);
		}

		k = __btree_iter_peek(iter);
		if (likely(k.k)) {
			/*
			 * iter->pos should always be equal to the key we just
			 * returned - except extents can straddle iter->pos:
			 */
			if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
			    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
				iter->pos = bkey_start_pos(k.k);

			iter->flags |= BTREE_ITER_UPTODATE;
			return k;
		}

		iter->pos = iter->nodes[0]->key.k.p;

		if (!bkey_cmp(iter->pos, POS_MAX)) {
			iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
			bch2_btree_iter_unlock(iter);
			return bkey_s_c_null;
		}

		iter->pos = btree_type_successor(iter->btree_id, iter->pos);
	}
}

struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *iter)
{
	struct bkey_s_c k;
	struct bkey n;
	int ret;

	EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
		(iter->btree_id == BTREE_ID_EXTENTS));

	iter->flags &= ~BTREE_ITER_UPTODATE;

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret)) {
			iter->k = KEY(iter->pos.inode, iter->pos.offset, 0);
			return bkey_s_c_err(ret);
		}

		k = __btree_iter_peek_all(iter);
		if (!k.k || bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0) {
			/* hole: */
			bkey_init(&n);
			n.p = iter->pos;

			if (iter->flags & BTREE_ITER_IS_EXTENTS) {
				if (n.p.offset == KEY_OFFSET_MAX) {
					iter->pos = bkey_successor(iter->pos);
					continue;
				}

				if (!k.k)
					k.k = &iter->nodes[0]->key.k;

				bch2_key_resize(&n,
					min_t(u64, KEY_SIZE_MAX,
					      (k.k->p.inode == n.p.inode
					       ? bkey_start_offset(k.k)
					       : KEY_OFFSET_MAX) -
					      n.p.offset));
			}

			iter->k = n;
			return (struct bkey_s_c) { &iter->k, NULL };
		} else if (!bkey_deleted(k.k)) {
			return k;
		} else {
			__btree_iter_advance(iter);
		}
	}
}

void __bch2_btree_iter_init(struct btree_iter *iter, struct bch_fs *c,
			    enum btree_id btree_id, struct bpos pos,
			    unsigned locks_want, unsigned depth,
			    unsigned flags)
{
	EBUG_ON(depth >= BTREE_MAX_DEPTH);
	EBUG_ON(locks_want > BTREE_MAX_DEPTH);

	iter->c = c;
	iter->pos = pos;
	iter->flags = flags;
	iter->btree_id = btree_id;
	iter->level = depth;
	iter->locks_want = locks_want;
	iter->nodes_locked = 0;
	iter->nodes_intent_locked = 0;
	memset(iter->nodes, 0, sizeof(iter->nodes));
	iter->nodes[iter->level] = BTREE_ITER_NOT_END;
	iter->next = iter;

	prefetch(c->btree_roots[btree_id].b);
}

void bch2_btree_iter_unlink(struct btree_iter *iter)
{
	struct btree_iter *linked;

	__bch2_btree_iter_unlock(iter);

	if (!btree_iter_linked(iter))
		return;

	for_each_linked_btree_iter(iter, linked) {
		if (linked->next == iter) {
			linked->next = iter->next;
			return;
		}
	}

	BUG();
}

void bch2_btree_iter_link(struct btree_iter *iter, struct btree_iter *new)
{
	BUG_ON(btree_iter_linked(new));

	new->next = iter->next;
	iter->next = new;

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		unsigned nr_iters = 1;

		for_each_linked_btree_iter(iter, new)
			nr_iters++;

		BUG_ON(nr_iters > SIX_LOCK_MAX_RECURSE);
	}
}

void bch2_btree_iter_copy(struct btree_iter *dst, struct btree_iter *src)
{
	__bch2_btree_iter_unlock(dst);
	memcpy(dst, src, offsetof(struct btree_iter, next));
	dst->nodes_locked = dst->nodes_intent_locked = 0;
}