// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"

#include <linux/prefetch.h>
#include <trace/events/bcachefs.h>
static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[l].b >= 128;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(pos);
	return pos;
}
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
					      struct btree *b)
{
	return bkey_cmp(btree_iter_search_key(iter), b->data->min_key) < 0;
}

static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
{
	return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->c.btree_id &&
		!btree_iter_pos_before_node(iter, b) &&
		!btree_iter_pos_after_node(iter, b);
}
/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	bch2_btree_node_unlock_write_inlined(b, iter);
}
void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[b->c.level].b == b &&
		    btree_node_read_locked(linked, b->c.level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	atomic64_sub(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
	atomic64_add(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
	     btree_node_lock_increment(iter->trans, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	}

	return false;
}
static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
{
	struct btree *b = iter->l[level].b;

	EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(iter, level))
		return false;

	if (btree_node_intent_locked(iter, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(iter, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(iter, b, level) &&
	    btree_node_lock_increment(iter->trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(iter, level);
	return true;
}
static inline bool btree_iter_get_locks(struct btree_iter *iter,
					bool upgrade, bool trace)
{
	unsigned l = iter->level;
	int fail_idx = -1;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(iter, l)
		      : bch2_btree_node_relock(iter, l))) {
			if (trace)
				(upgrade
				 ? trace_node_upgrade_fail
				 : trace_node_relock_fail)(l, iter->l[l].lock_seq,
						is_btree_node(iter, l)
						? 0
						: (unsigned long) iter->l[l].b,
						is_btree_node(iter, l)
						? iter->l[l].b->c.lock.state.seq
						: 0);
			fail_idx = l;
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		}

		l++;
	} while (l < iter->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_iter_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	while (fail_idx >= 0) {
		btree_node_unlock(iter, fail_idx);
		iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
		--fail_idx;
	}

	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
		iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);

	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  enum btree_iter_type type)
{
	return type != BTREE_ITER_CACHED
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level, struct btree_iter *iter,
			    enum six_lock_type type,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter *linked, *deadlock_iter = NULL;
	u64 start_time = local_clock();
	unsigned reason = 9;

	/* Check if it's safe to block: */
	trans_for_each_iter(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			if (!(trans->nounlock)) {
				linked->locks_want = max_t(unsigned,
						linked->locks_want,
						__fls(linked->nodes_locked) + 1);
				if (!btree_iter_get_locks(linked, true, false)) {
					deadlock_iter = linked;
					reason = 1;
				}
			} else {
				deadlock_iter = linked;
				reason = 2;
			}
		}

		if (linked->btree_id != iter->btree_id) {
			if (linked->btree_id > iter->btree_id) {
				deadlock_iter = linked;
				reason = 3;
			}
			continue;
		}

		/*
		 * Within the same btree, cached iterators come before non
		 * cached iterators:
		 */
		if (btree_iter_is_cached(linked) != btree_iter_is_cached(iter)) {
			if (btree_iter_is_cached(iter)) {
				deadlock_iter = linked;
				reason = 4;
			}
			continue;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			if (!(trans->nounlock)) {
				linked->locks_want =
					max(level + 1, max_t(unsigned,
					    linked->locks_want,
					    iter->locks_want));
				if (!btree_iter_get_locks(linked, true, false)) {
					deadlock_iter = linked;
					reason = 5;
				}
			} else {
				deadlock_iter = linked;
				reason = 6;
			}
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 btree_iter_type(linked))) <= 0) {
			deadlock_iter = linked;
			reason = 7;
		}

		/*
		 * Recheck if this is a node we already have locked - since one
		 * of the get_locks() calls might've successfully
		 * upgraded/relocked it:
		 */
		if (linked->l[level].b == b &&
		    btree_node_locked_type(linked, level) >= type) {
			six_lock_increment(&b->c.lock, type);
			return true;
		}
	}

	if (unlikely(deadlock_iter)) {
		trace_trans_restart_would_deadlock(iter->trans->ip, ip,
				reason,
				deadlock_iter->btree_id,
				btree_iter_type(deadlock_iter),
				iter->btree_id,
				btree_iter_type(iter));
		return false;
	}

	if (six_trylock_type(&b->c.lock, type))
		return true;

	if (six_lock_type(&b->c.lock, type, should_sleep_fn, p))
		return false;

	bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
			       start_time);
	return true;
}
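
/*
 * Illustrative sketch, not part of the upstream interface: the checks above
 * define a global lock ordering - ascending btree_id, cached iterators
 * before non-cached within a btree, interior nodes before descendants, and
 * nodes in ascending key order. A transaction whose iterators take locks in
 * that order never trips the deadlock_iter checks. The hypothetical helper
 * below (name invented, error handling elided) takes two iterators on one
 * btree in ascending position order, the pattern the "Must lock btree nodes
 * in key order" check expects:
 */
static void __maybe_unused example_lock_ordering(struct btree_trans *trans)
{
	struct btree_iter *iter1, *iter2;

	/* Lower position first, so the second lock is taken in key order: */
	iter1 = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, POS(1, 0), 0);
	bch2_btree_iter_peek(iter1);

	iter2 = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, POS(2, 0), 0);
	bch2_btree_iter_peek(iter2);

	bch2_trans_iter_put(trans, iter1);
	bch2_trans_iter_put(trans, iter2);
}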
/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
	unsigned l;

	if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
		BUG_ON(iter->nodes_locked);
		return;
	}

	for (l = 0; is_btree_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
			continue;

		BUG_ON(btree_lock_want(iter, l) !=
		       btree_node_locked_type(iter, l));
	}
}

void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter_all(trans, iter)
		bch2_btree_iter_verify_locks(iter);
}
#else
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif
bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
{
	return btree_iter_get_locks(iter, false, trace);
}
bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
			       unsigned new_locks_want)
{
	struct btree_iter *linked;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	if (btree_iter_get_locks(iter, true, true))
		return true;

	/*
	 * Ancestor nodes must be locked before child nodes, so set locks_want
	 * on iterators that might lock ancestors before us to avoid getting
	 * -EINTR later:
	 */
	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_iter_get_locks(linked, true, false);
		}

	return false;
}
bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
					unsigned new_locks_want)
{
	unsigned l = iter->level;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!bch2_btree_node_upgrade(iter, l)) {
			iter->locks_want = l;
			return false;
		}

		l++;
	} while (l < iter->locks_want);

	return true;
}
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				 unsigned downgrade_to)
{
	unsigned l, new_locks_want = downgrade_to ?:
		(iter->flags & BTREE_ITER_INTENT ? 1 : 0);

	if (iter->locks_want < downgrade_to) {
		iter->locks_want = new_locks_want;

		while (iter->nodes_locked &&
		       (l = __fls(iter->nodes_locked)) >= iter->locks_want) {
			if (l > iter->level) {
				btree_node_unlock(iter, l);
			} else {
				if (btree_node_intent_locked(iter, l)) {
					six_lock_downgrade(&iter->l[l].b->c.lock);
					iter->nodes_intent_locked ^= 1 << l;
				}
				break;
			}
		}
	}

	bch2_btree_trans_verify_locks(iter->trans);
}
void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_downgrade(iter);
}
/* Btree transaction locking: */

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_iter *iter;
	bool ret = true;

	trans_for_each_iter(trans, iter)
		if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
			ret &= bch2_btree_iter_relock(iter, true);

	return ret;
}
void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		__bch2_btree_iter_unlock(iter);
}
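
/*
 * Illustrative sketch (hypothetical helper, not in the upstream file): the
 * intended pattern around any operation that must block without holding
 * btree locks - drop them with bch2_trans_unlock(), then revalidate with
 * bch2_trans_relock(); if the lock sequence numbers changed in the meantime,
 * relock fails and the transaction must restart:
 */
static int __maybe_unused example_unlock_then_relock(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);

	/* ... blocking work (IO, memory reclaim, etc.) goes here ... */

	if (!bch2_trans_relock(trans))
		return -EINTR;	/* caller restarts the transaction */

	return 0;
}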
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(iter, 0);

	if (!bch2_btree_node_relock(iter, 0))
		return;

	ck = (void *) iter->l[0].b;
	BUG_ON(ck->key.btree_id != iter->btree_id ||
	       bkey_cmp(ck->key.pos, iter->pos));

	if (!locked)
		btree_node_unlock(iter, 0);
}
static void bch2_btree_iter_verify_level(struct btree_iter *iter,
					 unsigned level)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct btree_iter_level *l = &iter->l[level];
	struct btree_node_iter tmp = l->iter;
	bool locked = btree_node_locked(iter, level);
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100];
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		if (!level)
			bch2_btree_iter_verify_cached(iter);
		return;
	}

	BUG_ON(iter->level < iter->min_depth);

	if (!btree_iter_node(iter, level))
		return;

	if (!bch2_btree_node_relock(iter, level))
		return;

	/*
	 * Ideally this invariant would always be true, and hopefully in the
	 * future it will be, but for now set_pos_same_leaf() breaks it:
	 */
	BUG_ON(iter->uptodate < BTREE_ITER_NEED_TRAVERSE &&
	       !btree_iter_pos_in_node(iter, l->b));

	/*
	 * node iterators don't use leaf node iterator:
	 */
	if (btree_iter_type(iter) == BTREE_ITER_NODES &&
	    level <= iter->min_depth)
		goto unlock;

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 *
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
	p = level || btree_node_type_is_extents(iter->btree_id)
		? bch2_btree_node_iter_prev_filter(&tmp, l->b, KEY_TYPE_discard)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
		msg = "after";
		goto err;
	}
unlock:
	if (!locked)
		btree_node_unlock(iter, level);
	return;
err:
	strcpy(buf1, "(none)");
	strcpy(buf2, "(none)");

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&PBUF(buf1), &uk);
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
	}

	panic("iterator should be %s key at level %u:\n"
	      "iter pos %s %llu:%llu\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level,
	      iter->flags & BTREE_ITER_IS_EXTENTS ? ">" : "=>",
	      iter->pos.inode, iter->pos.offset,
	      buf1, buf2);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	unsigned i;

	bch2_btree_trans_verify_locks(iter->trans);

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		bch2_btree_iter_verify_level(iter, i);
}

void bch2_btree_trans_verify_iters(struct btree_trans *trans, struct btree *b)
{
	struct btree_iter *iter;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_iter_with_node(trans, b, iter)
		bch2_btree_iter_verify_level(iter, b->c.level);
}

#else

static inline void bch2_btree_iter_verify_level(struct btree_iter *iter, unsigned l) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}

#endif
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}
static void __bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_iter_level *l = &iter->l[b->c.level];
	struct bpos pos = btree_iter_search_key(iter);

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

void bch2_btree_iter_fix_key_modified(struct btree_iter *iter,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_iter_fix_key_modified(linked, b, where);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;
	struct bpos iter_pos = btree_iter_search_key(iter);

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &iter_pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    (b->c.level ||
	     btree_node_type_is_extents(iter->btree_id))) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}

	if (btree_iter_type(iter) == BTREE_ITER_KEYS &&
	    node_iter == &iter->l[0].iter &&
	    iter_current_key_modified)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct btree_iter *linked;

	if (node_iter != &iter->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_iter_with_node(iter->trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_iter_verify_level(linked, b->c.level);
	}
}
static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
						  struct btree_iter_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	if (bch2_debug_check_bkeys)
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}

/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
						    struct btree_iter_level *l,
						    struct bkey *u)
{
	return __btree_iter_unpack(iter, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}
static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
						struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));
}

static inline struct bkey_s_c __btree_iter_prev(struct btree_iter *iter,
						struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_prev(&l->iter, l->b));
}
static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
					     struct btree_iter_level *l,
					     int max_advance)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->c.level + 1;
	if (!btree_iter_node(iter, plevel))
		return;

	parent_locked = btree_node_locked(iter, plevel);

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	l = &iter->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(&PBUF(buf), &uk);
		panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
		      buf, b->key.k.p.inode, b->key.k.p.offset);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->c.level + 1);
}
static inline void __btree_iter_init(struct btree_iter *iter,
				     unsigned level)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct btree_iter_level *l = &iter->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &pos);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	BUG_ON(btree_iter_type(iter) == BTREE_ITER_CACHED);

	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
	iter->l[b->c.level].b = b;
	__btree_iter_init(iter, b->c.level);
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	enum btree_node_locked_type t;
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (btree_iter_type(linked) != BTREE_ITER_CACHED &&
		    btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->c.level));

			t = btree_lock_want(linked, b->c.level);
			if (t != BTREE_NODE_UNLOCKED) {
				six_lock_increment(&b->c.lock, t);
				mark_btree_node_locked(linked, b->c.level, t);
			}

			btree_iter_node_set(linked, b);
		}
}
void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->c.level;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
			__btree_node_unlock(linked, level);
			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__btree_iter_init(linked, b->c.level);
}
static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	return b == *rootp ? 0 : -1;
}
static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = iter->trans->c;
	struct btree *b, **rootp = &c->btree_roots[iter->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		iter->level = READ_ONCE(b->c.level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
					      iter, lock_type,
					      lock_root_check_fn, rootp,
					      trace_ip)))
			return -EINTR;

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			iter->l[iter->level].b = b;
			for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
static void btree_iter_prefetch(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	BKEY_PADDED(k) tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);

	while (nr--) {
		if (!bch2_btree_node_relock(iter, iter->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_unpack(l->b, &tmp.k, k);
		bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level);
}
static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
					    unsigned plevel, struct btree *b)
{
	struct btree_iter_level *l = &iter->l[plevel];
	bool locked = btree_node_locked(iter, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(iter, plevel);
}
static __always_inline int btree_iter_down(struct btree_iter *iter,
					   unsigned long trace_ip)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
	BKEY_PADDED(k) tmp;

	EBUG_ON(!btree_node_locked(iter, iter->level));

	bch2_bkey_unpack(l->b, &tmp.k,
			 bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type, trace_ip);
	if (unlikely(IS_ERR(b)))
		return PTR_ERR(b);

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(&tmp.k)))
		btree_node_mem_ptr_set(iter, level + 1, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);

	iter->level = level;

	return 0;
}

static void btree_iter_up(struct btree_iter *iter)
{
	btree_node_unlock(iter, iter->level++);
}
static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	u8 sorted[BTREE_ITER_MAX];
	unsigned i, nr_sorted = 0;

	if (trans->in_traverse_all)
		return -EINTR;

	trans->in_traverse_all = true;
retry_all:
	nr_sorted = 0;

	trans_for_each_iter(trans, iter)
		sorted[nr_sorted++] = iter->idx;

#define btree_iter_cmp_by_idx(_l, _r)				\
		btree_iter_lock_cmp(&trans->iters[_l], &trans->iters[_r])

	bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx
	bch2_trans_unlock(trans);

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	for (i = 0; i < nr_sorted; i++) {
		unsigned idx = sorted[i];

		/*
		 * successfully traversing one iterator can cause another to be
		 * unlinked, in btree_key_cache_fill()
		 */
		if (!(trans->iters_linked & (1ULL << idx)))
			continue;

		ret = btree_iter_traverse_one(&trans->iters[idx], _THIS_IP_);
		if (ret)
			goto retry_all;
	}

	if (hweight64(trans->iters_live) > 1)
		ret = -EINTR;
	else
		trans_for_each_iter(trans, iter)
			if (iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT) {
				ret = -EINTR;
				break;
			}
out:
	bch2_btree_cache_cannibalize_unlock(c);

	trans->in_traverse_all = false;
	return ret;
}

int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
	return __btree_iter_traverse_all(trans, 0);
}
static inline bool btree_iter_good_node(struct btree_iter *iter,
					unsigned l, int check_pos)
{
	if (!is_btree_node(iter, l) ||
	    !bch2_btree_node_relock(iter, l))
		return false;

	if (check_pos <= 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
		return false;
	if (check_pos >= 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
		return false;
	return true;
}

static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
						     int check_pos)
{
	unsigned l = iter->level;

	while (btree_iter_node(iter, l) &&
	       !btree_iter_good_node(iter, l, check_pos)) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
	}

	return l;
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
static int btree_iter_traverse_one(struct btree_iter *iter,
				   unsigned long trace_ip)
{
	unsigned depth_want = iter->level;

	/*
	 * if we need interior nodes locked, call btree_iter_relock() to make
	 * sure we walk back up enough that we lock them:
	 */
	if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
	    iter->locks_want > 1)
		bch2_btree_iter_relock(iter, false);

	if (btree_iter_type(iter) == BTREE_ITER_CACHED)
		return bch2_btree_iter_traverse_cached(iter);

	if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		return 0;

	/*
	 * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
	 * here unnecessary
	 */
	iter->level = btree_iter_up_until_good_node(iter, 0);

	/*
	 * If we've got a btree node locked (i.e. we aren't about to relock the
	 * root) - advance its node iterator if necessary:
	 *
	 * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
	 */
	if (is_btree_node(iter, iter->level)) {
		BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b));

		btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
	}

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		int ret = btree_iter_node(iter, iter->level)
			? btree_iter_down(iter, trace_ip)
			: btree_iter_lock_root(iter, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1)
				return 0;

			iter->level = depth_want;

			if (ret == -EIO) {
				iter->flags |= BTREE_ITER_ERROR;
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_ERROR;
			} else {
				iter->l[iter->level].b =
					BTREE_ITER_NO_NODE_DOWN;
			}
			return ret;
		}
	}

	iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_iter_verify(iter);
	return 0;
}
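
/*
 * Usage sketch (hypothetical helper, for illustration only): traversal is
 * what (re)establishes iter->l[] and the node locks after a position change;
 * peek()/peek_slot() call it internally, but it can also be invoked directly
 * to fault in the btree path for an iterator:
 */
static int __maybe_unused example_traverse(struct btree_iter *iter)
{
	/* 0 on success, -EINTR to restart, -EIO on btree node read error: */
	return bch2_btree_iter_traverse(iter);
}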
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	ret = bch2_trans_cond_resched(trans) ?:
		btree_iter_traverse_one(iter, _RET_IP_);
	if (unlikely(ret))
		ret = __btree_iter_traverse_all(trans, ret);

	return ret;
}
static inline void bch2_btree_iter_checks(struct btree_iter *iter)
{
	enum btree_iter_type type = btree_iter_type(iter);

	EBUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON((type == BTREE_ITER_KEYS ||
		type == BTREE_ITER_CACHED) &&
	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
		bkey_cmp(iter->pos, iter->k.p) > 0));

	bch2_btree_iter_verify_locks(iter);
	bch2_btree_iter_verify_level(iter, iter->level);
}

/* Iterate across nodes (leaf and interior nodes) */
struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_checks(iter);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return iter->l[iter->level].b;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);

	iter->pos = b->key.k.p;
	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify(iter);

	return b;
}
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_checks(iter);

	/* already got to end? */
	if (!btree_iter_node(iter, iter->level))
		return NULL;

	bch2_trans_cond_resched(iter->trans);

	btree_iter_up(iter);

	if (!bch2_btree_node_relock(iter, iter->level))
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	/* got to end? */
	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */

		/*
		 * We don't really want to be unlocking here except we can't
		 * directly tell btree_iter_traverse() "traverse to this level"
		 * except by setting iter->level, so we have to unlock so we
		 * don't screw up our lock invariants:
		 */
		if (btree_node_read_locked(iter, iter->level))
			btree_node_unlock(iter, iter->level);

		iter->pos = bkey_successor(iter->pos);
		iter->level = iter->min_depth;

		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->l[iter->level].b;
	}

	iter->pos = b->key.k.p;
	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify(iter);

	return b;
}
/* Iterate across keys (in leaf nodes only) */

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_iter_level *l = &iter->l[0];

	EBUG_ON(iter->level != 0);
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
	EBUG_ON(!btree_node_locked(iter, 0));
	EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);

	bkey_init(&iter->k);
	iter->k.p = iter->pos = new_pos;
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	btree_iter_advance_to_pos(iter, l, -1);

	/*
	 * XXX:
	 * keeping a node locked that's outside (even just outside) iter->pos
	 * breaks __bch2_btree_node_lock(). This seems to only affect
	 * bch2_btree_node_get_sibling so for now it's fixed there, but we
	 * should try to get rid of this corner case.
	 *
	 * (this behaviour is currently needed for BTREE_INSERT_NOUNLOCK)
	 */

	if (bch2_btree_node_iter_end(&l->iter) &&
	    btree_iter_pos_after_node(iter, l->b))
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}
static void btree_iter_pos_changed(struct btree_iter *iter, int cmp)
{
	unsigned l = iter->level;

	if (!cmp)
		goto out;

	if (unlikely(btree_iter_type(iter) == BTREE_ITER_CACHED)) {
		btree_node_unlock(iter, 0);
		iter->l[0].b = BTREE_ITER_NO_NODE_UP;
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		return;
	}

	l = btree_iter_up_until_good_node(iter, cmp);

	if (btree_iter_node(iter, l)) {
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive):
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[l], 8))
			__btree_iter_init(iter, l);

		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(iter, l);
	}
out:
	if (l != iter->level)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	else
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos,
			       bool strictly_greater)
{
	struct bpos old = btree_iter_search_key(iter);
	int cmp;

	iter->flags &= ~BTREE_ITER_IS_EXTENTS;
	iter->flags |= strictly_greater ? BTREE_ITER_IS_EXTENTS : 0;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = new_pos;

	cmp = bkey_cmp(btree_iter_search_key(iter), old);

	btree_iter_pos_changed(iter, cmp);
}

void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	int cmp = bkey_cmp(new_pos, iter->pos);

	bkey_init(&iter->k);
	iter->k.p = iter->pos = new_pos;

	btree_iter_pos_changed(iter, cmp);
}
static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	bool ret;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = l->b->key.k.p;

	ret = bkey_cmp(iter->pos, POS_MAX) != 0;
	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		iter->k.p = iter->pos = bkey_successor(iter->pos);

	btree_iter_pos_changed(iter, 1);
	return ret;
}

static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	bool ret;

	bkey_init(&iter->k);
	iter->k.p = iter->pos = l->b->data->min_key;
	iter->uptodate = BTREE_ITER_NEED_TRAVERSE;

	ret = bkey_cmp(iter->pos, POS_MIN) != 0;
	if (ret) {
		iter->k.p = iter->pos = bkey_predecessor(iter->pos);

		if (iter->flags & BTREE_ITER_IS_EXTENTS)
			iter->k.p = iter->pos = bkey_predecessor(iter->pos);
	}

	btree_iter_pos_changed(iter, -1);
	return ret;
}
/**
 * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
 * it currently points to
 */
static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c ret = { .k = &iter->k };

	if (!bkey_deleted(&iter->k)) {
		struct bkey_packed *_k =
			__bch2_btree_node_iter_peek_all(&l->iter, l->b);

		ret.v = bkeyp_val(&l->b->format, _k);

		if (bch2_debug_check_iterators) {
			struct bkey k = bkey_unpack_key(l->b, _k);

			BUG_ON(memcmp(&k, &iter->k, sizeof(k)));
		}

		if (bch2_debug_check_bkeys)
			bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
	}

	return ret;
}
/**
 * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
 * current position
 */
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_checks(iter);

	if (iter->uptodate == BTREE_ITER_UPTODATE &&
	    !bkey_deleted(&iter->k))
		return btree_iter_peek_uptodate(iter);

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = __btree_iter_peek(iter, l);
		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should always be equal to the key we just
	 * returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
	    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify_level(iter, 0);
	return k;
}
/**
 * bch2_btree_iter_next: returns first key greater than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter,
		(iter->flags & BTREE_ITER_IS_EXTENTS)
		? iter->k.p
		: bkey_successor(iter->k.p));

	return bch2_btree_iter_peek(iter);
}
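
/*
 * Usage sketch (hypothetical helper, for illustration only; error handling
 * on iterator allocation elided): walking every key in a btree with
 * peek()/next(). bkey_err() maps an error key to its errno; a real caller
 * would typically also restart on -EINTR:
 */
static int __maybe_unused example_walk_keys(struct btree_trans *trans)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret = 0;

	iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, POS_MIN, 0);

	for (k = bch2_btree_iter_peek(iter);
	     k.k && !(ret = bkey_err(k));
	     k = bch2_btree_iter_next(iter)) {
		/* inspect k here */
	}

	bch2_trans_iter_put(trans, iter);
	return ret;
}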
static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
{
	struct bpos pos = btree_iter_search_key(iter);
	struct btree_trans *trans = iter->trans;
	struct btree_insert_entry *i;

	trans_for_each_update2(trans, i)
		if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
		     bkey_cmp(pos, i->k->k.p)) <= 0)
			break;

	return i < trans->updates2 + trans->nr_updates2 &&
		iter->btree_id == i->iter->btree_id
		? bkey_i_to_s_c(i->k)
		: bkey_s_c_null;
}

static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k = __btree_iter_peek(iter, l);
	struct bkey_s_c u = __btree_trans_updates_peek(iter);

	if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0))
		return k;
	if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) {
		iter->k = *u.k;
		return u;
	}
	return bkey_s_c_null;
}
struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
{
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_checks(iter);

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = __bch2_btree_iter_peek_with_updates(iter);

		if (k.k && bkey_deleted(k.k)) {
			bch2_btree_iter_set_pos(iter,
				(iter->flags & BTREE_ITER_IS_EXTENTS)
				? iter->k.p
				: bkey_successor(iter->k.p));
			continue;
		}

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
	}

	/*
	 * iter->pos should always be equal to the key we just
	 * returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
	    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	iter->uptodate = BTREE_ITER_UPTODATE;
	return k;
}

struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
{
	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter,
		(iter->flags & BTREE_ITER_IS_EXTENTS)
		? iter->k.p
		: bkey_successor(iter->k.p));

	return bch2_btree_iter_peek_with_updates(iter);
}
/**
 * bch2_btree_iter_peek_prev: returns first key less than or equal to
 * iterator's current position
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_checks(iter);

	if (iter->uptodate == BTREE_ITER_UPTODATE &&
	    !bkey_deleted(&iter->k))
		return btree_iter_peek_uptodate(iter);

	while (1) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		k = __btree_iter_peek(iter, l);
		if (!k.k || bkey_cmp(bkey_start_pos(k.k), pos) > 0)
			k = __btree_iter_prev(iter, l);

		if (likely(k.k))
			break;

		if (!btree_iter_set_pos_to_prev_leaf(iter))
			return bkey_s_c_null;
	}

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), pos) > 0);
	iter->pos = bkey_start_pos(k.k);
	iter->uptodate = BTREE_ITER_UPTODATE;
	return k;
}

/**
 * bch2_btree_iter_prev: returns first key less than iterator's current
 * position
 */
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_checks(iter);

	if (unlikely(!bkey_cmp(pos, POS_MIN)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter, bkey_predecessor(pos));

	return bch2_btree_iter_peek_prev(iter);
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter;
	struct bkey_s_c k;
	struct bkey n;
	int ret;

	/* keys & holes can't span inode numbers: */
	if (iter->pos.offset == KEY_OFFSET_MAX) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter, bkey_successor(iter->pos));

		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);
	}

	/*
	 * iterator is now at the correct position for inserting at iter->pos,
	 * but we need to keep iterating until we find the first non whiteout so
	 * we know how big a hole we have, if any:
	 */

	node_iter = l->iter;
	k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&node_iter, l->b));

	if (k.k && bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
		/*
		 * We're not setting iter->uptodate because the node iterator
		 * doesn't necessarily point at the key we're returning:
		 */

		EBUG_ON(bkey_cmp(k.k->p, iter->pos) <= 0);
		bch2_btree_iter_verify_level(iter, 0);
		return k;
	}

	/* hole */

	if (!k.k)
		k.k = &l->b->key.k;

	bkey_init(&n);
	n.p = iter->pos;
	bch2_key_resize(&n,
			min_t(u64, KEY_SIZE_MAX,
			      (k.k->p.inode == n.p.inode
			       ? bkey_start_offset(k.k)
			       : KEY_OFFSET_MAX) -
			      n.p.offset));

	EBUG_ON(!n.size);

	iter->k = n;
	iter->uptodate = BTREE_ITER_UPTODATE;

	bch2_btree_iter_verify_level(iter, 0);
	return (struct bkey_s_c) { &iter->k, NULL };
}
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_checks(iter);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return btree_iter_peek_uptodate(iter);

	ret = bch2_btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return __bch2_btree_iter_peek_slot_extents(iter);

	k = __btree_iter_peek_all(iter, l, &iter->k);

	EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);

	if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
		/* hole */
		bkey_init(&iter->k);
		iter->k.p = iter->pos;
		k = (struct bkey_s_c) { &iter->k, NULL };
	}

	iter->uptodate = BTREE_ITER_UPTODATE;
	bch2_btree_iter_verify_level(iter, 0);
	return k;
}
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (unlikely(!bkey_cmp(iter->k.p, POS_MAX)))
		return bkey_s_c_null;

	bch2_btree_iter_set_pos(iter,
		(iter->flags & BTREE_ITER_IS_EXTENTS)
		? iter->k.p
		: bkey_successor(iter->k.p));

	return bch2_btree_iter_peek_slot(iter);
}
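
/*
 * Usage sketch (hypothetical helper, for illustration only): peek_slot()
 * differs from peek() in that holes are returned too, as deleted keys, so
 * every position yields exactly one result:
 */
static void __maybe_unused example_read_slot(struct btree_iter *iter)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && !bkey_deleted(k.k)) {
		/* a real key exists at iter->pos */
	}
}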
struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
{
	struct bkey_cached *ck;
	int ret;

	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
	bch2_btree_iter_checks(iter);

	ret = bch2_btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	ck = (void *) iter->l[0].b;

	EBUG_ON(iter->btree_id != ck->key.btree_id ||
		bkey_cmp(iter->pos, ck->key.pos));

	return bkey_i_to_s_c(ck->k);
}
static inline void bch2_btree_iter_init(struct btree_trans *trans,
			struct btree_iter *iter, enum btree_id btree_id,
			struct bpos pos, unsigned flags)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	if (btree_node_type_is_extents(btree_id) &&
	    !(flags & BTREE_ITER_NODES))
		flags |= BTREE_ITER_IS_EXTENTS;

	iter->trans			= trans;
	iter->pos			= pos;
	bkey_init(&iter->k);
	iter->k.p			= pos;
	iter->flags			= flags;
	iter->uptodate			= BTREE_ITER_NEED_TRAVERSE;
	iter->btree_id			= btree_id;
	iter->level			= 0;
	iter->min_depth			= 0;
	iter->locks_want		= flags & BTREE_ITER_INTENT ? 1 : 0;
	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= BTREE_ITER_NO_NODE_INIT;

	prefetch(c->btree_roots[btree_id].b);
}

/* new transactional stuff: */
static inline void __bch2_trans_iter_free(struct btree_trans *trans,
					  unsigned idx)
{
	__bch2_btree_iter_unlock(&trans->iters[idx]);
	trans->iters_linked	&= ~(1ULL << idx);
	trans->iters_live	&= ~(1ULL << idx);
	trans->iters_touched	&= ~(1ULL << idx);
}

int bch2_trans_iter_put(struct btree_trans *trans,
			struct btree_iter *iter)
{
	int ret;

	if (IS_ERR_OR_NULL(iter))
		return 0;

	BUG_ON(trans->iters + iter->idx != iter);

	ret = btree_iter_err(iter);

	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
	    !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
		__bch2_trans_iter_free(trans, iter->idx);

	trans->iters_live &= ~(1ULL << iter->idx);
	return ret;
}

int bch2_trans_iter_free(struct btree_trans *trans,
			 struct btree_iter *iter)
{
	if (IS_ERR_OR_NULL(iter))
		return 0;

	trans->iters_touched &= ~(1ULL << iter->idx);

	return bch2_trans_iter_put(trans, iter);
}
static int bch2_trans_realloc_iters(struct btree_trans *trans,
				    unsigned new_size)
{
	void *p, *new_iters, *new_updates, *new_updates2;
	size_t iters_bytes;
	size_t updates_bytes;

	new_size = roundup_pow_of_two(new_size);

	BUG_ON(new_size > BTREE_ITER_MAX);

	if (new_size <= trans->size)
		return 0;

	BUG_ON(trans->used_mempool);

	bch2_trans_unlock(trans);

	iters_bytes	= sizeof(struct btree_iter) * new_size;
	updates_bytes	= sizeof(struct btree_insert_entry) * new_size;

	p = kmalloc(iters_bytes +
		    updates_bytes +
		    updates_bytes, GFP_NOFS);
	if (p)
		goto success;

	p = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
	new_size = BTREE_ITER_MAX;

	trans->used_mempool = true;
success:
	new_iters	= p; p += iters_bytes;
	new_updates	= p; p += updates_bytes;
	new_updates2	= p; p += updates_bytes;

	memcpy(new_iters, trans->iters,
	       sizeof(struct btree_iter) * trans->nr_iters);
	memcpy(new_updates, trans->updates,
	       sizeof(struct btree_insert_entry) * trans->nr_updates);
	memcpy(new_updates2, trans->updates2,
	       sizeof(struct btree_insert_entry) * trans->nr_updates2);

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		memset(trans->iters, POISON_FREE,
		       sizeof(struct btree_iter) * trans->nr_iters +
		       sizeof(struct btree_insert_entry) * trans->nr_iters);

	kfree(trans->iters);

	trans->iters		= new_iters;
	trans->updates		= new_updates;
	trans->updates2		= new_updates2;
	trans->size		= new_size;

	if (trans->iters_live) {
		trace_trans_restart_iters_realloced(trans->ip, trans->size);
		return -EINTR;
	}

	return 0;
}
static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
{
	unsigned idx = __ffs64(~trans->iters_linked);

	if (idx < trans->nr_iters)
		goto got_slot;

	if (trans->nr_iters == trans->size) {
		struct btree_iter *iter;
		int ret;

		BUG_ON(trans->size < BTREE_ITER_MAX);

		trans_for_each_iter(trans, iter) {
			pr_err("iter: btree %s pos %llu:%llu%s%s%s %ps",
			       bch2_btree_ids[iter->btree_id],
			       iter->pos.inode,
			       iter->pos.offset,
			       (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
			       (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
			       iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "",
			       (void *) iter->ip_allocated);
		}

		panic("trans iter overflow\n");

		ret = bch2_trans_realloc_iters(trans, trans->size * 2);
		if (ret)
			return ERR_PTR(ret);
	}

	idx = trans->nr_iters++;
	BUG_ON(trans->nr_iters > trans->size);

	trans->iters[idx].idx = idx;
got_slot:
	BUG_ON(trans->iters_linked & (1ULL << idx));
	trans->iters_linked |= 1ULL << idx;
	trans->iters[idx].flags = 0;
	return &trans->iters[idx];
}
static inline void btree_iter_copy(struct btree_iter *dst,
				   struct btree_iter *src)
{
	unsigned i, idx = dst->idx;

	*dst = *src;
	dst->idx = idx;
	dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;

	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(dst, i))
			six_lock_increment(&dst->l[i].b->c.lock,
					   __btree_lock_want(dst, i));

	dst->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
	dst->flags &= ~BTREE_ITER_SET_POS_AFTER_COMMIT;
}

static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
{
	if (bkey_cmp(l, r) > 0)
		swap(l, r);

	return POS(r.inode - l.inode, r.offset - l.offset);
}
static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags)
{
	struct btree_iter *iter, *best = NULL;

	BUG_ON(trans->nr_iters > BTREE_ITER_MAX);

	trans_for_each_iter(trans, iter) {
		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
			continue;

		if (iter->btree_id != btree_id)
			continue;

		if (best &&
		    bkey_cmp(bpos_diff(best->pos, pos),
			     bpos_diff(iter->pos, pos)) < 0)
			continue;

		best = iter;
	}

	if (!best) {
		iter = btree_trans_iter_alloc(trans);
		if (IS_ERR(iter))
			return iter;

		bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
	} else if ((trans->iters_live & (1ULL << best->idx)) ||
		   (best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) {
		iter = btree_trans_iter_alloc(trans);
		if (IS_ERR(iter))
			return iter;

		btree_iter_copy(iter, best);
	} else {
		iter = best;
	}

	iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
	iter->flags &= ~BTREE_ITER_USER_FLAGS;
	iter->flags |= flags & BTREE_ITER_USER_FLAGS;

	if (iter->flags & BTREE_ITER_INTENT)
		bch2_btree_iter_upgrade(iter, 1);
	else
		bch2_btree_iter_downgrade(iter);

	BUG_ON(iter->btree_id != btree_id);
	BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
	BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
	BUG_ON(iter->flags & BTREE_ITER_SET_POS_AFTER_COMMIT);
	BUG_ON(trans->iters_live & (1ULL << iter->idx));

	trans->iters_live	|= 1ULL << iter->idx;
	trans->iters_touched	|= 1ULL << iter->idx;

	return iter;
}
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 enum btree_id btree_id,
					 struct bpos pos, unsigned flags)
{
	struct btree_iter *iter =
		__btree_trans_get_iter(trans, btree_id, pos, flags);

	if (!IS_ERR(iter))
		__bch2_btree_iter_set_pos(iter, pos,
			btree_node_type_is_extents(btree_id));
	return iter;
}
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos pos,
					    unsigned locks_want,
					    unsigned depth,
					    unsigned flags)
{
	struct btree_iter *iter =
		__btree_trans_get_iter(trans, btree_id, pos,
				       flags|BTREE_ITER_NODES);
	unsigned i;

	BUG_ON(IS_ERR(iter));
	BUG_ON(bkey_cmp(iter->pos, pos));

	iter->locks_want = locks_want;
	iter->level	 = depth;
	iter->min_depth	 = depth;

	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b = NULL;
	iter->l[iter->level].b = BTREE_ITER_NO_NODE_INIT;

	return iter;
}
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *trans,
					  struct btree_iter *src)
{
	struct btree_iter *iter;

	iter = btree_trans_iter_alloc(trans);
	if (IS_ERR(iter))
		return iter;

	btree_iter_copy(iter, src);

	trans->iters_live |= 1ULL << iter->idx;

	/*
	 * We don't need to preserve this iter since it's cheap to copy it
	 * again - this will cause trans_iter_put() to free it right away:
	 */
	trans->iters_touched &= ~(1ULL << iter->idx);

	return iter;
}
static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
{
	if (size > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(size);
		void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);

		if (!new_mem)
			return -ENOMEM;

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			trace_trans_restart_mem_realloced(trans->ip, new_bytes);
			return -EINTR;
		}
	}

	return 0;
}

void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	void *p;
	int ret;

	ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
	if (ret)
		return ERR_PTR(ret);

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
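
/*
 * Usage sketch (hypothetical helper, for illustration only): memory from
 * bch2_trans_kmalloc() lives until the transaction is reset or exits, so
 * update paths can build keys in transaction-owned memory. Note the -EINTR
 * case: if the arena had to be reallocated, pointers from earlier
 * allocations are invalid and the transaction must restart:
 */
static struct bkey_i * __maybe_unused example_alloc_key(struct btree_trans *trans)
{
	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));

	if (IS_ERR(k))
		return k;	/* -ENOMEM, or -EINTR after a realloc */

	bkey_init(&k->k);
	return k;
}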
inline void bch2_trans_unlink_iters(struct btree_trans *trans)
{
	u64 iters = trans->iters_linked &
		~trans->iters_touched &
		~trans->iters_live;

	while (iters) {
		unsigned idx = __ffs64(iters);

		iters &= ~(1ULL << idx);
		__bch2_trans_iter_free(trans, idx);
	}
}
void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		iter->flags &= ~(BTREE_ITER_KEEP_UNTIL_COMMIT|
				 BTREE_ITER_SET_POS_AFTER_COMMIT);

	bch2_trans_unlink_iters(trans);

	trans->iters_touched &= trans->iters_live;

	trans->need_reset		= 0;
	trans->nr_updates		= 0;
	trans->nr_updates2		= 0;
	trans->mem_top			= 0;

	trans->extra_journal_entries	= NULL;
	trans->extra_journal_entry_u64s	= 0;

	if (trans->fs_usage_deltas) {
		trans->fs_usage_deltas->used = 0;
		memset(&trans->fs_usage_deltas->memset_start, 0,
		       (void *) &trans->fs_usage_deltas->memset_end -
		       (void *) &trans->fs_usage_deltas->memset_start);
	}

	if (!(flags & TRANS_RESET_NOTRAVERSE))
		bch2_btree_iter_traverse_all(trans);
}
static void bch2_trans_alloc_iters(struct btree_trans *trans, struct bch_fs *c)
{
	unsigned new_size = BTREE_ITER_MAX;
	size_t iters_bytes	= sizeof(struct btree_iter) * new_size;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * new_size;
	void *p;

	BUG_ON(trans->used_mempool);

	p = this_cpu_xchg(c->btree_iters_bufs->iter, NULL) ?:
		mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);

	trans->iters		= p; p += iters_bytes;
	trans->updates		= p; p += updates_bytes;
	trans->updates2		= p; p += updates_bytes;
	trans->size		= new_size;
}
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
{
	memset(trans, 0, sizeof(*trans));
	trans->c	= c;
	trans->ip	= _RET_IP_;

	/*
	 * reallocating iterators currently completely breaks
	 * bch2_trans_iter_put(), we always allocate the max:
	 */
	bch2_trans_alloc_iters(trans, c);

	if (expected_mem_bytes)
		bch2_trans_preload_mem(trans, expected_mem_bytes);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
#endif
}
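
/*
 * Lifecycle sketch (hypothetical helper, for illustration only): callers
 * stack-allocate the transaction, retry the work while it returns -EINTR,
 * and pick up any stashed error from bch2_trans_exit():
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c)
{
	struct btree_trans trans;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);
	do {
		ret = example_walk_keys(&trans);
	} while (ret == -EINTR);

	return bch2_trans_exit(&trans) ?: ret;
}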
int bch2_trans_exit(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	mutex_lock(&trans->c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&trans->c->btree_trans_lock);
#endif

	bch2_journal_preres_put(&trans->c->journal, &trans->journal_preres);

	kfree(trans->fs_usage_deltas);
	kfree(trans->mem);

	trans->iters = this_cpu_xchg(c->btree_iters_bufs->iter, trans->iters);
	if (trans->iters)
		mempool_free(trans->iters, &trans->c->btree_iters_pool);

	trans->mem	= (void *) 0x1;
	trans->iters	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}
static void __maybe_unused
bch2_btree_iter_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     enum btree_iter_type type)
{
	pr_buf(out, " %px l=%u %s:",
	       _b, _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, type));
}
void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_iter *iter;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		pr_buf(out, "%i %px %ps\n", trans->pid, trans, (void *) trans->ip);

		trans_for_each_iter(trans, iter) {
			if (!iter->nodes_locked)
				continue;

			pr_buf(out, "  iter %u %s:",
			       iter->idx,
			       bch2_btree_ids[iter->btree_id]);
			bch2_bpos_to_text(out, iter->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(iter, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(iter, l) ? "i" : "r", l);
					bch2_btree_iter_node_to_text(out,
							(void *) iter->l[l].b,
							btree_iter_type(iter));
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			pr_buf(out, "  locking iter %u l=%u %s:",
			       trans->locking_iter_idx,
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_iter_node_to_text(out,
					(void *) b,
					btree_iter_type(&trans->iters[trans->locking_iter_idx]));
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}
void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_iters_pool);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return mempool_init_kmalloc_pool(&c->btree_iters_pool, 1,
			sizeof(struct btree_iter) * nr +
			sizeof(struct btree_insert_entry) * nr +
			sizeof(struct btree_insert_entry) * nr);
}