1 // SPDX-License-Identifier: GPL-2.0
4 #include "bkey_methods.h"
6 #include "btree_cache.h"
7 #include "btree_iter.h"
8 #include "btree_journal_iter.h"
9 #include "btree_key_cache.h"
10 #include "btree_locking.h"
11 #include "btree_update.h"
17 #include "subvolume.h"
20 #include <linux/random.h>
21 #include <linux/prefetch.h>
23 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
24 static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
27 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
29 #ifdef TRACK_PATH_ALLOCATED
30 return iter->ip_allocated;
36 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
38 static inline int __btree_path_cmp(const struct btree_path *l,
39 enum btree_id r_btree_id,
45 * Must match lock ordering as defined by __bch2_btree_node_lock:
47 return cmp_int(l->btree_id, r_btree_id) ?:
48 cmp_int((int) l->cached, (int) r_cached) ?:
49 bpos_cmp(l->pos, r_pos) ?:
50 -cmp_int(l->level, r_level);
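/*
 * Added commentary: at the same btree and position this sorts uncached
 * before cached paths, and higher levels (closer to the root) first,
 * matching the order in which node locks may be acquired.
 */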
53 static inline int btree_path_cmp(const struct btree_path *l,
54 const struct btree_path *r)
56 return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
59 static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
61 /* Are we iterating over keys in all snapshots? */
62 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
63 p = bpos_successor(p);
65 p = bpos_nosnap_successor(p);
66 p.snapshot = iter->snapshot;
72 static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
74 /* Are we iterating over keys in all snapshots? */
75 if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
76 p = bpos_predecessor(p);
78 p = bpos_nosnap_predecessor(p);
79 p.snapshot = iter->snapshot;
85 static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
87 struct bpos pos = iter->pos;
89 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
90 !bkey_eq(pos, POS_MAX))
91 pos = bkey_successor(iter, pos);
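/*
 * Added commentary (a sketch, not from the original source): extents are
 * indexed by their end position, so e.g. an extent covering [8,16) is
 * stored with k.p.offset == 16. Searching for the successor of
 * iter->pos == 8 therefore lands on any extent whose end is > 8, i.e.
 * the extent overlapping pos:
 *
 *	iter->pos = POS(inode, 8)  -> search key POS(inode, 9)
 *	                           -> finds the key at POS(inode, 16)
 */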
95 static inline bool btree_path_pos_before_node(struct btree_path *path,
98 return bpos_lt(path->pos, b->data->min_key);
101 static inline bool btree_path_pos_after_node(struct btree_path *path,
104 return bpos_gt(path->pos, b->key.k.p);
107 static inline bool btree_path_pos_in_node(struct btree_path *path,
110 return path->btree_id == b->c.btree_id &&
111 !btree_path_pos_before_node(path, b) &&
112 !btree_path_pos_after_node(path, b);
115 /* Btree iterator: */
117 #ifdef CONFIG_BCACHEFS_DEBUG
119 static void bch2_btree_path_verify_cached(struct btree_trans *trans,
120 struct btree_path *path)
122 struct bkey_cached *ck;
123 bool locked = btree_node_locked(path, 0);
125 if (!bch2_btree_node_relock(trans, path, 0))
128 ck = (void *) path->l[0].b;
129 BUG_ON(ck->key.btree_id != path->btree_id ||
130 !bkey_eq(ck->key.pos, path->pos));
133 btree_node_unlock(trans, path, 0);
136 static void bch2_btree_path_verify_level(struct btree_trans *trans,
137 struct btree_path *path, unsigned level)
139 struct btree_path_level *l;
140 struct btree_node_iter tmp;
142 struct bkey_packed *p, *k;
143 struct printbuf buf1 = PRINTBUF;
144 struct printbuf buf2 = PRINTBUF;
145 struct printbuf buf3 = PRINTBUF;
148 if (!bch2_debug_check_iterators)
153 locked = btree_node_locked(path, level);
157 bch2_btree_path_verify_cached(trans, path);
161 if (!btree_path_node(path, level))
164 if (!bch2_btree_node_relock_notrace(trans, path, level))
167 BUG_ON(!btree_path_pos_in_node(path, l->b));
169 bch2_btree_node_iter_verify(&l->iter, l->b);
172 * For interior nodes, the iterator will have skipped past deleted keys:
175 ? bch2_btree_node_iter_prev(&tmp, l->b)
176 : bch2_btree_node_iter_prev_all(&tmp, l->b);
177 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
179 if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
184 if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
190 btree_node_unlock(trans, path, level);
193 bch2_bpos_to_text(&buf1, path->pos);
196 struct bkey uk = bkey_unpack_key(l->b, p);
198 bch2_bkey_to_text(&buf2, &uk);
200 prt_printf(&buf2, "(none)");
204 struct bkey uk = bkey_unpack_key(l->b, k);
206 bch2_bkey_to_text(&buf3, &uk);
208 prt_printf(&buf3, "(none)");
211 panic("path should be %s key at level %u:\n"
215 msg, level, buf1.buf, buf2.buf, buf3.buf);
218 static void bch2_btree_path_verify(struct btree_trans *trans,
219 struct btree_path *path)
221 struct bch_fs *c = trans->c;
224 EBUG_ON(path->btree_id >= BTREE_ID_NR);
226 for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
228 BUG_ON(!path->cached &&
229 bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
233 bch2_btree_path_verify_level(trans, path, i);
236 bch2_btree_path_verify_locks(path);
239 void bch2_trans_verify_paths(struct btree_trans *trans)
241 struct btree_path *path;
243 trans_for_each_path(trans, path)
244 bch2_btree_path_verify(trans, path);
247 static void bch2_btree_iter_verify(struct btree_iter *iter)
249 struct btree_trans *trans = iter->trans;
251 BUG_ON(iter->btree_id >= BTREE_ID_NR);
253 BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);
255 BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
256 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
258 BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
259 (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
260 !btree_type_has_snapshots(iter->btree_id));
262 if (iter->update_path)
263 bch2_btree_path_verify(trans, iter->update_path);
264 bch2_btree_path_verify(trans, iter->path);
267 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
269 BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
270 !iter->pos.snapshot);
272 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
273 iter->pos.snapshot != iter->snapshot);
275 BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
276 bkey_gt(iter->pos, iter->k.p));
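/*
 * Added commentary: for an extent spanning [8,16), bkey_start_pos(&iter->k)
 * == 8 and iter->k.p == 16; the assertion above allows iter->pos anywhere
 * in [8,16], since extents can straddle iter->pos.
 */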
279 static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
281 struct btree_trans *trans = iter->trans;
282 struct btree_iter copy;
283 struct bkey_s_c prev;
286 if (!bch2_debug_check_iterators)
289 if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
292 if (bkey_err(k) || !k.k)
295 BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
299 bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
300 BTREE_ITER_NOPRESERVE|
301 BTREE_ITER_ALL_SNAPSHOTS);
302 prev = bch2_btree_iter_prev(&copy);
306 ret = bkey_err(prev);
310 if (bkey_eq(prev.k->p, k.k->p) &&
311 bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
312 prev.k->p.snapshot) > 0) {
313 struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
315 bch2_bkey_to_text(&buf1, k.k);
316 bch2_bkey_to_text(&buf2, prev.k);
318 panic("iter snap %u\n"
325 bch2_trans_iter_exit(trans, &copy);
329 void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
330 struct bpos pos, bool key_cache)
332 struct btree_path *path;
334 struct printbuf buf = PRINTBUF;
336 btree_trans_sort_paths(trans);
338 trans_for_each_path_inorder(trans, path, idx) {
339 int cmp = cmp_int(path->btree_id, id) ?:
340 cmp_int(path->cached, key_cache);
347 if (!btree_node_locked(path, 0) ||
348 !path->should_be_locked)
352 if (bkey_ge(pos, path->l[0].b->data->min_key) &&
353 bkey_le(pos, path->l[0].b->key.k.p))
356 if (bkey_eq(pos, path->pos))
361 bch2_dump_trans_paths_updates(trans);
362 bch2_bpos_to_text(&buf, pos);
364 panic("not locked: %s %s%s\n",
365 bch2_btree_ids[id], buf.buf,
366 key_cache ? " cached" : "");
371 static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
372 struct btree_path *path, unsigned l) {}
373 static inline void bch2_btree_path_verify(struct btree_trans *trans,
374 struct btree_path *path) {}
375 static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
376 static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
377 static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
381 /* Btree path: fixups after btree updates */
383 static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
386 struct bkey_packed *k)
388 struct btree_node_iter_set *set;
390 btree_node_iter_for_each(iter, set)
391 if (set->end == t->end_offset) {
392 set->k = __btree_node_key_to_offset(b, k);
393 bch2_btree_node_iter_sort(iter, b);
397 bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
400 static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
402 struct bkey_packed *where)
404 struct btree_path_level *l = &path->l[b->c.level];
406 if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
409 if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
410 bch2_btree_node_iter_advance(&l->iter, l->b);
413 void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
415 struct bkey_packed *where)
417 struct btree_path *path;
419 trans_for_each_path_with_node(trans, b, path) {
420 __bch2_btree_path_fix_key_modified(path, b, where);
421 bch2_btree_path_verify_level(trans, path, b->c.level);
425 static void __bch2_btree_node_iter_fix(struct btree_path *path,
427 struct btree_node_iter *node_iter,
429 struct bkey_packed *where,
430 unsigned clobber_u64s,
433 const struct bkey_packed *end = btree_bkey_last(b, t);
434 struct btree_node_iter_set *set;
435 unsigned offset = __btree_node_key_to_offset(b, where);
436 int shift = new_u64s - clobber_u64s;
437 unsigned old_end = t->end_offset - shift;
438 unsigned orig_iter_pos = node_iter->data[0].k;
439 bool iter_current_key_modified =
440 orig_iter_pos >= offset &&
441 orig_iter_pos <= offset + clobber_u64s;
443 btree_node_iter_for_each(node_iter, set)
444 if (set->end == old_end)
447 /* didn't find the bset in the iterator - might have to re-add it: */
449 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
450 bch2_btree_node_iter_push(node_iter, b, where, end);
453 /* Iterator is after key that changed */
457 set->end = t->end_offset;
459 /* Iterator hasn't gotten to the key that changed yet: */
464 bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
466 } else if (set->k < offset + clobber_u64s) {
467 set->k = offset + new_u64s;
468 if (set->k == set->end)
469 bch2_btree_node_iter_set_drop(node_iter, set);
471 /* Iterator is after key that changed */
472 set->k = (int) set->k + shift;
476 bch2_btree_node_iter_sort(node_iter, b);
478 if (node_iter->data[0].k != orig_iter_pos)
479 iter_current_key_modified = true;
482 * When a new key is added, and the node iterator now points to that
483 * key, the iterator might have skipped past deleted keys that should
484 * come after the key the iterator now points to. We have to rewind to
485 * before those deleted keys - otherwise
486 * bch2_btree_node_iter_prev_all() breaks:
488 if (!bch2_btree_node_iter_end(node_iter) &&
489 iter_current_key_modified &&
492 struct bkey_packed *k, *k2, *p;
494 k = bch2_btree_node_iter_peek_all(node_iter, b);
496 for_each_bset(b, t) {
497 bool set_pos = false;
499 if (node_iter->data[0].end == t->end_offset)
502 k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);
504 while ((p = bch2_bkey_prev_all(b, t, k2)) &&
505 bkey_iter_cmp(b, k, p) < 0) {
511 btree_node_iter_set_set_pos(node_iter,
517 void bch2_btree_node_iter_fix(struct btree_trans *trans,
518 struct btree_path *path,
520 struct btree_node_iter *node_iter,
521 struct bkey_packed *where,
522 unsigned clobber_u64s,
525 struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
526 struct btree_path *linked;
528 if (node_iter != &path->l[b->c.level].iter) {
529 __bch2_btree_node_iter_fix(path, b, node_iter, t,
530 where, clobber_u64s, new_u64s);
532 if (bch2_debug_check_iterators)
533 bch2_btree_node_iter_verify(node_iter, b);
536 trans_for_each_path_with_node(trans, b, linked) {
537 __bch2_btree_node_iter_fix(linked, b,
538 &linked->l[b->c.level].iter, t,
539 where, clobber_u64s, new_u64s);
540 bch2_btree_path_verify_level(trans, linked, b->c.level);
544 /* Btree path level: pointer to a particular btree node and node iter */
546 static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
547 struct btree_path_level *l,
549 struct bkey_packed *k)
553 * signal to bch2_btree_iter_peek_slot() that we're currently at
556 u->type = KEY_TYPE_deleted;
557 return bkey_s_c_null;
560 return bkey_disassemble(l->b, k, u);
563 static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
564 struct btree_path_level *l,
567 return __btree_iter_unpack(c, l, u,
568 bch2_btree_node_iter_peek_all(&l->iter, l->b));
571 static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
572 struct btree_path *path,
573 struct btree_path_level *l,
576 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
577 bch2_btree_node_iter_peek(&l->iter, l->b));
579 path->pos = k.k ? k.k->p : l->b->key.k.p;
580 trans->paths_sorted = false;
581 bch2_btree_path_verify_level(trans, path, l - path->l);
585 static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
586 struct btree_path *path,
587 struct btree_path_level *l,
590 struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
591 bch2_btree_node_iter_prev(&l->iter, l->b));
593 path->pos = k.k ? k.k->p : l->b->data->min_key;
594 trans->paths_sorted = false;
595 bch2_btree_path_verify_level(trans, path, l - path->l);
599 static inline bool btree_path_advance_to_pos(struct btree_path *path,
600 struct btree_path_level *l,
603 struct bkey_packed *k;
606 while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
607 bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
608 if (max_advance > 0 && nr_advanced >= max_advance)
611 bch2_btree_node_iter_advance(&l->iter, l->b);
618 static inline void __btree_path_level_init(struct btree_path *path,
621 struct btree_path_level *l = &path->l[level];
623 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
626 * Iterators to interior nodes should always be pointed at the first non
630 bch2_btree_node_iter_peek(&l->iter, l->b);
633 void bch2_btree_path_level_init(struct btree_trans *trans,
634 struct btree_path *path,
637 BUG_ON(path->cached);
639 EBUG_ON(!btree_path_pos_in_node(path, b));
641 path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
642 path->l[b->c.level].b = b;
643 __btree_path_level_init(path, b->c.level);
646 /* Btree path: fixups after btree node updates: */
648 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
650 struct bch_fs *c = trans->c;
651 struct btree_insert_entry *i;
653 trans_for_each_update(trans, i)
655 i->level == b->c.level &&
656 i->btree_id == b->c.btree_id &&
657 bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
658 bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
659 i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;
661 if (unlikely(trans->journal_replay_not_finished)) {
663 bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
675 * A btree node is being replaced - update the iterator to point to the new
678 void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
680 struct btree_path *path;
682 trans_for_each_path(trans, path)
683 if (path->uptodate == BTREE_ITER_UPTODATE &&
685 btree_path_pos_in_node(path, b)) {
686 enum btree_node_locked_type t =
687 btree_lock_want(path, b->c.level);
689 if (t != BTREE_NODE_UNLOCKED) {
690 btree_node_unlock(trans, path, b->c.level);
691 six_lock_increment(&b->c.lock, (enum six_lock_type) t);
692 mark_btree_node_locked(trans, path, b->c.level, (enum six_lock_type) t);
695 bch2_btree_path_level_init(trans, path, b);
698 bch2_trans_revalidate_updates_in_node(trans, b);
702 * A btree node has been modified in such a way as to invalidate iterators - fix
705 void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
707 struct btree_path *path;
709 trans_for_each_path_with_node(trans, b, path)
710 __btree_path_level_init(path, b->c.level);
712 bch2_trans_revalidate_updates_in_node(trans, b);
715 /* Btree path: traverse, set_pos: */
717 static inline int btree_path_lock_root(struct btree_trans *trans,
718 struct btree_path *path,
720 unsigned long trace_ip)
722 struct bch_fs *c = trans->c;
723 struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
724 enum six_lock_type lock_type;
728 EBUG_ON(path->nodes_locked);
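/*
 * Added commentary: this is a read-check-retry pattern - the root pointer
 * is re-read after the lock is taken, and if it changed (e.g. a split
 * created a new root) the lock is dropped and we retry.
 */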
731 b = READ_ONCE(*rootp);
732 path->level = READ_ONCE(b->c.level);
734 if (unlikely(path->level < depth_want)) {
736 * the root is at a lower depth than the depth we want:
737 * got to the end of the btree, or we're walking nodes
738 * greater than some depth and there are no nodes >=
741 path->level = depth_want;
742 for (i = path->level; i < BTREE_MAX_DEPTH; i++)
747 lock_type = __btree_lock_want(path, path->level);
748 ret = btree_node_lock(trans, path, &b->c,
749 path->level, lock_type, trace_ip);
751 if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
753 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
758 if (likely(b == READ_ONCE(*rootp) &&
759 b->c.level == path->level &&
761 for (i = 0; i < path->level; i++)
762 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
763 path->l[path->level].b = b;
764 for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
767 mark_btree_node_locked(trans, path, path->level, lock_type);
768 bch2_btree_path_level_init(trans, path, b);
772 six_unlock_type(&b->c.lock, lock_type);
777 static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
779 struct bch_fs *c = trans->c;
780 struct btree_path_level *l = path_l(path);
781 struct btree_node_iter node_iter = l->iter;
782 struct bkey_packed *k;
784 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
785 ? (path->level > 1 ? 0 : 2)
786 : (path->level > 1 ? 1 : 16);
787 bool was_locked = btree_node_locked(path, path->level);
790 bch2_bkey_buf_init(&tmp);
792 while (nr-- && !ret) {
793 if (!bch2_btree_node_relock(trans, path, path->level))
796 bch2_btree_node_iter_advance(&node_iter, l->b);
797 k = bch2_btree_node_iter_peek(&node_iter, l->b);
801 bch2_bkey_buf_unpack(&tmp, c, l->b, k);
802 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
807 btree_node_unlock(trans, path, path->level);
809 bch2_bkey_buf_exit(&tmp, c);
813 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
814 struct btree_and_journal_iter *jiter)
816 struct bch_fs *c = trans->c;
819 unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
820 ? (path->level > 1 ? 0 : 2)
821 : (path->level > 1 ? 1 : 16);
822 bool was_locked = btree_node_locked(path, path->level);
825 bch2_bkey_buf_init(&tmp);
827 while (nr-- && !ret) {
828 if (!bch2_btree_node_relock(trans, path, path->level))
831 bch2_btree_and_journal_iter_advance(jiter);
832 k = bch2_btree_and_journal_iter_peek(jiter);
836 bch2_bkey_buf_reassemble(&tmp, c, k);
837 ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
842 btree_node_unlock(trans, path, path->level);
844 bch2_bkey_buf_exit(&tmp, c);
848 static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
849 struct btree_path *path,
850 unsigned plevel, struct btree *b)
852 struct btree_path_level *l = &path->l[plevel];
853 bool locked = btree_node_locked(path, plevel);
854 struct bkey_packed *k;
855 struct bch_btree_ptr_v2 *bp;
857 if (!bch2_btree_node_relock(trans, path, plevel))
860 k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
861 BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
863 bp = (void *) bkeyp_val(&l->b->format, k);
864 bp->mem_ptr = (unsigned long)b;
867 btree_node_unlock(trans, path, plevel);
870 static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
871 struct btree_path *path,
873 struct bkey_buf *out)
875 struct bch_fs *c = trans->c;
876 struct btree_path_level *l = path_l(path);
877 struct btree_and_journal_iter jiter;
881 __bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);
883 k = bch2_btree_and_journal_iter_peek(&jiter);
885 bch2_bkey_buf_reassemble(out, c, k);
887 if (flags & BTREE_ITER_PREFETCH)
888 ret = btree_path_prefetch_j(trans, path, &jiter);
890 bch2_btree_and_journal_iter_exit(&jiter);
894 static __always_inline int btree_path_down(struct btree_trans *trans,
895 struct btree_path *path,
897 unsigned long trace_ip)
899 struct bch_fs *c = trans->c;
900 struct btree_path_level *l = path_l(path);
902 unsigned level = path->level - 1;
903 enum six_lock_type lock_type = __btree_lock_want(path, level);
907 EBUG_ON(!btree_node_locked(path, path->level));
909 bch2_bkey_buf_init(&tmp);
911 if (unlikely(trans->journal_replay_not_finished)) {
912 ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
916 bch2_bkey_buf_unpack(&tmp, c, l->b,
917 bch2_btree_node_iter_peek(&l->iter, l->b));
919 if (flags & BTREE_ITER_PREFETCH) {
920 ret = btree_path_prefetch(trans, path);
926 b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
927 ret = PTR_ERR_OR_ZERO(b);
931 if (likely(!trans->journal_replay_not_finished &&
932 tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
933 unlikely(b != btree_node_mem_ptr(tmp.k)))
934 btree_node_mem_ptr_set(trans, path, level + 1, b);
936 if (btree_node_read_locked(path, level + 1))
937 btree_node_unlock(trans, path, level + 1);
939 mark_btree_node_locked(trans, path, level, lock_type);
941 bch2_btree_path_level_init(trans, path, b);
943 bch2_btree_path_verify_locks(path);
945 bch2_bkey_buf_exit(&tmp, c);
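/*
 * Added commentary: the btree_node_mem_ptr_set() call above is a cache
 * warming optimization - btree_ptr_v2 keys embed a pointer to the
 * in-memory node, so the next traversal through this key can skip the
 * btree node cache hash lookup.
 */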
950 static int bch2_btree_path_traverse_all(struct btree_trans *trans)
952 struct bch_fs *c = trans->c;
953 struct btree_path *path;
954 unsigned long trace_ip = _RET_IP_;
957 if (trans->in_traverse_all)
958 return -BCH_ERR_transaction_restart_in_traverse_all;
960 trans->in_traverse_all = true;
962 trans->restarted = 0;
963 trans->last_restarted_ip = 0;
965 trans_for_each_path(trans, path)
966 path->should_be_locked = false;
968 btree_trans_sort_paths(trans);
970 bch2_trans_unlock(trans);
973 if (unlikely(trans->memory_allocation_failure)) {
976 closure_init_stack(&cl);
979 ret = bch2_btree_cache_cannibalize_lock(c, &cl);
984 /* Now, redo traversals in correct order: */
986 while (i < trans->nr_sorted) {
987 path = trans->paths + trans->sorted[i];
990 * Traversing a path can cause another path to be added at about
993 if (path->uptodate) {
994 __btree_path_get(path, false);
995 ret = bch2_btree_path_traverse_one(trans, path, 0, _THIS_IP_);
996 __btree_path_put(path, false);
998 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
999 bch2_err_matches(ret, ENOMEM))
1009 * We used to assert that all paths had been traversed here
1010 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
1011 * path->should_be_locked is not set yet, we might have unlocked and
1012 * then failed to relock a path - that's fine.
1015 bch2_btree_cache_cannibalize_unlock(c);
1017 trans->in_traverse_all = false;
1019 trace_and_count(c, trans_traverse_all, trans, trace_ip);
1023 static inline bool btree_path_check_pos_in_node(struct btree_path *path,
1024 unsigned l, int check_pos)
1026 if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
1028 if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
1033 static inline bool btree_path_good_node(struct btree_trans *trans,
1034 struct btree_path *path,
1035 unsigned l, int check_pos)
1037 return is_btree_node(path, l) &&
1038 bch2_btree_node_relock(trans, path, l) &&
1039 btree_path_check_pos_in_node(path, l, check_pos);
1042 static void btree_path_set_level_down(struct btree_trans *trans,
1043 struct btree_path *path,
1048 path->level = new_level;
1050 for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
1051 if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
1052 btree_node_unlock(trans, path, l);
1054 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1055 bch2_btree_path_verify(trans, path);
1058 static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
1059 struct btree_path *path,
1062 unsigned i, l = path->level;
1064 while (btree_path_node(path, l) &&
1065 !btree_path_good_node(trans, path, l, check_pos))
1066 __btree_path_set_level_up(trans, path, l++);
1068 /* If we need intent locks, take them too: */
1070 i < path->locks_want && btree_path_node(path, i);
1072 if (!bch2_btree_node_relock(trans, path, i)) {
1074 __btree_path_set_level_up(trans, path, l++);
1081 static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
1082 struct btree_path *path,
1085 return likely(btree_node_locked(path, path->level) &&
1086 btree_path_check_pos_in_node(path, path->level, check_pos))
1088 : __btree_path_up_until_good_node(trans, path, check_pos);
1092 * This is the main state machine for walking down the btree - walks down to a
1095 * Returns 0 on success, -EIO on error (error reading in a btree node).
1097 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
1098 * stashed in the iterator and returned from bch2_trans_exit().
1100 int bch2_btree_path_traverse_one(struct btree_trans *trans,
1101 struct btree_path *path,
1103 unsigned long trace_ip)
1105 unsigned depth_want = path->level;
1106 int ret = -((int) trans->restarted);
1112 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
1113 * and re-traverse the path without a transaction restart:
1115 if (path->should_be_locked) {
1116 ret = bch2_btree_path_relock(trans, path, trace_ip);
1121 ret = bch2_btree_path_traverse_cached(trans, path, flags);
1125 if (unlikely(path->level >= BTREE_MAX_DEPTH))
1128 path->level = btree_path_up_until_good_node(trans, path, 0);
1130 EBUG_ON(btree_path_node(path, path->level) &&
1131 !btree_node_locked(path, path->level));
1134 * Note: path->nodes[path->level] may be temporarily NULL here - that
1135 * would indicate to other code that we got to the end of the btree,
1136 * here it indicates that relocking the root failed - it's critical that
1137 * btree_path_lock_root() comes next and that it can't fail
1139 while (path->level > depth_want) {
1140 ret = btree_path_node(path, path->level)
1141 ? btree_path_down(trans, path, flags, trace_ip)
1142 : btree_path_lock_root(trans, path, depth_want, trace_ip);
1143 if (unlikely(ret)) {
1146 * No nodes at this level - got to the end of
1153 __bch2_btree_path_unlock(trans, path);
1154 path->level = depth_want;
1155 path->l[path->level].b = ERR_PTR(ret);
1160 path->uptodate = BTREE_ITER_UPTODATE;
1162 if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
1163 panic("ret %s (%i) trans->restarted %s (%i)\n",
1164 bch2_err_str(ret), ret,
1165 bch2_err_str(trans->restarted), trans->restarted);
1166 bch2_btree_path_verify(trans, path);
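/*
 * Example usage (a sketch, not from this file): callers normally go
 * through the iterator interface rather than calling this directly:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos, 0);
 *	ret = bch2_btree_iter_traverse(&iter);
 *	if (!ret)
 *		k = bch2_btree_iter_peek_slot(&iter);
 *	bch2_trans_iter_exit(trans, &iter);
 */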
1170 static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
1171 struct btree_path *src)
1173 unsigned i, offset = offsetof(struct btree_path, pos);
1175 memcpy((void *) dst + offset,
1176 (void *) src + offset,
1177 sizeof(struct btree_path) - offset);
1179 for (i = 0; i < BTREE_MAX_DEPTH; i++) {
1180 unsigned t = btree_node_locked_type(dst, i);
1182 if (t != BTREE_NODE_UNLOCKED)
1183 six_lock_increment(&dst->l[i].b->c.lock, t);
1187 static struct btree_path *btree_path_clone(struct btree_trans *trans, struct btree_path *src,
1190 struct btree_path *new = btree_path_alloc(trans, src);
1192 btree_path_copy(trans, new, src);
1193 __btree_path_get(new, intent);
1198 struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
1199 struct btree_path *path, bool intent,
1202 __btree_path_put(path, intent);
1203 path = btree_path_clone(trans, path, intent);
1204 path->preserve = false;
1208 struct btree_path * __must_check
1209 __bch2_btree_path_set_pos(struct btree_trans *trans,
1210 struct btree_path *path, struct bpos new_pos,
1211 bool intent, unsigned long ip, int cmp)
1213 unsigned level = path->level;
1215 bch2_trans_verify_not_in_restart(trans);
1216 EBUG_ON(!path->ref);
1218 path = bch2_btree_path_make_mut(trans, path, intent, ip);
1220 path->pos = new_pos;
1221 trans->paths_sorted = false;
1223 if (unlikely(path->cached)) {
1224 btree_node_unlock(trans, path, 0);
1225 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
1226 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1230 level = btree_path_up_until_good_node(trans, path, cmp);
1232 if (btree_path_node(path, level)) {
1233 struct btree_path_level *l = &path->l[level];
1235 BUG_ON(!btree_node_locked(path, level));
1237 * We might have to skip over many keys, or just a few: try
1238 * advancing the node iterator, and if we have to skip over too
1239 * many keys just reinit it (or if we're rewinding, since that
1243 !btree_path_advance_to_pos(path, l, 8))
1244 bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);
1247 * Iterators to interior nodes should always be pointed at the first non
1250 if (unlikely(level))
1251 bch2_btree_node_iter_peek(&l->iter, l->b);
1254 if (unlikely(level != path->level)) {
1255 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1256 __bch2_btree_path_unlock(trans, path);
1259 bch2_btree_path_verify(trans, path);
1263 /* Btree path: main interface: */
1265 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
1267 struct btree_path *sib;
1269 sib = prev_btree_path(trans, path);
1270 if (sib && !btree_path_cmp(sib, path))
1273 sib = next_btree_path(trans, path);
1274 if (sib && !btree_path_cmp(sib, path))
1280 static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
1282 struct btree_path *sib;
1284 sib = prev_btree_path(trans, path);
1285 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1288 sib = next_btree_path(trans, path);
1289 if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
1295 static inline void __bch2_path_free(struct btree_trans *trans, struct btree_path *path)
1297 __bch2_btree_path_unlock(trans, path);
1298 btree_path_list_remove(trans, path);
1299 trans->paths_allocated &= ~(1ULL << path->idx);
1302 void bch2_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
1304 struct btree_path *dup;
1306 EBUG_ON(trans->paths + path->idx != path);
1307 EBUG_ON(!path->ref);
1309 if (!__btree_path_put(path, intent))
1312 dup = path->preserve
1313 ? have_path_at_pos(trans, path)
1314 : have_node_at_pos(trans, path);
1316 if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
1319 if (path->should_be_locked &&
1320 !trans->restarted &&
1321 (!dup || !bch2_btree_path_relock_norestart(trans, dup, _THIS_IP_)))
1325 dup->preserve |= path->preserve;
1326 dup->should_be_locked |= path->should_be_locked;
1329 __bch2_path_free(trans, path);
1332 static void bch2_path_put_nokeep(struct btree_trans *trans, struct btree_path *path,
1335 EBUG_ON(trans->paths + path->idx != path);
1336 EBUG_ON(!path->ref);
1338 if (!__btree_path_put(path, intent))
1341 __bch2_path_free(trans, path);
1344 void bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
1346 panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
1347 trans->restart_count, restart_count,
1348 (void *) trans->last_begin_ip);
1351 void bch2_trans_in_restart_error(struct btree_trans *trans)
1353 panic("in transaction restart: %s, last restarted by %pS\n",
1354 bch2_err_str(trans->restarted),
1355 (void *) trans->last_restarted_ip);
1359 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
1361 struct btree_insert_entry *i;
1362 struct btree_write_buffered_key *wb;
1364 prt_printf(buf, "transaction updates for %s journal seq %llu",
1365 trans->fn, trans->journal_res.seq);
1367 printbuf_indent_add(buf, 2);
1369 trans_for_each_update(trans, i) {
1370 struct bkey_s_c old = { &i->old_k, i->old_v };
1372 prt_printf(buf, "update: btree=%s cached=%u %pS",
1373 bch2_btree_ids[i->btree_id],
1375 (void *) i->ip_allocated);
1378 prt_printf(buf, " old ");
1379 bch2_bkey_val_to_text(buf, trans->c, old);
1382 prt_printf(buf, " new ");
1383 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
1387 trans_for_each_wb_update(trans, wb) {
1388 prt_printf(buf, "update: btree=%s wb=1",
1389 bch2_btree_ids[wb->btree]);
1393 prt_printf(buf, " new ");
1394 bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(&wb->k));
1398 printbuf_indent_sub(buf, 2);
1402 void bch2_dump_trans_updates(struct btree_trans *trans)
1404 struct printbuf buf = PRINTBUF;
1406 bch2_trans_updates_to_text(&buf, trans);
1407 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1408 printbuf_exit(&buf);
1412 void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
1414 prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
1415 path->idx, path->ref, path->intent_ref,
1416 path->preserve ? 'P' : ' ',
1417 path->should_be_locked ? 'S' : ' ',
1418 bch2_btree_ids[path->btree_id],
1420 bch2_bpos_to_text(out, path->pos);
1422 prt_printf(out, " locks %u", path->nodes_locked);
1423 #ifdef TRACK_PATH_ALLOCATED
1424 prt_printf(out, " %pS", (void *) path->ip_allocated);
1429 static noinline __cold
1430 void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
1433 struct btree_path *path;
1437 btree_trans_sort_paths(trans);
1439 trans_for_each_path_inorder(trans, path, idx)
1440 bch2_btree_path_to_text(out, path);
1444 void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
1446 __bch2_trans_paths_to_text(out, trans, false);
1449 static noinline __cold
1450 void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
1452 struct printbuf buf = PRINTBUF;
1454 __bch2_trans_paths_to_text(&buf, trans, nosort);
1455 bch2_trans_updates_to_text(&buf, trans);
1457 bch2_print_string_as_lines(KERN_ERR, buf.buf);
1458 printbuf_exit(&buf);
1462 void bch2_dump_trans_paths_updates(struct btree_trans *trans)
1464 __bch2_dump_trans_paths_updates(trans, false);
1468 static void bch2_trans_update_max_paths(struct btree_trans *trans)
1470 struct btree_transaction_stats *s = btree_trans_stats(trans);
1471 struct printbuf buf = PRINTBUF;
1476 bch2_trans_paths_to_text(&buf, trans);
1478 if (!buf.allocation_failure) {
1479 mutex_lock(&s->lock);
1480 if (s->nr_max_paths < hweight64(trans->paths_allocated)) {
1481 s->nr_max_paths = trans->nr_max_paths =
1482 hweight64(trans->paths_allocated);
1483 swap(s->max_paths_text, buf.buf);
1485 mutex_unlock(&s->lock);
1488 printbuf_exit(&buf);
1490 trans->nr_max_paths = hweight64(trans->paths_allocated);
1493 static noinline void btree_path_overflow(struct btree_trans *trans)
1495 bch2_dump_trans_paths_updates(trans);
1496 panic("trans path oveflow\n");
1499 static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
1500 struct btree_path *pos)
1502 struct btree_path *path;
1505 if (unlikely(trans->paths_allocated ==
1506 ~((~0ULL << 1) << (BTREE_ITER_MAX - 1))))
1507 btree_path_overflow(trans);
1509 idx = __ffs64(~trans->paths_allocated);
1512 * Do this before marking the new path as allocated, since it won't be
1515 if (unlikely(idx > trans->nr_max_paths))
1516 bch2_trans_update_max_paths(trans);
1518 trans->paths_allocated |= 1ULL << idx;
1520 path = &trans->paths[idx];
1523 path->intent_ref = 0;
1524 path->nodes_locked = 0;
1526 btree_path_list_add(trans, pos, path);
1527 trans->paths_sorted = false;
1531 struct btree_path *bch2_path_get(struct btree_trans *trans,
1532 enum btree_id btree_id, struct bpos pos,
1533 unsigned locks_want, unsigned level,
1534 unsigned flags, unsigned long ip)
1536 struct btree_path *path, *path_pos = NULL;
1537 bool cached = flags & BTREE_ITER_CACHED;
1538 bool intent = flags & BTREE_ITER_INTENT;
1541 bch2_trans_verify_not_in_restart(trans);
1542 bch2_trans_verify_locks(trans);
1544 btree_trans_sort_paths(trans);
1546 trans_for_each_path_inorder(trans, path, i) {
1547 if (__btree_path_cmp(path,
1558 path_pos->cached == cached &&
1559 path_pos->btree_id == btree_id &&
1560 path_pos->level == level) {
1561 __btree_path_get(path_pos, intent);
1562 path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
1564 path = btree_path_alloc(trans, path_pos);
1567 __btree_path_get(path, intent);
1569 path->btree_id = btree_id;
1570 path->cached = cached;
1571 path->uptodate = BTREE_ITER_NEED_TRAVERSE;
1572 path->should_be_locked = false;
1573 path->level = level;
1574 path->locks_want = locks_want;
1575 path->nodes_locked = 0;
1576 for (i = 0; i < ARRAY_SIZE(path->l); i++)
1577 path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
1578 #ifdef TRACK_PATH_ALLOCATED
1579 path->ip_allocated = ip;
1581 trans->paths_sorted = false;
1584 if (!(flags & BTREE_ITER_NOPRESERVE))
1585 path->preserve = true;
1587 if (path->intent_ref)
1588 locks_want = max(locks_want, level + 1);
1591 * If the path has locks_want greater than requested, we don't downgrade
1592 * it here - on transaction restart because btree node split needs to
1593 * upgrade locks, we might be putting/getting the iterator again.
1594 * Downgrading iterators only happens via bch2_trans_downgrade(), after
1595 * a successful transaction commit.
1598 locks_want = min(locks_want, BTREE_MAX_DEPTH);
1599 if (locks_want > path->locks_want)
1600 bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);
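/*
 * Example usage (a sketch): every bch2_path_get() must be balanced by a
 * bch2_path_put() with a matching intent flag:
 *
 *	path = bch2_path_get(trans, BTREE_ID_extents, pos, 1, 0,
 *			     BTREE_ITER_INTENT, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, path, 0);
 *	...
 *	bch2_path_put(trans, path, true);
 */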
1605 struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
1608 struct btree_path_level *l = path_l(path);
1609 struct bkey_packed *_k;
1612 if (unlikely(!l->b))
1613 return bkey_s_c_null;
1615 EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
1616 EBUG_ON(!btree_node_locked(path, path->level));
1618 if (!path->cached) {
1619 _k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
1620 k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
1622 EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
1624 if (!k.k || !bpos_eq(path->pos, k.k->p))
1627 struct bkey_cached *ck = (void *) path->l[0].b;
1630 (path->btree_id != ck->key.btree_id ||
1631 !bkey_eq(path->pos, ck->key.pos)));
1632 if (!ck || !ck->valid)
1633 return bkey_s_c_null;
1636 k = bkey_i_to_s_c(ck->k);
1643 return (struct bkey_s_c) { u, NULL };
1646 /* Btree iterators: */
1649 __bch2_btree_iter_traverse(struct btree_iter *iter)
1651 return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1655 bch2_btree_iter_traverse(struct btree_iter *iter)
1659 iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
1660 btree_iter_search_key(iter),
1661 iter->flags & BTREE_ITER_INTENT,
1662 btree_iter_ip_allocated(iter));
1664 ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
1668 btree_path_set_should_be_locked(iter->path);
1672 /* Iterate across nodes (leaf and interior nodes) */
1674 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
1676 struct btree_trans *trans = iter->trans;
1677 struct btree *b = NULL;
1680 EBUG_ON(iter->path->cached);
1681 bch2_btree_iter_verify(iter);
1683 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1687 b = btree_path_node(iter->path, iter->path->level);
1691 BUG_ON(bpos_lt(b->key.k.p, iter->pos));
1693 bkey_init(&iter->k);
1694 iter->k.p = iter->pos = b->key.k.p;
1696 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1697 iter->flags & BTREE_ITER_INTENT,
1698 btree_iter_ip_allocated(iter));
1699 btree_path_set_should_be_locked(iter->path);
1701 bch2_btree_iter_verify_entry_exit(iter);
1702 bch2_btree_iter_verify(iter);
1710 struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
1714 while (b = bch2_btree_iter_peek_node(iter),
1715 bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
1716 bch2_trans_begin(iter->trans);
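/*
 * Example usage (a sketch): this is the helper behind the
 * for_each_btree_node() macro in btree_iter.h, which walks every node in
 * a btree:
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *		bch2_btree_node_to_text(&buf, c, b);
 *	bch2_trans_iter_exit(trans, &iter);
 */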
1721 struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
1723 struct btree_trans *trans = iter->trans;
1724 struct btree_path *path = iter->path;
1725 struct btree *b = NULL;
1728 bch2_trans_verify_not_in_restart(trans);
1729 EBUG_ON(iter->path->cached);
1730 bch2_btree_iter_verify(iter);
1732 /* already at end? */
1733 if (!btree_path_node(path, path->level))
1737 if (!btree_path_node(path, path->level + 1)) {
1738 btree_path_set_level_up(trans, path);
1742 if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
1743 __bch2_btree_path_unlock(trans, path);
1744 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1745 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
1746 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
1747 trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
1748 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
1752 b = btree_path_node(path, path->level + 1);
1754 if (bpos_eq(iter->pos, b->key.k.p)) {
1755 __btree_path_set_level_up(trans, path, path->level++);
1758 * Haven't gotten to the end of the parent node: go back down to
1759 * the next child node
1762 bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
1763 iter->flags & BTREE_ITER_INTENT,
1764 btree_iter_ip_allocated(iter));
1766 btree_path_set_level_down(trans, path, iter->min_depth);
1768 ret = bch2_btree_path_traverse(trans, path, iter->flags);
1772 b = path->l[path->level].b;
1775 bkey_init(&iter->k);
1776 iter->k.p = iter->pos = b->key.k.p;
1778 iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
1779 iter->flags & BTREE_ITER_INTENT,
1780 btree_iter_ip_allocated(iter));
1781 btree_path_set_should_be_locked(iter->path);
1782 BUG_ON(iter->path->uptodate);
1784 bch2_btree_iter_verify_entry_exit(iter);
1785 bch2_btree_iter_verify(iter);
1793 /* Iterate across keys (in leaf nodes only) */
1795 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
1797 if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
1798 struct bpos pos = iter->k.p;
1799 bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1800 ? bpos_eq(pos, SPOS_MAX)
1801 : bkey_eq(pos, SPOS_MAX));
1803 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1804 pos = bkey_successor(iter, pos);
1805 bch2_btree_iter_set_pos(iter, pos);
1808 if (!btree_path_node(iter->path, iter->path->level))
1811 iter->advanced = true;
1816 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
1818 struct bpos pos = bkey_start_pos(&iter->k);
1819 bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
1820 ? bpos_eq(pos, POS_MIN)
1821 : bkey_eq(pos, POS_MIN));
1823 if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
1824 pos = bkey_predecessor(iter, pos);
1825 bch2_btree_iter_set_pos(iter, pos);
1830 struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
1832 struct btree_insert_entry *i;
1833 struct bkey_i *ret = NULL;
1835 trans_for_each_update(iter->trans, i) {
1836 if (i->btree_id < iter->btree_id)
1838 if (i->btree_id > iter->btree_id)
1840 if (bpos_lt(i->k->k.p, iter->path->pos))
1842 if (i->key_cache_already_flushed)
1844 if (!ret || bpos_lt(i->k->k.p, ret->k.p))
1851 static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
1853 return iter->flags & BTREE_ITER_WITH_UPDATES
1854 ? __bch2_btree_trans_peek_updates(iter)
1858 static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
1859 struct btree_iter *iter,
1860 struct bpos end_pos)
1864 if (bpos_lt(iter->path->pos, iter->journal_pos))
1865 iter->journal_idx = 0;
1867 k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
1871 &iter->journal_idx);
1873 iter->journal_pos = k ? k->k.p : end_pos;
1878 struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
1879 struct btree_iter *iter)
1881 struct bkey_i *k = bch2_btree_journal_peek(trans, iter, iter->path->pos);
1885 return bkey_i_to_s_c(k);
1887 return bkey_s_c_null;
1892 struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
1893 struct btree_iter *iter,
1896 struct bkey_i *next_journal =
1897 bch2_btree_journal_peek(trans, iter,
1898 k.k ? k.k->p : path_l(iter->path)->b->key.k.p);
1901 iter->k = next_journal->k;
1902 k = bkey_i_to_s_c(next_journal);
1909 * Checks btree key cache for key at iter->pos and returns it if present, or
1913 struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
1915 struct btree_trans *trans = iter->trans;
1916 struct bch_fs *c = trans->c;
1921 if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
1922 bpos_eq(iter->pos, pos))
1923 return bkey_s_c_null;
1925 if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
1926 return bkey_s_c_null;
1928 if (!iter->key_cache_path)
1929 iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
1930 iter->flags & BTREE_ITER_INTENT, 0,
1931 iter->flags|BTREE_ITER_CACHED|
1932 BTREE_ITER_CACHED_NOFILL,
1935 iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
1936 iter->flags & BTREE_ITER_INTENT,
1937 btree_iter_ip_allocated(iter));
1939 ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
1940 iter->flags|BTREE_ITER_CACHED) ?:
1941 bch2_btree_path_relock(trans, iter->path, _THIS_IP_);
1943 return bkey_s_c_err(ret);
1945 btree_path_set_should_be_locked(iter->key_cache_path);
1947 k = bch2_btree_path_peek_slot(iter->key_cache_path, &u);
1948 if (k.k && !bkey_err(k)) {
1955 static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
1957 struct btree_trans *trans = iter->trans;
1958 struct bkey_i *next_update;
1959 struct bkey_s_c k, k2;
1962 EBUG_ON(iter->path->cached);
1963 bch2_btree_iter_verify(iter);
1966 struct btree_path_level *l;
1968 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
1969 iter->flags & BTREE_ITER_INTENT,
1970 btree_iter_ip_allocated(iter));
1972 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
1973 if (unlikely(ret)) {
1974 /* ensure that iter->k is consistent with iter->pos: */
1975 bch2_btree_iter_set_pos(iter, iter->pos);
1976 k = bkey_s_c_err(ret);
1980 l = path_l(iter->path);
1982 if (unlikely(!l->b)) {
1983 /* No btree nodes at requested level: */
1984 bch2_btree_iter_set_pos(iter, SPOS_MAX);
1989 btree_path_set_should_be_locked(iter->path);
1991 k = btree_path_level_peek_all(trans->c, l, &iter->k);
1993 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
1995 (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
1999 bch2_btree_iter_set_pos(iter, iter->pos);
2004 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
2005 k = btree_trans_peek_journal(trans, iter, k);
2007 next_update = btree_trans_peek_updates(iter);
2010 bpos_le(next_update->k.p,
2011 k.k ? k.k->p : l->b->key.k.p)) {
2012 iter->k = next_update->k;
2013 k = bkey_i_to_s_c(next_update);
2016 if (k.k && bkey_deleted(k.k)) {
2018 * If we've got a whiteout, and it's after the search
2019 * key, advance the search key to the whiteout instead
2020 * of just after the whiteout - it might be a btree
2021 * whiteout, with a real key at the same position, since
2022 * in the btree, deleted keys sort before non-deleted.
2024 search_key = !bpos_eq(search_key, k.k->p)
2026 : bpos_successor(k.k->p);
2032 } else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
2033 /* Advance to next leaf node: */
2034 search_key = bpos_successor(l->b->key.k.p);
2037 bch2_btree_iter_set_pos(iter, SPOS_MAX);
2043 bch2_btree_iter_verify(iter);
2049 * bch2_btree_iter_peek_upto: returns first key greater than or equal to iterator's
2052 struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
2054 struct btree_trans *trans = iter->trans;
2055 struct bpos search_key = btree_iter_search_key(iter);
2057 struct bpos iter_pos;
2060 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2061 EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
2063 if (iter->update_path) {
2064 bch2_path_put_nokeep(trans, iter->update_path,
2065 iter->flags & BTREE_ITER_INTENT);
2066 iter->update_path = NULL;
2069 bch2_btree_iter_verify_entry_exit(iter);
2072 k = __bch2_btree_iter_peek(iter, search_key);
2075 if (unlikely(bkey_err(k)))
2079 * iter->pos should be monotonically increasing, and always be
2080 * equal to the key we just returned - except extents can
2081 * straddle iter->pos:
2083 if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
2086 iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
2088 if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
2089 ? bkey_gt(iter_pos, end)
2090 : bkey_ge(iter_pos, end)))
2093 if (iter->update_path &&
2094 !bkey_eq(iter->update_path->pos, k.k->p)) {
2095 bch2_path_put_nokeep(trans, iter->update_path,
2096 iter->flags & BTREE_ITER_INTENT);
2097 iter->update_path = NULL;
2100 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2101 (iter->flags & BTREE_ITER_INTENT) &&
2102 !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
2103 !iter->update_path) {
2104 struct bpos pos = k.k->p;
2106 if (pos.snapshot < iter->snapshot) {
2107 search_key = bpos_successor(k.k->p);
2111 pos.snapshot = iter->snapshot;
2114 * advance, same as on exit for iter->path, but only up
2117 __btree_path_get(iter->path, iter->flags & BTREE_ITER_INTENT);
2118 iter->update_path = iter->path;
2120 iter->update_path = bch2_btree_path_set_pos(trans,
2121 iter->update_path, pos,
2122 iter->flags & BTREE_ITER_INTENT,
2124 ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
2125 if (unlikely(ret)) {
2126 k = bkey_s_c_err(ret);
2132 * We can never have a key in a leaf node at POS_MAX, so
2133 * we don't have to check these successor() calls:
2135 if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
2136 !bch2_snapshot_is_ancestor(trans->c,
2139 search_key = bpos_successor(k.k->p);
2143 if (bkey_whiteout(k.k) &&
2144 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2145 search_key = bkey_successor(iter, k.k->p);
2152 iter->pos = iter_pos;
2154 iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
2155 iter->flags & BTREE_ITER_INTENT,
2156 btree_iter_ip_allocated(iter));
2158 btree_path_set_should_be_locked(iter->path);
2160 if (iter->update_path) {
2161 ret = bch2_btree_path_relock(trans, iter->update_path, _THIS_IP_);
2163 k = bkey_s_c_err(ret);
2165 btree_path_set_should_be_locked(iter->update_path);
2168 if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
2169 iter->pos.snapshot = iter->snapshot;
2171 ret = bch2_btree_iter_verify_ret(iter, k);
2172 if (unlikely(ret)) {
2173 bch2_btree_iter_set_pos(iter, iter->pos);
2174 k = bkey_s_c_err(ret);
2177 bch2_btree_iter_verify_entry_exit(iter);
2181 bch2_btree_iter_set_pos(iter, end);
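/*
 * Example usage (a sketch): this is typically consumed via the
 * for_each_btree_key_upto() macros; open-coded, the loop looks roughly
 * like the following, where process() is a hypothetical callback:
 *
 *	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k &&
 *	       !(ret = bkey_err(k))) {
 *		process(k);
 *		bch2_btree_iter_advance(&iter);
 *	}
 */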
2187 * bch2_btree_iter_peek_all_levels: returns the first key greater than or equal
2188 * to iterator's current position, returning keys from every level of the btree.
2189 * For keys at different levels of the btree that compare equal, the key from
2190 * the lower level (leaf) is returned first.
2192 struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
2194 struct btree_trans *trans = iter->trans;
2198 EBUG_ON(iter->path->cached);
2199 bch2_btree_iter_verify(iter);
2200 BUG_ON(iter->path->level < iter->min_depth);
2201 BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
2202 EBUG_ON(!(iter->flags & BTREE_ITER_ALL_LEVELS));
2205 iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
2206 iter->flags & BTREE_ITER_INTENT,
2207 btree_iter_ip_allocated(iter));
2209 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2210 if (unlikely(ret)) {
2211 /* ensure that iter->k is consistent with iter->pos: */
2212 bch2_btree_iter_set_pos(iter, iter->pos);
2213 k = bkey_s_c_err(ret);
2217 /* Already at end? */
2218 if (!btree_path_node(iter->path, iter->path->level)) {
2223 k = btree_path_level_peek_all(trans->c,
2224 &iter->path->l[iter->path->level], &iter->k);
2226 /* Check if we should go up to the parent node: */
2229 bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
2230 iter->pos = path_l(iter->path)->b->key.k.p;
2231 btree_path_set_level_up(trans, iter->path);
2232 iter->advanced = false;
2237 * Check if we should go back down to a leaf:
2238 * If we're not in a leaf node, we only return the current key
2239 * if it exactly matches iter->pos - otherwise we first have to
2240 * go back to the leaf:
2242 if (iter->path->level != iter->min_depth &&
2245 !bpos_eq(iter->pos, k.k->p))) {
2246 btree_path_set_level_down(trans, iter->path, iter->min_depth);
2247 iter->pos = bpos_successor(iter->pos);
2248 iter->advanced = false;
2252 /* Check if we should go to the next key: */
2253 if (iter->path->level == iter->min_depth &&
2256 bpos_eq(iter->pos, k.k->p)) {
2257 iter->pos = bpos_successor(iter->pos);
2258 iter->advanced = false;
2262 if (iter->advanced &&
2263 iter->path->level == iter->min_depth &&
2264 !bpos_eq(k.k->p, iter->pos))
2265 iter->advanced = false;
2267 BUG_ON(iter->advanced);
2273 btree_path_set_should_be_locked(iter->path);
2275 bch2_btree_iter_verify(iter);
2281 * bch2_btree_iter_next: returns first key greater than iterator's current
2284 struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
2286 if (!bch2_btree_iter_advance(iter))
2287 return bkey_s_c_null;
2289 return bch2_btree_iter_peek(iter);
2293 * bch2_btree_iter_peek_prev: returns first key less than or equal to
2294 * iterator's current position
2296 struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
2298 struct btree_trans *trans = iter->trans;
2299 struct bpos search_key = iter->pos;
2300 struct btree_path *saved_path = NULL;
2302 struct bkey saved_k;
2303 const struct bch_val *saved_v;
2306 EBUG_ON(iter->path->cached || iter->path->level);
2307 EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
2309 if (iter->flags & BTREE_ITER_WITH_JOURNAL)
2310 return bkey_s_c_err(-EIO);
2312 bch2_btree_iter_verify(iter);
2313 bch2_btree_iter_verify_entry_exit(iter);
2315 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2316 search_key.snapshot = U32_MAX;
2319 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2320 iter->flags & BTREE_ITER_INTENT,
2321 btree_iter_ip_allocated(iter));
2323 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2324 if (unlikely(ret)) {
2325 /* ensure that iter->k is consistent with iter->pos: */
2326 bch2_btree_iter_set_pos(iter, iter->pos);
2327 k = bkey_s_c_err(ret);
2331 k = btree_path_level_peek(trans, iter->path,
2332 &iter->path->l[0], &iter->k);
2334 ((iter->flags & BTREE_ITER_IS_EXTENTS)
2335 ? bpos_ge(bkey_start_pos(k.k), search_key)
2336 : bpos_gt(k.k->p, search_key)))
2337 k = btree_path_level_prev(trans, iter->path,
2338 &iter->path->l[0], &iter->k);
2341 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
2342 if (k.k->p.snapshot == iter->snapshot)
2346 * If we have a saved candidate, and we're no
2347 * longer at the same _key_ (not pos), return
2350 if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
2351 bch2_path_put_nokeep(trans, iter->path,
2352 iter->flags & BTREE_ITER_INTENT);
2353 iter->path = saved_path;
2360 if (bch2_snapshot_is_ancestor(iter->trans->c,
2364 bch2_path_put_nokeep(trans, saved_path,
2365 iter->flags & BTREE_ITER_INTENT);
2366 saved_path = btree_path_clone(trans, iter->path,
2367 iter->flags & BTREE_ITER_INTENT);
2372 search_key = bpos_predecessor(k.k->p);
2376 if (bkey_whiteout(k.k) &&
2377 !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
2378 search_key = bkey_predecessor(iter, k.k->p);
2379 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2380 search_key.snapshot = U32_MAX;
2385 } else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
2386 /* Advance to previous leaf node: */
2387 search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
2389 /* Start of btree: */
2390 bch2_btree_iter_set_pos(iter, POS_MIN);
2396 EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
2398 /* Extents can straddle iter->pos: */
2399 if (bkey_lt(k.k->p, iter->pos))
2402 if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
2403 iter->pos.snapshot = iter->snapshot;
2405 btree_path_set_should_be_locked(iter->path);
2408 bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
2410 bch2_btree_iter_verify_entry_exit(iter);
2411 bch2_btree_iter_verify(iter);
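/*
 * Added commentary: in FILTER_SNAPSHOTS mode a backwards scan can't stop
 * at the first key at a matching position - a key in a more specific
 * snapshot may still turn up later in the scan, which is why the
 * saved_path/saved_k candidate is carried until we move to a different
 * key.
 */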
2417 * bch2_btree_iter_prev: returns first key less than iterator's current
2420 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2422 if (!bch2_btree_iter_rewind(iter))
2423 return bkey_s_c_null;
2425 return bch2_btree_iter_peek_prev(iter);
2428 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2430 struct btree_trans *trans = iter->trans;
2431 struct bpos search_key;
2435 bch2_btree_iter_verify(iter);
2436 bch2_btree_iter_verify_entry_exit(iter);
2437 EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
2438 EBUG_ON(iter->path->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2440 /* extents can't span inode numbers: */
2441 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2442 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2443 if (iter->pos.inode == KEY_INODE_MAX)
2444 return bkey_s_c_null;
2446 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
	search_key = btree_iter_search_key(iter);
	iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					     iter->flags & BTREE_ITER_INTENT,
					     btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (unlikely(ret)) {
		k = bkey_s_c_err(ret);
		goto out_no_locked;
	}

	if ((iter->flags & BTREE_ITER_CACHED) ||
	    !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
		struct bkey_i *next_update;

		if ((next_update = btree_trans_peek_updates(iter)) &&
		    bpos_eq(next_update->k.p, iter->pos)) {
			iter->k = next_update->k;
			k = bkey_i_to_s_c(next_update);
			goto out;
		}

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
		    (k = btree_trans_peek_slot_journal(trans, iter)).k)
			goto out;

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
			if (!bkey_err(k))
				iter->k = *k.k;
			/* We're not returning a key from iter->path: */
			goto out_no_locked;
		}

		k = bch2_btree_path_peek_slot(iter->path, &iter->k);
		if (unlikely(!k.k))
			goto out_no_locked;
	} else {
		struct bpos next;
		struct bpos end = iter->pos;

		if (iter->flags & BTREE_ITER_IS_EXTENTS)
			end.offset = U64_MAX;

		EBUG_ON(iter->path->level);

		if (iter->flags & BTREE_ITER_INTENT) {
			struct btree_iter iter2;

			bch2_trans_copy_iter(&iter2, iter);
			k = bch2_btree_iter_peek_upto(&iter2, end);

			if (k.k && !bkey_err(k)) {
				iter->k = iter2.k;
				k.k = &iter->k;
			}
			bch2_trans_iter_exit(trans, &iter2);
		} else {
			struct bpos pos = iter->pos;

			k = bch2_btree_iter_peek_upto(iter, end);
			if (unlikely(bkey_err(k)))
				bch2_btree_iter_set_pos(iter, pos);
			else
				iter->pos = pos;
		}

		if (unlikely(bkey_err(k)))
			goto out_no_locked;

		next = k.k ? bkey_start_pos(k.k) : POS_MAX;

		if (bkey_lt(iter->pos, next)) {
			bkey_init(&iter->k);
			iter->k.p = iter->pos;

			if (iter->flags & BTREE_ITER_IS_EXTENTS) {
				bch2_key_resize(&iter->k,
						min_t(u64, KEY_SIZE_MAX,
						      (next.inode == iter->pos.inode
						       ? next.offset
						       : KEY_OFFSET_MAX) -
						      iter->pos.offset));
				EBUG_ON(!iter->k.size);
			}

			k = (struct bkey_s_c) { &iter->k, NULL };
		}
	}
out:
	btree_path_set_should_be_locked(iter->path);
out_no_locked:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	return k;
}

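/*
 * Worked example for the hole-synthesis path above (a sketch, not from the
 * original source): on an extents btree with the next extent starting at
 * offset 10 and the iterator at offset 4, peek_slot() returns a synthetic
 * deleted key spanning [4, 10):
 *
 *	bkey_init(&iter->k);		// zero-size deleted key
 *	iter->k.p = iter->pos;		// p.offset = 4
 *	bch2_key_resize(&iter->k, 6);	// now start = 4, p.offset = 10
 *
 * so slot iteration returns a key for every position, holes included.
 */
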
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
{
	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(iter->trans) ||
	       (k = bch2_btree_iter_peek_type(iter, iter->flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(iter->trans);

	return k;
}

/* new transactional stuff: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	BUG_ON(trans->nr_sorted != hweight64(trans->paths_allocated));

	trans_for_each_path(trans, path) {
		BUG_ON(path->sorted_idx >= trans->nr_sorted);
		BUG_ON(trans->sorted[path->sorted_idx] != path->idx);
	}

	for (i = 0; i < trans->nr_sorted; i++) {
		unsigned idx = trans->sorted[i];

		EBUG_ON(!(trans->paths_allocated & (1ULL << idx)));
		BUG_ON(trans->paths[idx].sorted_idx != i);
	}
}

static void btree_trans_verify_sorted(struct btree_trans *trans)
{
	struct btree_path *path, *prev = NULL;
	unsigned i;

	if (!bch2_debug_check_iterators)
		return;

	trans_for_each_path_inorder(trans, path, i) {
		if (prev && btree_path_cmp(prev, path) > 0) {
			__bch2_dump_trans_paths_updates(trans, true);
			panic("trans paths out of order!\n");
		}
		prev = path;
	}
}
#else
static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
#endif

void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
{
	int i, l = 0, r = trans->nr_sorted, inc = 1;
	bool swapped;

	btree_trans_verify_sorted_refs(trans);

	if (trans->paths_sorted)
		goto out;

	/*
	 * Cocktail shaker sort: this is efficient because iterators will be
	 * mostly sorted.
	 */
	do {
		swapped = false;

		for (i = inc > 0 ? l : r - 2;
		     i + 1 < r && i >= l;
		     i += inc)
			if (btree_path_cmp(trans->paths + trans->sorted[i],
					   trans->paths + trans->sorted[i + 1]) > 0) {
				swap(trans->sorted[i], trans->sorted[i + 1]);
				trans->paths[trans->sorted[i]].sorted_idx = i;
				trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
				swapped = true;
			}

		if (inc > 0)
			--r;
		else
			++l;
		inc = -inc;
	} while (swapped);

	trans->paths_sorted = true;
out:
	btree_trans_verify_sorted(trans);
}

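/*
 * A minimal standalone sketch of the cocktail shaker sort used above
 * (hypothetical helper, not part of this file): alternating forward and
 * backward bubble passes, shrinking the unsorted window from both ends.
 * Worst case it's O(n^2), but on nearly-sorted input - the common case
 * for a transaction's paths - it finishes in close to one pass:
 *
 *	static void shaker_sort(int *a, int n)
 *	{
 *		int l = 0, r = n, i, inc = 1;
 *		bool swapped;
 *
 *		do {
 *			swapped = false;
 *			for (i = inc > 0 ? l : r - 2;
 *			     i + 1 < r && i >= l;
 *			     i += inc)
 *				if (a[i] > a[i + 1]) {
 *					swap(a[i], a[i + 1]);
 *					swapped = true;
 *				}
 *			if (inc > 0)
 *				--r;
 *			else
 *				++l;
 *			inc = -inc;
 *		} while (swapped);
 *	}
 */
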
static inline void btree_path_list_remove(struct btree_trans *trans,
					  struct btree_path *path)
{
	unsigned i;

	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	trans->nr_sorted--;
	memmove_u64s_down_small(trans->sorted + path->sorted_idx,
				trans->sorted + path->sorted_idx + 1,
				DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
#else
	array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
#endif
	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	path->sorted_idx = U8_MAX;
}

static inline void btree_path_list_add(struct btree_trans *trans,
				       struct btree_path *pos,
				       struct btree_path *path)
{
	unsigned i;

	path->sorted_idx = pos ? pos->sorted_idx + 1 : trans->nr_sorted;

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
	memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
			      trans->sorted + path->sorted_idx,
			      DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
	trans->nr_sorted++;
	trans->sorted[path->sorted_idx] = path->idx;
#else
	array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
#endif

	for (i = path->sorted_idx; i < trans->nr_sorted; i++)
		trans->paths[trans->sorted[i]].sorted_idx = i;

	btree_trans_verify_sorted_refs(trans);
}

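/*
 * Example (illustrative, not from the original source): with
 * trans->sorted = { 0, 2, 1 } and nr_sorted = 3, inserting path idx 3
 * after the path at sorted position 0 gives sorted = { 0, 3, 2, 1 }; the
 * fixup loop above then rewrites paths[3].sorted_idx = 1,
 * paths[2].sorted_idx = 2 and paths[1].sorted_idx = 3, so every path can
 * find itself in the sorted list in O(1). The memmove_u64s_*_small()
 * variants exploit the fact that sorted[] entries are single bytes, moving
 * them a u64 at a time (hence the DIV_ROUND_UP(..., 8)).
 */
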
void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
	if (iter->update_path)
		bch2_path_put_nokeep(trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	if (iter->path)
		bch2_path_put(trans, iter->path,
			      iter->flags & BTREE_ITER_INTENT);
	if (iter->key_cache_path)
		bch2_path_put(trans, iter->key_cache_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->path = NULL;
	iter->update_path = NULL;
	iter->key_cache_path = NULL;
}

void bch2_trans_iter_init_outlined(struct btree_trans *trans,
				   struct btree_iter *iter,
				   unsigned btree_id, struct bpos pos,
				   unsigned flags)
{
	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				    bch2_btree_iter_flags(trans, btree_id, flags),
				    _RET_IP_);
}

void bch2_trans_node_iter_init(struct btree_trans *trans,
			       struct btree_iter *iter,
			       enum btree_id btree_id,
			       struct bpos pos,
			       unsigned locks_want,
			       unsigned depth,
			       unsigned flags)
{
	flags |= BTREE_ITER_NOT_EXTENTS;
	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
	flags |= BTREE_ITER_ALL_SNAPSHOTS;

	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
				    __bch2_btree_iter_flags(trans, btree_id, flags),
				    _RET_IP_);

	iter->min_depth = depth;

	BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
	BUG_ON(iter->path->level != depth);
	BUG_ON(iter->min_depth != depth);
}

void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
{
	*dst = *src;

	__btree_path_get(src->path, src->flags & BTREE_ITER_INTENT);
	if (src->update_path)
		__btree_path_get(src->update_path, src->flags & BTREE_ITER_INTENT);
	dst->key_cache_path = NULL;
}

void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	unsigned new_top = trans->mem_top + size;
	size_t old_bytes = trans->mem_bytes;
	size_t new_bytes = roundup_pow_of_two(new_top);
	int ret;
	void *new_mem;
	void *p;

	trans->mem_max = max(trans->mem_max, new_top);

	WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);

	new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
	if (unlikely(!new_mem)) {
		bch2_trans_unlock(trans);

		new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
		if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
			new_mem = mempool_alloc(&trans->c->btree_trans_mem_pool, GFP_KERNEL);
			new_bytes = BTREE_TRANS_MEM_MAX;
			kfree(trans->mem);
		}

		if (!new_mem)
			return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		ret = bch2_trans_relock(trans);
		if (ret)
			return ERR_PTR(ret);
	}

	trans->mem = new_mem;
	trans->mem_bytes = new_bytes;

	if (old_bytes) {
		trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
	}

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	memset(p, 0, size);
	return p;
}

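/*
 * Usage sketch (an assumed caller pattern, not code from this file):
 * allocations from the transaction's bump allocator live until the
 * transaction is reset, and growing the buffer restarts the transaction,
 * so callers must be prepared for a transaction_restart error:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	if (IS_ERR(k))
 *		return PTR_ERR(k); // may be transaction_restart_mem_realloced
 */
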
static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->cached && !btree_node_locked(path, 0))
			path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	trans->srcu_lock_time = jiffies;
}

/**
 * bch2_trans_begin() - reset a transaction after an interrupted attempt
 * @trans: transaction to reset
 *
 * While iterating over nodes or updating nodes an attempt to lock a btree node
 * may return BCH_ERR_transaction_restart when the trylock fails. When this
 * occurs bch2_trans_begin() should be called and the transaction retried.
 */
u32 bch2_trans_begin(struct btree_trans *trans)
{
	struct btree_path *path;
	u64 now;

	bch2_trans_reset_updates(trans);

	trans->restart_count++;
	trans->mem_top = 0;

	trans_for_each_path(trans, path) {
		path->should_be_locked = false;

		/*
		 * If the transaction wasn't restarted, we're presuming to be
		 * doing something new: don't keep iterators except the ones
		 * that are in use - except for the subvolumes btree:
		 */
		if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
			path->preserve = false;

		/*
		 * XXX: we probably shouldn't be doing this if the transaction
		 * was restarted, but currently we still overflow transaction
		 * iterators if we do that
		 */
		if (!path->ref && !path->preserve)
			__bch2_path_free(trans, path);
		else
			path->preserve = false;
	}

	now = local_clock();
	if (!trans->restarted &&
	    (need_resched() ||
	     now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
		drop_locks_do(trans, (cond_resched(), 0));
		now = local_clock();
	}
	trans->last_begin_time = now;

	if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
		bch2_trans_reset_srcu_lock(trans);

	trans->last_begin_ip = _RET_IP_;
	if (trans->restarted) {
		bch2_btree_path_traverse_all(trans);
		trans->notrace_relock_fail = false;
	}

	return trans->restart_count;
}

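/*
 * Typical retry loop (a sketch of the expected caller pattern, not code
 * from this file; do_btree_ops() is a hypothetical transaction body):
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_btree_ops(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 */
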
static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
{
	size_t paths_bytes	= sizeof(struct btree_path) * BTREE_ITER_MAX;
	size_t updates_bytes	= sizeof(struct btree_insert_entry) * BTREE_ITER_MAX;
	void *p = NULL;

	BUG_ON(trans->used_mempool);

#ifdef __KERNEL__
	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
#endif
	if (!p)
		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
	/*
	 * paths need to be zeroed, bch2_check_for_deadlock looks at paths in
	 * other threads
	 */

	trans->paths	= p; p += paths_bytes;
	trans->updates	= p; p += updates_bytes;
}

const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];

unsigned bch2_trans_get_fn_idx(const char *fn)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
		if (!bch2_btree_transaction_fns[i] ||
		    bch2_btree_transaction_fns[i] == fn) {
			bch2_btree_transaction_fns[i] = fn;
			return i;
		}

	pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
	return i;
}

void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_idx)
	__acquires(&c->btree_trans_barrier)
{
	struct btree_transaction_stats *s;

	bch2_assert_btree_nodes_not_locked();

	memset(trans, 0, sizeof(*trans));
	trans->c		= c;
	trans->fn		= fn_idx < ARRAY_SIZE(bch2_btree_transaction_fns)
		? bch2_btree_transaction_fns[fn_idx] : NULL;
	trans->last_begin_time	= local_clock();
	trans->fn_idx		= fn_idx;
	trans->locking_wait.task = current;
	trans->journal_replay_not_finished =
		!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
	closure_init_stack(&trans->ref);

	bch2_trans_alloc_paths(trans, c);

	s = btree_trans_stats(trans);
	if (s && s->max_mem) {
		unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);

		trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);

		if (unlikely(!trans->mem)) {
			trans->mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
			trans->mem_bytes = BTREE_TRANS_MEM_MAX;
		} else {
			trans->mem_bytes = expected_mem_bytes;
		}
	}

	if (s) {
		trans->nr_max_paths = s->nr_max_paths;
		trans->wb_updates_size = s->wb_updates_size;
	}

	trans->srcu_idx		= srcu_read_lock(&c->btree_trans_barrier);
	trans->srcu_lock_time	= jiffies;

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
		struct btree_trans *pos;

		seqmutex_lock(&c->btree_trans_lock);
		list_for_each_entry(pos, &c->btree_trans_list, list) {
			/*
			 * We'd much prefer to be stricter here and completely
			 * disallow multiple btree_trans in the same thread -
			 * but the data move path calls bch2_write when we
			 * already have a btree_trans initialized.
			 */
			BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid &&
			       bch2_trans_locked(pos));

			if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
				list_add_tail(&trans->list, &pos->list);
				goto list_add_done;
			}
		}
		list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
		seqmutex_unlock(&c->btree_trans_lock);
	}
}

static void check_btree_paths_leaked(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %s!", trans->fn);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
			       bch2_btree_ids[path->btree_id],
			       (void *) path->ip_allocated);
	/* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}

void bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct btree_insert_entry *i;
	struct bch_fs *c = trans->c;
	struct btree_transaction_stats *s = btree_trans_stats(trans);

	bch2_trans_unlock(trans);

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
		seqmutex_lock(&c->btree_trans_lock);
		list_del(&trans->list);
		seqmutex_unlock(&c->btree_trans_lock);
	}

	closure_sync(&trans->ref);

	if (s)
		s->max_mem = max(s->max_mem, trans->mem_max);

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	kfree(trans->extra_journal_entries.data);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);

	trans->mem	= (void *) 0x1;
	trans->paths	= (void *) 0x1;
}

static void __maybe_unused
bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
				      struct btree_bkey_cached_common *b)
{
	struct six_lock_count c = six_lock_counts(&b->lock);
	struct task_struct *owner;
	pid_t pid;

	rcu_read_lock();
	owner = READ_ONCE(b->lock.owner);
	pid = owner ? owner->pid : 0;
	rcu_read_unlock();

	prt_tab(out);
	prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
		   b->level, bch2_btree_ids[b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(b));

	prt_tab(out);
	prt_printf(out, " locks %u:%u:%u held by pid %u",
		   c.n[0], c.n[1], c.n[2], pid);
}

void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
{
	struct btree_path *path;
	struct btree_bkey_cached_common *b;
	static char lock_types[] = { 'r', 'i', 'w' };
	unsigned l, idx;

	if (!out->nr_tabstops) {
		printbuf_tabstop_push(out, 16);
		printbuf_tabstop_push(out, 32);
	}

	prt_printf(out, "%i %s\n", trans->locking_wait.task->pid, trans->fn);

	trans_for_each_path_safe(trans, path, idx) {
		if (!path->nodes_locked)
			continue;

		prt_printf(out, "  path %u %c l=%u %s:",
			   path->idx,
			   path->cached ? 'c' : 'b',
			   path->level,
			   bch2_btree_ids[path->btree_id]);
		bch2_bpos_to_text(out, path->pos);
		prt_newline(out);

		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
			if (btree_node_locked(path, l) &&
			    !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
				prt_printf(out, "    %c l=%u ",
					   lock_types[btree_node_locked_type(path, l)], l);
				bch2_btree_bkey_cached_common_to_text(out, b);
				prt_newline(out);
			}
		}
	}

	b = READ_ONCE(trans->locking);
	if (b) {
		prt_printf(out, "  blocked for %lluus on",
			   div_u64(local_clock() - trans->locking_wait.start_time,
				   1000));
		prt_newline(out);
		prt_printf(out, "  %c", lock_types[trans->locking_wait.lock_want]);
		bch2_btree_bkey_cached_common_to_text(out, b);
		prt_newline(out);
	}
}

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	struct btree_transaction_stats *s;

	for (s = c->btree_transaction_stats;
	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
	     s++) {
		kfree(s->max_paths_text);
		bch2_time_stats_exit(&s->lock_hold_times);
	}

	if (c->btree_trans_barrier_initialized)
		cleanup_srcu_struct(&c->btree_trans_barrier);
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	struct btree_transaction_stats *s;
	unsigned nr = BTREE_ITER_MAX;
	int ret;

	for (s = c->btree_transaction_stats;
	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
	     s++) {
		bch2_time_stats_init(&s->lock_hold_times);
		mutex_init(&s->lock);
	}

	INIT_LIST_HEAD(&c->btree_trans_list);
	seqmutex_init(&c->btree_trans_lock);

	ret   = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
			sizeof(struct btree_path) * nr +
			sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX) ?:
		init_srcu_struct(&c->btree_trans_barrier);
	if (!ret)
		c->btree_trans_barrier_initialized = true;
	return ret;
}