// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "journal_io.h"
#include "replicas.h"
#include "snapshot.h"
#include "trace.h"

#include <linux/random.h>
#include <linux/prefetch.h>
static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *,
				       btree_path_idx_t, btree_path_idx_t);

static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
{
#ifdef TRACK_PATH_ALLOCATED
	return iter->ip_allocated;
#else
	return 0;
#endif
}

static btree_path_idx_t btree_path_alloc(struct btree_trans *, btree_path_idx_t);
static void bch2_trans_srcu_lock(struct btree_trans *);
static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
{
	/*
	 * Must match lock ordering as defined by __bch2_btree_node_lock:
	 */
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int((int) l->cached,	(int) r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}
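/*
 * Worked example (added for illustration; not from the original file): the
 * final comparison negates cmp_int(l->level, r_level), so for equal
 * btree_id/cached/pos, paths sort in descending level order - a parent node
 * (level 1) sorts before its child (level 0), matching top-down lock
 * acquisition:
 *
 *	__btree_path_cmp(path_at_level_1, id, false, pos, 0) < 0
 */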
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    !bkey_eq(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}
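/*
 * Example (added note): extents are indexed by their *end* position, so the
 * search key for an extent lookup is the successor of iter->pos - an extent
 * covering offsets 0-4096 has k.p.offset == 4096, and a lookup at offset 0
 * must not match a key whose end is exactly 0.
 */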
static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_lt(path->pos, b->data->min_key);
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_gt(path->pos, b->key.k.p);
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG
static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       !bkey_eq(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(trans, path, 0);
}
static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	struct printbuf buf1 = PRINTBUF;
	struct printbuf buf2 = PRINTBUF;
	struct printbuf buf3 = PRINTBUF;
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock_notrace(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(trans, path, level);
	return;
err:
	bch2_bpos_to_text(&buf1, path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);

		bch2_bkey_to_text(&buf2, &uk);
	} else {
		prt_printf(&buf2, "(none)");
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);

		bch2_bkey_to_text(&buf3, &uk);
	} else {
		prt_printf(&buf3, "(none)");
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur  key %s\n",
	      msg, level, buf1.buf, buf2.buf, buf3.buf);
}
static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(!path->cached &&
			       bch2_btree_id_root(c, path->btree_id)->b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned iter;

	trans_for_each_path(trans, path, iter)
		bch2_btree_path_verify(trans, path);
}
static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshot_field(iter->btree_id));

	if (iter->update_path)
		bch2_btree_path_verify(trans, &trans->paths[iter->update_path]);
	bch2_btree_path_verify(trans, btree_iter_path(trans, iter));
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
	       !iter->pos.snapshot);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
	       bkey_gt(iter->pos, iter->k.p));
}
static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
{
	struct btree_trans *trans = iter->trans;
	struct btree_iter copy;
	struct bkey_s_c prev;
	int ret = 0;

	if (!bch2_debug_check_iterators)
		return 0;

	if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
		return 0;

	if (bkey_err(k) || !k.k)
		return 0;

	BUG_ON(!bch2_snapshot_is_ancestor(trans->c,
					  iter->snapshot,
					  k.k->p.snapshot));

	bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
			     BTREE_ITER_NOPRESERVE|
			     BTREE_ITER_ALL_SNAPSHOTS);
	prev = bch2_btree_iter_prev(&copy);
	if (!prev.k)
		goto out;

	ret = bkey_err(prev);
	if (ret)
		goto out;

	if (bkey_eq(prev.k->p, k.k->p) &&
	    bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
				      prev.k->p.snapshot) > 0) {
		struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;

		bch2_bkey_to_text(&buf1, k.k);
		bch2_bkey_to_text(&buf2, prev.k);

		panic("iter snap %u\n"
		      "k    %s\n"
		      "prev %s\n",
		      iter->snapshot,
		      buf1.buf, buf2.buf);
	}
out:
	bch2_trans_iter_exit(trans, &copy);
	return ret;
}
void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
			    struct bpos pos, bool key_cache)
{
	struct btree_path *path;
	struct trans_for_each_path_inorder_iter iter;
	struct printbuf buf = PRINTBUF;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		int cmp = cmp_int(path->btree_id, id) ?:
			cmp_int(path->cached, key_cache);

		if (cmp > 0)
			break;
		if (cmp < 0)
			continue;

		if (!btree_node_locked(path, 0) ||
		    !path->should_be_locked)
			continue;

		if (!key_cache) {
			if (bkey_ge(pos, path->l[0].b->data->min_key) &&
			    bkey_le(pos, path->l[0].b->key.k.p))
				return;
		} else {
			if (bkey_eq(pos, path->pos))
				return;
		}
	}

	bch2_dump_trans_paths_updates(trans);
	bch2_bpos_to_text(&buf, pos);

	panic("not locked: %s %s%s\n",
	      bch2_btree_id_str(id), buf.buf,
	      key_cache ? " cached" : "");
}
#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }

#endif
/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}
static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to re-add it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}
void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;
	unsigned i;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked, i) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}
/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	return bkey_disassemble(l->b, k, u);
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	bch2_btree_path_verify_level(trans, path, l - path->l);
	return k;
}

static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

void bch2_btree_path_level_init(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b)
{
	BUG_ON(path->cached);

	EBUG_ON(!btree_path_pos_in_node(path, b));

	path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}
/* Btree path: fixups after btree node updates: */

static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i)
		if (!i->cached &&
		    i->level	== b->c.level &&
		    i->btree_id	== b->c.btree_id &&
		    bpos_cmp(i->k->k.p, b->data->min_key) >= 0 &&
		    bpos_cmp(i->k->k.p, b->data->max_key) <= 0) {
			i->old_v = bch2_btree_path_peek_slot(trans->paths + i->path, &i->old_k).v;

			if (unlikely(trans->journal_replay_not_finished)) {
				struct bkey_i *j_k =
					bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
								    i->k->k.p);

				if (j_k) {
					i->old_k = j_k->k;
					i->old_v = &j_k->v;
				}
			}
		}
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans,
			 struct btree_path *path,
			 struct btree *b)
{
	struct btree_path *prev;

	BUG_ON(!btree_path_pos_in_node(path, b));

	while ((prev = prev_btree_path(trans, path)) &&
	       btree_path_pos_in_node(prev, b))
		path = prev;

	for (;
	     path && btree_path_pos_in_node(path, b);
	     path = next_btree_path(trans, path))
		if (path->uptodate == BTREE_ITER_UPTODATE && !path->cached) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(trans, path, b->c.level);
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(trans, path, b->c.level, t);
			}

			bch2_btree_path_level_init(trans, path, b);
		}

	bch2_trans_revalidate_updates_in_node(trans, b);
}
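/*
 * Added note on the lock handoff above (descriptive, not upstream text):
 * rather than dropping the old node's lock and acquiring the new node's lock
 * from scratch - which could block or deadlock - six_lock_increment() takes
 * an additional reference on the new node's lock, which the caller already
 * holds, at the same read/intent type the path held on the old node.
 */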
/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path_with_node(trans, b, path, i)
		__btree_path_level_init(path, b->c.level);

	bch2_trans_revalidate_updates_in_node(trans, b);
}
/* Btree path: traverse, set_pos: */

static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &bch2_btree_id_root(c, path->btree_id)->b;
	enum six_lock_type lock_type;
	unsigned i;
	int ret;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		ret = btree_node_lock(trans, path, &b->c,
				      path->level, lock_type, trace_ip);
		if (unlikely(ret)) {
			if (bch2_err_matches(ret, BCH_ERR_lock_fail_root_changed))
				continue;
			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
				return ret;
			BUG();
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_lock_root);
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level,
					       (enum btree_node_locked_type) lock_type);
			bch2_btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
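/*
 * Added note: the loop above exists because the root can be replaced by a
 * split or merge between reading *rootp and taking the lock.  The
 * b == READ_ONCE(*rootp) recheck under the lock catches that race, and
 * BCH_ERR_lock_fail_root_changed retries instead of failing the traversal.
 */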
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
				 struct btree_and_journal_iter *jiter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_started, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr-- && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_and_journal_iter_advance(jiter);
		k = bch2_btree_and_journal_iter_peek(jiter);
		if (!k.k)
			break;

		bch2_bkey_buf_reassemble(&tmp, c, k);
		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(trans, path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(trans, path, plevel);
}

static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned flags,
						     struct bkey_buf *out)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_and_journal_iter jiter;
	struct bkey_s_c k;
	int ret = 0;

	__bch2_btree_and_journal_iter_init_node_iter(&jiter, c, l->b, l->iter, path->pos);

	k = bch2_btree_and_journal_iter_peek(&jiter);

	bch2_bkey_buf_reassemble(out, c, k);

	if ((flags & BTREE_ITER_PREFETCH) &&
	    c->opts.btree_node_prefetch)
		ret = btree_path_prefetch_j(trans, path, &jiter);

	bch2_btree_and_journal_iter_exit(&jiter);
	return ret;
}
static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);

	if (unlikely(trans->journal_replay_not_finished)) {
		ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
		if (ret)
			goto err;
	} else {
		bch2_bkey_buf_unpack(&tmp, c, l->b,
				     bch2_btree_node_iter_peek(&l->iter, l->b));

		if ((flags & BTREE_ITER_PREFETCH) &&
		    c->opts.btree_node_prefetch) {
			ret = btree_path_prefetch(trans, path);
			if (ret)
				goto err;
		}
	}

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	if (likely(!trans->journal_replay_not_finished &&
		   tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(trans, path, level + 1);

	mark_btree_node_locked(trans, path, level,
			       (enum btree_node_locked_type) lock_type);
	path->level = level;
	bch2_btree_path_level_init(trans, path, b);

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}
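/*
 * Sketch of the descent step above (added comment): btree_path_down() reads
 * the btree_ptr key that the parent level's node iterator points at, gets and
 * locks the child node with bch2_btree_node_get(), then initializes
 * path->l[level] for the child.  The btree_node_mem_ptr fixup is purely a
 * cache hint - btree_ptr_v2 keys embed the expected in-memory node address so
 * future descents can skip the btree node cache hash lookup.
 */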
static int bch2_btree_path_traverse_all(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path;
	unsigned long trace_ip = _RET_IP_;
	unsigned i;
	int ret = 0;

	if (trans->in_traverse_all)
		return -BCH_ERR_transaction_restart_in_traverse_all;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = 0;
	trans->last_restarted_ip = 0;

	trans_for_each_path(trans, path, i)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(trans->memory_allocation_failure)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(trans, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	/* Now, redo traversals in correct order: */
	i = 0;
	while (i < trans->nr_sorted) {
		btree_path_idx_t idx = trans->sorted[i];

		/*
		 * Traversing a path can cause another path to be added at about
		 * the same position:
		 */
		if (trans->paths[idx].uptodate) {
			__btree_path_get(&trans->paths[idx], false);
			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
			__btree_path_put(&trans->paths[idx], false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))
				goto retry_all;
			if (ret)
				goto err;
		} else {
			i++;
		}
	}

	/*
	 * We used to assert that all paths had been traversed here
	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
	 * path->should_be_locked is not set yet, we might have unlocked and
	 * then failed to relock a path - that's fine.
	 */
err:
	bch2_btree_cache_cannibalize_unlock(trans);

	trans->in_traverse_all = false;

	trace_and_count(c, trans_traverse_all, trans, trace_ip);
	return ret;
}
static inline bool btree_path_check_pos_in_node(struct btree_path *path,
						unsigned l, int check_pos)
{
	if (check_pos < 0 && btree_path_pos_before_node(path, path->l[l].b))
		return false;
	if (check_pos > 0 && btree_path_pos_after_node(path, path->l[l].b))
		return false;
	return true;
}

static inline bool btree_path_good_node(struct btree_trans *trans,
					struct btree_path *path,
					unsigned l, int check_pos)
{
	return is_btree_node(path, l) &&
		bch2_btree_node_relock(trans, path, l) &&
		btree_path_check_pos_in_node(path, l, check_pos);
}

static void btree_path_set_level_down(struct btree_trans *trans,
				      struct btree_path *path,
				      unsigned new_level)
{
	unsigned l;

	path->level = new_level;

	for (l = path->level + 1; l < BTREE_MAX_DEPTH; l++)
		if (btree_lock_want(path, l) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(trans, path, l);

	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
	bch2_btree_path_verify(trans, path);
}

static noinline unsigned __btree_path_up_until_good_node(struct btree_trans *trans,
							 struct btree_path *path,
							 int check_pos)
{
	unsigned i, l = path->level;
again:
	while (btree_path_node(path, l) &&
	       !btree_path_good_node(trans, path, l, check_pos))
		__btree_path_set_level_up(trans, path, l++);

	/* If we need intent locks, take them too: */
	for (i = l + 1;
	     i < path->locks_want && btree_path_node(path, i);
	     i++)
		if (!bch2_btree_node_relock(trans, path, i)) {
			while (l <= i)
				__btree_path_set_level_up(trans, path, l++);
			goto again;
		}

	return l;
}

static inline unsigned btree_path_up_until_good_node(struct btree_trans *trans,
						     struct btree_path *path,
						     int check_pos)
{
	return likely(btree_node_locked(path, path->level) &&
		      btree_path_check_pos_in_node(path, path->level, check_pos))
		? path->level
		: __btree_path_up_until_good_node(trans, path, check_pos);
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int bch2_btree_path_traverse_one(struct btree_trans *trans,
				 btree_path_idx_t path_idx,
				 unsigned flags,
				 unsigned long trace_ip)
{
	struct btree_path *path = &trans->paths[path_idx];
	unsigned depth_want = path->level;
	int ret = -((int) trans->restarted);

	if (unlikely(ret))
		goto out;

	if (unlikely(!trans->srcu_held))
		bch2_trans_srcu_lock(trans);

	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:
	 */
	if (path->should_be_locked) {
		ret = bch2_btree_path_relock(trans, path, trace_ip);
		goto out;
	}

	if (path->cached) {
		ret = bch2_btree_path_traverse_cached(trans, path, flags);
		goto out;
	}

	path = &trans->paths[path_idx];

	if (unlikely(path->level >= BTREE_MAX_DEPTH))
		goto out_uptodate;

	path->level = btree_path_up_until_good_node(trans, path, 0);

	EBUG_ON(btree_path_node(path, path->level) &&
		!btree_node_locked(path, path->level));

	/*
	 * Note: path->nodes[path->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_path_lock_root() comes next and that it can't fail
	 */
	while (path->level > depth_want) {
		ret = btree_path_node(path, path->level)
			? btree_path_down(trans, path, flags, trace_ip)
			: btree_path_lock_root(trans, path, depth_want, trace_ip);
		if (unlikely(ret)) {
			if (ret == 1) {
				/*
				 * No nodes at this level - got to the end of
				 * the btree:
				 */
				ret = 0;
				goto out;
			}

			__bch2_btree_path_unlock(trans, path);
			path->level = depth_want;
			path->l[path->level].b = ERR_PTR(ret);
			goto out;
		}
	}
out_uptodate:
	path->uptodate = BTREE_ITER_UPTODATE;
out:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",
		      bch2_err_str(ret), ret,
		      bch2_err_str(trans->restarted), trans->restarted);
	bch2_btree_path_verify(trans, path);
	return ret;
}
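/*
 * Illustrative caller pattern (sketch, with error handling reduced to the
 * essentials - restart errors must propagate up to the transaction loop):
 *
 *	ret = bch2_btree_path_traverse(trans, path_idx, flags);
 *	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 *		goto retry;	// i.e. back to bch2_trans_begin()
 *
 * The assertion at out: enforces the invariant that a restart error is
 * returned if and only if trans->restarted is set.
 */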
static inline void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
				   struct btree_path *src)
{
	unsigned i, offset = offsetof(struct btree_path, pos);

	memcpy((void *) dst + offset,
	       (void *) src + offset,
	       sizeof(struct btree_path) - offset);

	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
		unsigned t = btree_node_locked_type(dst, i);

		if (t != BTREE_NODE_UNLOCKED)
			six_lock_increment(&dst->l[i].b->c.lock, t);
	}
}

static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_idx_t src,
					 bool intent)
{
	btree_path_idx_t new = btree_path_alloc(trans, src);
	btree_path_copy(trans, trans->paths + new, trans->paths + src);
	__btree_path_get(trans->paths + new, intent);
	return new;
}

__flatten
btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			btree_path_idx_t path, bool intent, unsigned long ip)
{
	__btree_path_put(trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent);
	trans->paths[path].preserve = false;
	return path;
}
btree_path_idx_t __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
			  btree_path_idx_t path_idx, struct bpos new_pos,
			  bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);

	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!trans->paths[path_idx].ref);

	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

	struct btree_path *path = trans->paths + path_idx;
	path->pos		= new_pos;
	trans->paths_sorted	= false;

	if (unlikely(path->cached)) {
		btree_node_unlock(trans, path, 0);
		path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		goto out;
	}

	unsigned level = btree_path_up_until_good_node(trans, path, cmp);

	if (btree_path_node(path, level)) {
		struct btree_path_level *l = &path->l[level];

		BUG_ON(!btree_node_locked(path, level));
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_path_advance_to_pos(path, l, 8))
			bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

		/*
		 * Iterators to interior nodes should always be pointed at the first non
		 * whiteout:
		 */
		if (unlikely(level))
			bch2_btree_node_iter_peek(&l->iter, l->b);
	}

	if (unlikely(level != path->level)) {
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		__bch2_btree_path_unlock(trans, path);
	}
out:
	bch2_btree_path_verify(trans, path);
	return path_idx;
}
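/*
 * Added performance note: when the new position is still within the node the
 * path has locked, set_pos avoids a full re-traverse - it either advances the
 * in-node iterator (btree_path_advance_to_pos(), capped at 8 keys) or
 * re-seeks within the same node, and only marks the path
 * BTREE_ITER_NEED_TRAVERSE when it had to walk up to a different node.
 */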
/* Btree path: main interface: */

static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && !btree_path_cmp(sib, path))
		return sib;

	return NULL;
}

static struct btree_path *have_node_at_pos(struct btree_trans *trans, struct btree_path *path)
{
	struct btree_path *sib;

	sib = prev_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	sib = next_btree_path(trans, path);
	if (sib && sib->level == path->level && path_l(sib)->b == path_l(path)->b)
		return sib;

	return NULL;
}

static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t path)
{
	__bch2_btree_path_unlock(trans, trans->paths + path);
	btree_path_list_remove(trans, trans->paths + path);
	__clear_bit(path, trans->paths_allocated);
}

void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
	struct btree_path *path = trans->paths + path_idx, *dup;

	if (!__btree_path_put(path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

	if (path->should_be_locked &&
	    !trans->restarted &&
	    (!dup || !bch2_btree_path_relock_norestart(trans, dup)))
		return;

	if (dup) {
		dup->preserve		|= path->preserve;
		dup->should_be_locked	|= path->should_be_locked;
	}

	__bch2_path_free(trans, path_idx);
}
static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
				 bool intent)
{
	if (!__btree_path_put(trans->paths + path, intent))
		return;

	__bch2_path_free(trans, path);
}

void __noreturn bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
{
	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
	      trans->restart_count, restart_count,
	      (void *) trans->last_begin_ip);
}

void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
{
	panic("in transaction restart: %s, last restarted by %pS\n",
	      bch2_err_str(trans->restarted),
	      (void *) trans->last_restarted_ip);
}
noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
	prt_printf(buf, "transaction updates for %s journal seq %llu",
		   trans->fn, trans->journal_res.seq);
	prt_newline(buf);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {
		struct bkey_s_c old = { &i->old_k, i->old_v };

		prt_printf(buf, "update: btree=%s cached=%u %pS",
			   bch2_btree_id_str(i->btree_id),
			   i->cached,
			   (void *) i->ip_allocated);
		prt_newline(buf);

		prt_printf(buf, " old ");
		bch2_bkey_val_to_text(buf, trans->c, old);
		prt_newline(buf);

		prt_printf(buf, " new ");
		bch2_bkey_val_to_text(buf, trans->c, bkey_i_to_s_c(i->k));
		prt_newline(buf);
	}

	for (struct jset_entry *e = trans->journal_entries;
	     e != btree_trans_journal_entries_top(trans);
	     e = vstruct_next(e))
		bch2_journal_entry_to_text(buf, trans->c, e);

	printbuf_indent_sub(buf, 2);
}

noinline __cold
void bch2_dump_trans_updates(struct btree_trans *trans)
{
	struct printbuf buf = PRINTBUF;

	bch2_trans_updates_to_text(&buf, trans);
	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
}
static void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
	struct btree_path *path = trans->paths + path_idx;

	prt_printf(out, "path: idx %2u ref %u:%u %c %c btree=%s l=%u pos ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',
		   bch2_btree_id_str(path->btree_id),
		   path->level);
	bch2_bpos_to_text(out, path->pos);

	prt_printf(out, " locks %u", path->nodes_locked);
#ifdef TRACK_PATH_ALLOCATED
	prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
	prt_newline(out);
}

static noinline __cold
void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
				bool nosort)
{
	struct trans_for_each_path_inorder_iter iter;

	if (!nosort)
		btree_trans_sort_paths(trans);

	trans_for_each_path_idx_inorder(trans, iter)
		bch2_btree_path_to_text(out, trans, iter.path_idx);
}

noinline __cold
void bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans)
{
	__bch2_trans_paths_to_text(out, trans, false);
}

static noinline __cold
void __bch2_dump_trans_paths_updates(struct btree_trans *trans, bool nosort)
{
	struct printbuf buf = PRINTBUF;

	__bch2_trans_paths_to_text(&buf, trans, nosort);
	bch2_trans_updates_to_text(&buf, trans);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);
	printbuf_exit(&buf);
}

noinline __cold
void bch2_dump_trans_paths_updates(struct btree_trans *trans)
{
	__bch2_dump_trans_paths_updates(trans, false);
}
static void bch2_trans_update_max_paths(struct btree_trans *trans)
{
	struct btree_transaction_stats *s = btree_trans_stats(trans);
	struct printbuf buf = PRINTBUF;
	size_t nr = bitmap_weight(trans->paths_allocated, trans->nr_paths);

	bch2_trans_paths_to_text(&buf, trans);

	if (!buf.allocation_failure) {
		mutex_lock(&s->lock);
		if (nr > s->nr_max_paths) {
			s->nr_max_paths = nr;
			swap(s->max_paths_text, buf.buf);
		}
		mutex_unlock(&s->lock);
	}

	printbuf_exit(&buf);

	trans->nr_paths_max = nr;
}

noinline __cold
int __bch2_btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (trace_trans_restart_too_many_iters_enabled()) {
		struct printbuf buf = PRINTBUF;

		bch2_trans_paths_to_text(&buf, trans);
		trace_trans_restart_too_many_iters(trans, _THIS_IP_, buf.buf);
		printbuf_exit(&buf);
	}

	count_event(trans->c, trans_restart_too_many_iters);

	return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
}

static noinline void btree_path_overflow(struct btree_trans *trans)
{
	bch2_dump_trans_paths_updates(trans);
	bch_err(trans->c, "trans path overflow");
}
static noinline void btree_paths_realloc(struct btree_trans *trans)
{
	unsigned nr = trans->nr_paths * 2;

	void *p = kzalloc(BITS_TO_LONGS(nr) * sizeof(unsigned long) +
			  sizeof(struct btree_trans_paths) +
			  nr * sizeof(struct btree_path) +
			  nr * sizeof(btree_path_idx_t) + 8 +
			  nr * sizeof(struct btree_insert_entry), GFP_KERNEL|__GFP_NOFAIL);

	unsigned long *paths_allocated = p;
	memcpy(paths_allocated, trans->paths_allocated, BITS_TO_LONGS(trans->nr_paths) * sizeof(unsigned long));
	p += BITS_TO_LONGS(nr) * sizeof(unsigned long);

	p += sizeof(struct btree_trans_paths);
	struct btree_path *paths = p;
	*trans_paths_nr(paths) = nr;
	memcpy(paths, trans->paths, trans->nr_paths * sizeof(struct btree_path));
	p += nr * sizeof(struct btree_path);

	btree_path_idx_t *sorted = p;
	memcpy(sorted, trans->sorted, trans->nr_sorted * sizeof(btree_path_idx_t));
	p += nr * sizeof(btree_path_idx_t) + 8;

	struct btree_insert_entry *updates = p;
	memcpy(updates, trans->updates, trans->nr_paths * sizeof(struct btree_insert_entry));

	unsigned long *old = trans->paths_allocated;

	rcu_assign_pointer(trans->paths_allocated,	paths_allocated);
	rcu_assign_pointer(trans->paths,		paths);
	rcu_assign_pointer(trans->sorted,		sorted);
	rcu_assign_pointer(trans->updates,		updates);

	trans->nr_paths = nr;

	if (old != trans->_paths_allocated)
		kfree_rcu_mightsleep(old);
}
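/*
 * Added note (assumption, based on the rcu_assign_pointer() calls above): the
 * path arrays appear to be read locklessly elsewhere (e.g. by code that
 * reports on btree transactions), so the old arrays are published with
 * release semantics and freed with kfree_rcu_mightsleep() so that concurrent
 * RCU readers see either the old or the new arrays, never freed memory.
 */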
static inline btree_path_idx_t btree_path_alloc(struct btree_trans *trans,
						btree_path_idx_t pos)
{
	btree_path_idx_t idx = find_first_zero_bit(trans->paths_allocated, trans->nr_paths);

	if (unlikely(idx == trans->nr_paths)) {
		if (trans->nr_paths == BTREE_ITER_MAX) {
			btree_path_overflow(trans);
			return 0;
		}

		btree_paths_realloc(trans);
	}

	/*
	 * Do this before marking the new path as allocated, since it won't be
	 * initialized yet:
	 */
	if (unlikely(idx > trans->nr_paths_max))
		bch2_trans_update_max_paths(trans);

	__set_bit(idx, trans->paths_allocated);

	struct btree_path *path = &trans->paths[idx];
	path->ref		= 0;
	path->intent_ref	= 0;
	path->nodes_locked	= 0;

	btree_path_list_add(trans, pos, idx);
	trans->paths_sorted = false;
	return idx;
}
btree_path_idx_t bch2_path_get(struct btree_trans *trans,
			       enum btree_id btree_id, struct bpos pos,
			       unsigned locks_want, unsigned level,
			       unsigned flags, unsigned long ip)
{
	struct btree_path *path;
	bool cached = flags & BTREE_ITER_CACHED;
	bool intent = flags & BTREE_ITER_INTENT;
	struct trans_for_each_path_inorder_iter iter;
	btree_path_idx_t path_pos = 0, path_idx;

	bch2_trans_verify_not_in_restart(trans);
	bch2_trans_verify_locks(trans);

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder(trans, path, iter) {
		if (__btree_path_cmp(path,
				     btree_id,
				     cached,
				     pos,
				     level) > 0)
			break;

		path_pos = iter.path_idx;
	}

	if (path_pos &&
	    trans->paths[path_pos].cached	== cached &&
	    trans->paths[path_pos].btree_id	== btree_id &&
	    trans->paths[path_pos].level	== level) {
		__btree_path_get(trans->paths + path_pos, intent);
		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = trans->paths + path_idx;
	} else {
		path_idx = btree_path_alloc(trans, path_pos);
		path = trans->paths + path_idx;

		__btree_path_get(path, intent);
		path->pos			= pos;
		path->btree_id			= btree_id;
		path->cached			= cached;
		path->uptodate			= BTREE_ITER_NEED_TRAVERSE;
		path->should_be_locked		= false;
		path->level			= level;
		path->locks_want		= locks_want;
		path->nodes_locked		= 0;
		for (unsigned i = 0; i < ARRAY_SIZE(path->l); i++)
			path->l[i].b		= ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef TRACK_PATH_ALLOCATED
		path->ip_allocated		= ip;
#endif
		trans->paths_sorted		= false;
	}

	if (!(flags & BTREE_ITER_NOPRESERVE))
		path->preserve = true;

	if (path->intent_ref)
		locks_want = max(locks_want, level + 1);

	/*
	 * If the path has locks_want greater than requested, we don't downgrade
	 * it here - on transaction restart because btree node split needs to
	 * upgrade locks, we might be putting/getting the iterator again.
	 * Downgrading iterators only happens via bch2_trans_downgrade(), after
	 * a successful transaction commit.
	 */
	locks_want = min(locks_want, BTREE_MAX_DEPTH);
	if (locks_want > path->locks_want)
		bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);

	return path_idx;
}
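/*
 * Usage sketch (illustrative only; inum/offset are placeholders):
 *
 *	btree_path_idx_t idx = bch2_path_get(trans, BTREE_ID_extents,
 *					     POS(inum, offset),
 *					     1, 0, BTREE_ITER_INTENT, _THIS_IP_);
 *	ret = bch2_btree_path_traverse(trans, idx, 0);
 *	...
 *	bch2_path_put(trans, idx, true);
 *
 * Paths are refcounted and deduplicated: an existing path at the same
 * btree/cached/pos/level is reused rather than allocating a new one.
 */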
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{
	struct btree_path_level *l = path_l(path);
	struct bkey_packed *_k;
	struct bkey_s_c k;

	if (unlikely(!l->b))
		return bkey_s_c_null;

	EBUG_ON(path->uptodate != BTREE_ITER_UPTODATE);
	EBUG_ON(!btree_node_locked(path, path->level));

	if (!path->cached) {
		_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;

		EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));

		if (!k.k || !bpos_eq(path->pos, k.k->p))
			goto hole;
	} else {
		struct bkey_cached *ck = (void *) path->l[0].b;

		EBUG_ON(ck &&
			(path->btree_id != ck->key.btree_id ||
			 !bkey_eq(path->pos, ck->key.pos)));
		if (!ck || !ck->valid)
			return bkey_s_c_null;

		*u = ck->k->k;
		k = bkey_i_to_s_c(ck->k);
	}

	return k;
hole:
	bkey_init(u);
	u->p = path->pos;
	return (struct bkey_s_c) { u, NULL };
}
/* Btree iterators: */

int __must_check
__bch2_btree_iter_traverse(struct btree_iter *iter)
{
	return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
}

int __must_check
bch2_btree_iter_traverse(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	int ret;

	iter->path = bch2_btree_path_set_pos(trans, iter->path,
					btree_iter_search_key(iter),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
	if (ret)
		return ret;

	btree_path_set_should_be_locked(trans->paths + iter->path);
	return 0;
}
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_btree_iter_verify(iter);

	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
	if (ret)
		goto err;

	struct btree_path *path = btree_iter_path(trans, iter);
	b = btree_path_node(path, path->level);
	if (!b)
		goto out;

	BUG_ON(bpos_lt(b->key.k.p, iter->pos));

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					     iter->flags & BTREE_ITER_INTENT,
					     btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}

struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(iter->trans);

	return b;
}
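/*
 * Example loop over every node in a btree (sketch - the for_each_btree_node()
 * macro in btree_iter.h wraps roughly this pattern; process_node() is a
 * placeholder):
 *
 *	for (b = bch2_btree_iter_peek_node_and_restart(&iter);
 *	     !IS_ERR_OR_NULL(b);
 *	     b = bch2_btree_iter_next_node(&iter))
 *		process_node(b);
 */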
struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct btree *b = NULL;
	int ret;

	EBUG_ON(trans->paths[iter->path].cached);
	bch2_trans_verify_not_in_restart(trans);
	bch2_btree_iter_verify(iter);

	struct btree_path *path = btree_iter_path(trans, iter);

	/* already at end? */
	if (!btree_path_node(path, path->level))
		return NULL;

	/* got to end? */
	if (!btree_path_node(path, path->level + 1)) {
		btree_path_set_level_up(trans, path);
		return NULL;
	}

	if (!bch2_btree_node_relock(trans, path, path->level + 1)) {
		__bch2_btree_path_unlock(trans, path);
		path->l[path->level].b		= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		path->l[path->level + 1].b	= ERR_PTR(-BCH_ERR_no_btree_node_relock);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
		trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
		ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		goto err;
	}

	b = btree_path_node(path, path->level + 1);

	if (bpos_eq(iter->pos, b->key.k.p)) {
		__btree_path_set_level_up(trans, path, path->level++);
	} else {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */
		iter->path = bch2_btree_path_set_pos(trans, iter->path,
					bpos_successor(iter->pos),
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		path = btree_iter_path(trans, iter);
		btree_path_set_level_down(trans, path, iter->min_depth);

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (ret)
			goto err;

		path = btree_iter_path(trans, iter);
		b = path->l[path->level].b;
	}

	bkey_init(&iter->k);
	iter->k.p = iter->pos = b->key.k.p;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					     iter->flags & BTREE_ITER_INTENT,
					     btree_iter_ip_allocated(iter));
	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return b;
err:
	b = ERR_PTR(ret);
	goto out;
}
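/*
 * Added note: unlike peek_node(), next_node() must relock the parent in order
 * to step to the next child; when that relock fails the path is marked dirty
 * and a transaction restart error is returned, so node-iteration loops have
 * to handle BCH_ERR_transaction_restart between iterations (as
 * bch2_btree_iter_peek_node_and_restart() does above).
 */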
/* Iterate across keys (in leaf nodes only) */

inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
	struct bpos pos = iter->k.p;
	bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		     ? bpos_eq(pos, SPOS_MAX)
		     : bkey_eq(pos, SPOS_MAX));

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_successor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}

inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
		     ? bpos_eq(pos, POS_MIN)
		     : bkey_eq(pos, POS_MIN));

	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
		pos = bkey_predecessor(iter, pos);
	bch2_btree_iter_set_pos(iter, pos);
	return ret;
}
static noinline
void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_le(i->k->k.p, iter->pos) &&
		    bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
				   struct bkey_s_c *k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bpos end = path_l(path)->b->key.k.p;

	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_ge(i->k->k.p, path->pos) &&
		    bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}

static noinline
void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
					struct bkey_s_c *k)
{
	trans_for_each_update(trans, i)
		if (!i->key_cache_already_flushed &&
		    i->btree_id == iter->btree_id &&
		    bpos_eq(i->k->k.p, iter->pos)) {
			iter->k = i->k->k;
			*k = bkey_i_to_s_c(i->k);
		}
}
static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
					      struct btree_iter *iter,
					      struct bpos end_pos)
{
	struct btree_path *path = btree_iter_path(trans, iter);

	return bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
					   path->level,
					   path->pos,
					   end_pos,
					   &iter->journal_idx);
}

static noinline
struct bkey_s_c btree_trans_peek_slot_journal(struct btree_trans *trans,
					      struct btree_iter *iter)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *k = bch2_btree_journal_peek(trans, iter, path->pos);

	if (k) {
		iter->k = k->k;
		return bkey_i_to_s_c(k);
	} else {
		return bkey_s_c_null;
	}
}

static noinline
struct bkey_s_c btree_trans_peek_journal(struct btree_trans *trans,
					 struct btree_iter *iter,
					 struct bkey_s_c k)
{
	struct btree_path *path = btree_iter_path(trans, iter);
	struct bkey_i *next_journal =
		bch2_btree_journal_peek(trans, iter,
				k.k ? k.k->p : path_l(path)->b->key.k.p);

	if (next_journal) {
		iter->k = next_journal->k;
		k = bkey_i_to_s_c(next_journal);
	}

	return k;
}
/*
 * Checks btree key cache for key at iter->pos and returns it if present, or
 * bkey_s_c_null:
 */
static noinline
struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
{
	struct btree_trans *trans = iter->trans;
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k;
	int ret;

	if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
	    bpos_eq(iter->pos, pos))
		return bkey_s_c_null;

	if (!bch2_btree_key_cache_find(c, iter->btree_id, pos))
		return bkey_s_c_null;

	if (!iter->key_cache_path)
		iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
						     iter->flags & BTREE_ITER_INTENT, 0,
						     iter->flags|BTREE_ITER_CACHED|
						     BTREE_ITER_CACHED_NOFILL,
						     _THIS_IP_);

	iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

	ret =   bch2_btree_path_traverse(trans, iter->key_cache_path,
					 iter->flags|BTREE_ITER_CACHED) ?:
		bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
	if (unlikely(ret))
		return bkey_s_c_err(ret);

	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);

	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
	if (k.k && !bkey_err(k)) {
		iter->k = u;
		k.k = &iter->k;
	}
	return k;
}
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
	struct btree_trans *trans = iter->trans;
	struct bkey_s_c k, k2;
	int ret;

	EBUG_ON(btree_iter_path(trans, iter)->cached);
	bch2_btree_iter_verify(iter);

	while (1) {
		struct btree_path_level *l;

		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
					iter->flags & BTREE_ITER_INTENT,
					btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out;
		}

		struct btree_path *path = btree_iter_path(trans, iter);
		l = path_l(path);

		if (unlikely(!l->b)) {
			/* No btree nodes at requested level: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			goto out;
		}

		btree_path_set_should_be_locked(path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

		if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
		    k.k &&
		    (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
			k = k2;
			ret = bkey_err(k);
			if (ret) {
				bch2_btree_iter_set_pos(iter, iter->pos);
				goto out;
			}
		}

		if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
			k = btree_trans_peek_journal(trans, iter, k);

		if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
			     trans->nr_updates))
			bch2_btree_trans_peek_updates(trans, iter, &k);

		if (k.k && bkey_deleted(k.k)) {
			/*
			 * If we've got a whiteout, and it's after the search
			 * key, advance the search key to the whiteout instead
			 * of just after the whiteout - it might be a btree
			 * whiteout, with a real key at the same position, since
			 * in the btree deleted keys sort before non deleted.
			 */
			search_key = !bpos_eq(search_key, k.k->p)
				? k.k->p
				: bpos_successor(k.k->p);
			continue;
		}

		if (likely(k.k)) {
			break;
		} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
			/* Advance to next leaf node: */
			search_key = bpos_successor(l->b->key.k.p);
		} else {
			/* End of btree: */
			bch2_btree_iter_set_pos(iter, SPOS_MAX);
			k = bkey_s_c_null;
			goto out;
		}
	}
out:
	bch2_btree_iter_verify(iter);

	return k;
}
/**
 * bch2_btree_iter_peek_upto() - returns first key greater than or equal to
 * iterator's current position
 * @iter:	iterator to peek from
 * @end:	search limit: returns keys less than or equal to @end
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos end)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = btree_iter_search_key(iter);
	struct bkey_s_c k;
	struct bpos iter_pos;
	int ret;

	EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));

	if (iter->update_path) {
		bch2_path_put_nokeep(trans, iter->update_path,
				     iter->flags & BTREE_ITER_INTENT);
		iter->update_path = 0;
	}

	bch2_btree_iter_verify_entry_exit(iter);

	while (1) {
		k = __bch2_btree_iter_peek(iter, search_key);
		if (unlikely(!k.k))
			goto end;
		if (unlikely(bkey_err(k)))
			goto out_no_locked;

		/*
		 * We need to check against @end before FILTER_SNAPSHOTS because
		 * if we get to a different inode than requested we might be
		 * seeing keys for a different snapshot tree that will all be
		 * filtered out.
		 *
		 * But we can't do the full check here, because bkey_start_pos()
		 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
		 * that's what we check against in extents mode:
		 */
		if (k.k->p.inode > end.inode)
			goto end;

		if (iter->update_path &&
		    !bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
			bch2_path_put_nokeep(trans, iter->update_path,
					     iter->flags & BTREE_ITER_INTENT);
			iter->update_path = 0;
		}

		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    (iter->flags & BTREE_ITER_INTENT) &&
		    !(iter->flags & BTREE_ITER_IS_EXTENTS) &&
		    !iter->update_path) {
			struct bpos pos = k.k->p;

			if (pos.snapshot < iter->snapshot) {
				search_key = bpos_successor(k.k->p);
				continue;
			}

			pos.snapshot = iter->snapshot;

			/*
			 * advance, same as on exit for iter->path, but only up
			 * to snapshot
			 */
			__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
			iter->update_path = iter->path;

			iter->update_path = bch2_btree_path_set_pos(trans,
						iter->update_path, pos,
						iter->flags & BTREE_ITER_INTENT,
						_THIS_IP_);
			ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
			if (unlikely(ret)) {
				k = bkey_s_c_err(ret);
				goto out_no_locked;
			}
		}

		/*
		 * We can never have a key in a leaf node at POS_MAX, so
		 * we don't have to check these successor() calls:
		 */
		if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
		    !bch2_snapshot_is_ancestor(trans->c,
					       iter->snapshot,
					       k.k->p.snapshot)) {
			search_key = bpos_successor(k.k->p);
			continue;
		}

		if (bkey_whiteout(k.k) &&
		    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
			search_key = bkey_successor(iter, k.k->p);
			continue;
		}

		break;
	}

	/*
	 * iter->pos should be monotonically increasing, and always be
	 * equal to the key we just returned - except extents can
	 * straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
		iter_pos = k.k->p;
	else
		iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));

	if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bkey_gt(iter_pos, end)
		     : bkey_ge(iter_pos, end)))
		goto end;

	iter->pos = iter_pos;

	iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
				iter->flags & BTREE_ITER_INTENT,
				btree_iter_ip_allocated(iter));

	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out_no_locked:
	if (iter->update_path) {
		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
		if (unlikely(ret))
			k = bkey_s_c_err(ret);
		else
			btree_path_set_should_be_locked(trans->paths + iter->update_path);
	}

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		iter->pos.snapshot = iter->snapshot;

	ret = bch2_btree_iter_verify_ret(iter, k);
	if (unlikely(ret)) {
		bch2_btree_iter_set_pos(iter, iter->pos);
		k = bkey_s_c_err(ret);
	}

	bch2_btree_iter_verify_entry_exit(iter);

	return k;
end:
	bch2_btree_iter_set_pos(iter, end);
	k = bkey_s_c_null;
	goto out_no_locked;
}
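/*
 * Typical iteration loop (sketch - the for_each_btree_key_upto() macros in
 * btree_iter.h generate roughly this, with transaction restart handling
 * omitted here; process_key() is a placeholder):
 *
 *	for (k = bch2_btree_iter_peek_upto(&iter, end);
 *	     k.k && !(ret = bkey_err(k));
 *	     k = bch2_btree_iter_next(&iter))
 *		process_key(k);
 */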
/**
 * bch2_btree_iter_next() - returns first key greater than iterator's current
 * position
 * @iter:	iterator to peek from
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;

	return bch2_btree_iter_peek(iter);
}
/**
 * bch2_btree_iter_peek_prev() - returns first key less than or equal to
 * iterator's current position
 * @iter:	iterator to peek from
 *
 * Returns:	key if found, or an error extractable with bkey_err().
 */
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;
	struct bpos search_key = iter->pos;
	struct bkey_s_c k;
	struct bkey saved_k;
	const struct bch_val *saved_v;
	btree_path_idx_t saved_path = 0;
	int ret;

	EBUG_ON(btree_iter_path(trans, iter)->cached ||
		btree_iter_path(trans, iter)->level);

	if (iter->flags & BTREE_ITER_WITH_JOURNAL)
		return bkey_s_c_err(-EIO);

	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		search_key.snapshot = U32_MAX;

	while (1) {
		iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
						iter->flags & BTREE_ITER_INTENT,
						btree_iter_ip_allocated(iter));

		ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
		if (unlikely(ret)) {
			/* ensure that iter->k is consistent with iter->pos: */
			bch2_btree_iter_set_pos(iter, iter->pos);
			k = bkey_s_c_err(ret);
			goto out_no_locked;
		}

		struct btree_path *path = btree_iter_path(trans, iter);

		k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bpos_ge(bkey_start_pos(k.k), search_key)
		     : bpos_gt(k.k->p, search_key)))
			k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);

		if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
			     trans->nr_updates))
			bch2_btree_trans_peek_prev_updates(trans, iter, &k);

		if (likely(k.k)) {
			if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
				if (k.k->p.snapshot == iter->snapshot)
					goto got_key;

				/*
				 * If we have a saved candidate, and we're no
				 * longer at the same _key_ (not pos), return
				 * that candidate
				 */
				if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
					bch2_path_put_nokeep(trans, iter->path,
						      iter->flags & BTREE_ITER_INTENT);
					iter->path = saved_path;
					saved_path = 0;
					iter->k	= saved_k;
					k.v	= saved_v;
					goto got_key;
				}

				if (bch2_snapshot_is_ancestor(trans->c,
							      iter->snapshot,
							      k.k->p.snapshot)) {
					if (saved_path)
						bch2_path_put_nokeep(trans, saved_path,
						      iter->flags & BTREE_ITER_INTENT);
					saved_path = btree_path_clone(trans, iter->path,
								iter->flags & BTREE_ITER_INTENT);
					path = btree_iter_path(trans, iter);
					saved_k = *k.k;
					saved_v = k.v;
				}

				search_key = bpos_predecessor(k.k->p);
				continue;
			}
got_key:
			if (bkey_whiteout(k.k) &&
			    !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
				search_key = bkey_predecessor(iter, k.k->p);
				if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
					search_key.snapshot = U32_MAX;
				continue;
			}

			btree_path_set_should_be_locked(path);
			break;
		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
			/* Advance to previous leaf node: */
			search_key = bpos_predecessor(path->l[0].b->data->min_key);
		} else {
			/* Start of btree: */
			bch2_btree_iter_set_pos(iter, POS_MIN);
			k = bkey_s_c_null;
			goto out_no_locked;
		}
	}

	EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));

	/* Extents can straddle iter->pos: */
	if (bkey_lt(k.k->p, iter->pos))
		iter->pos = k.k->p;

	if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
		iter->pos.snapshot = iter->snapshot;
out_no_locked:
	if (saved_path)
		bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);

	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

	return k;
}
2417 * bch2_btree_iter_prev() - returns first key less than iterator's current position
2419 * @iter: iterator to peek from
2421 * Returns: key if found, or an error extractable with bkey_err().
2423 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
2425 if (!bch2_btree_iter_rewind(iter))
2426 return bkey_s_c_null;
2428 return bch2_btree_iter_peek_prev(iter);
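/*
 * Example (a minimal sketch): reverse iteration with
 * bch2_btree_iter_peek_prev()/bch2_btree_iter_prev(); the btree ID and start
 * position are illustrative assumptions:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, POS_MAX, 0);
 *
 *	for (k = bch2_btree_iter_peek_prev(&iter);
 *	     !(ret = bkey_err(k)) && k.k;
 *	     k = bch2_btree_iter_prev(&iter)) {
 *		// keys arrive in decreasing key order
 *	}
 *
 *	bch2_trans_iter_exit(trans, &iter);
 */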
2431 struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
2433 struct btree_trans *trans = iter->trans;
2434 struct bpos search_key;
2438 bch2_btree_iter_verify(iter);
2439 bch2_btree_iter_verify_entry_exit(iter);
2440 EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
2442 /* extents can't span inode numbers: */
2443 if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
2444 unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
2445 if (iter->pos.inode == KEY_INODE_MAX)
2446 return bkey_s_c_null;
2448 bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
2451 search_key = btree_iter_search_key(iter);
2452 iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
2453 iter->flags & BTREE_ITER_INTENT,
2454 btree_iter_ip_allocated(iter));
2456 ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
2457 if (unlikely(ret)) {
2458 k = bkey_s_c_err(ret);
2462 if ((iter->flags & BTREE_ITER_CACHED) ||
2463 !(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
2466 if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
2467 trans->nr_updates)) {
2468 bch2_btree_trans_peek_slot_updates(trans, iter, &k);
2473 if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
2474 (k = btree_trans_peek_slot_journal(trans, iter)).k)
2477 if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
2478 (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
2481 /* We're not returning a key from iter->path: */
2485 k = bch2_btree_path_peek_slot(trans->paths + iter->path, &iter->k);
2490 struct bpos end = iter->pos;
2492 if (iter->flags & BTREE_ITER_IS_EXTENTS)
2493 end.offset = U64_MAX;
2495 EBUG_ON(btree_iter_path(trans, iter)->level);
2497 if (iter->flags & BTREE_ITER_INTENT) {
2498 struct btree_iter iter2;
2500 bch2_trans_copy_iter(&iter2, iter);
2501 k = bch2_btree_iter_peek_upto(&iter2, end);
2503 if (k.k && !bkey_err(k)) {
2507 bch2_trans_iter_exit(trans, &iter2);
2509 struct bpos pos = iter->pos;
2511 k = bch2_btree_iter_peek_upto(iter, end);
2512 if (unlikely(bkey_err(k)))
2513 bch2_btree_iter_set_pos(iter, pos);
2518 if (unlikely(bkey_err(k)))
2521 next = k.k ? bkey_start_pos(k.k) : POS_MAX;
2523 if (bkey_lt(iter->pos, next)) {
2524 bkey_init(&iter->k);
2525 iter->k.p = iter->pos;
2527 if (iter->flags & BTREE_ITER_IS_EXTENTS) {
2528 bch2_key_resize(&iter->k,
2529 min_t(u64, KEY_SIZE_MAX,
2530 (next.inode == iter->pos.inode
2534 EBUG_ON(!iter->k.size);
2537 k = (struct bkey_s_c) { &iter->k, NULL };
2541 btree_path_set_should_be_locked(btree_iter_path(trans, iter));
2543 bch2_btree_iter_verify_entry_exit(iter);
2544 bch2_btree_iter_verify(iter);
2545 ret = bch2_btree_iter_verify_ret(iter, k);
2547 return bkey_s_c_err(ret);
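/*
 * Example (a minimal sketch): a point lookup with bch2_btree_iter_peek_slot().
 * Unlike peek(), peek_slot() always returns a key for the iterator's
 * position, synthesizing a deleted/hole key (sized to span the hole, for
 * extents) when nothing is present. inum and snapshot are hypothetical:
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
 *			     SPOS(0, inum, snapshot), 0);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	// on success k.k is always non-NULL, but may be a deleted/hole key
 *	bch2_trans_iter_exit(trans, &iter);
 */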
2552 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
2554 if (!bch2_btree_iter_advance(iter))
2555 return bkey_s_c_null;
2557 return bch2_btree_iter_peek_slot(iter);
2560 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
2562 if (!bch2_btree_iter_rewind(iter))
2563 return bkey_s_c_null;
2565 return bch2_btree_iter_peek_slot(iter);
2568 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
2572 while (btree_trans_too_many_iters(iter->trans) ||
2573 (k = bch2_btree_iter_peek_type(iter, iter->flags),
2574 bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
2575 bch2_trans_begin(iter->trans);
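/*
 * Example (a minimal sketch): open-coded callers handle transaction restarts
 * with roughly the same loop as above:
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		k = bch2_btree_iter_peek(&iter);
 *	} while (bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart));
 */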
2580 /* new transactional stuff: */
2582 #ifdef CONFIG_BCACHEFS_DEBUG
2583 static void btree_trans_verify_sorted_refs(struct btree_trans *trans)
2585 struct btree_path *path;
2588 BUG_ON(trans->nr_sorted != bitmap_weight(trans->paths_allocated, trans->nr_paths) - 1);
2590 trans_for_each_path(trans, path, i) {
2591 BUG_ON(path->sorted_idx >= trans->nr_sorted);
2592 BUG_ON(trans->sorted[path->sorted_idx] != i);
2595 for (i = 0; i < trans->nr_sorted; i++) {
2596 unsigned idx = trans->sorted[i];
2598 BUG_ON(!test_bit(idx, trans->paths_allocated));
2599 BUG_ON(trans->paths[idx].sorted_idx != i);
2603 static void btree_trans_verify_sorted(struct btree_trans *trans)
2605 struct btree_path *path, *prev = NULL;
2606 struct trans_for_each_path_inorder_iter iter;
2608 if (!bch2_debug_check_iterators)
2611 trans_for_each_path_inorder(trans, path, iter) {
2612 if (prev && btree_path_cmp(prev, path) > 0) {
2613 __bch2_dump_trans_paths_updates(trans, true);
2614 panic("trans paths out of order!\n");
2620 static inline void btree_trans_verify_sorted_refs(struct btree_trans *trans) {}
2621 static inline void btree_trans_verify_sorted(struct btree_trans *trans) {}
2624 void __bch2_btree_trans_sort_paths(struct btree_trans *trans)
2626 int i, l = 0, r = trans->nr_sorted, inc = 1;
2629 btree_trans_verify_sorted_refs(trans);
2631 if (trans->paths_sorted)
2635 * Cocktail shaker sort: this is efficient because iterators will be mostly sorted.
2641 for (i = inc > 0 ? l : r - 2;
2642 i + 1 < r && i >= l;
2644 if (btree_path_cmp(trans->paths + trans->sorted[i],
2645 trans->paths + trans->sorted[i + 1]) > 0) {
2646 swap(trans->sorted[i], trans->sorted[i + 1]);
2647 trans->paths[trans->sorted[i]].sorted_idx = i;
2648 trans->paths[trans->sorted[i + 1]].sorted_idx = i + 1;
2660 trans->paths_sorted = true;
2662 btree_trans_verify_sorted(trans);
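/*
 * Worked illustration (hypothetical values): cocktail shaker sort alternates
 * forward and backward bubble passes, shrinking the unsorted window from
 * both ends, so a mostly sorted array finishes in very few passes:
 *
 *	[1 2 4 3 5]  forward pass:  swap 4,3  -> [1 2 3 4 5]
 *	[1 2 3 4 5]  backward pass: no swaps  -> done
 *
 * That is why it suits trans->sorted, where paths are usually already in
 * order and sorted_idx must be fixed up after every swap.
 */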
2665 static inline void btree_path_list_remove(struct btree_trans *trans,
2666 struct btree_path *path)
2668 EBUG_ON(path->sorted_idx >= trans->nr_sorted);
2669 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2671 memmove_u64s_down_small(trans->sorted + path->sorted_idx,
2672 trans->sorted + path->sorted_idx + 1,
2673 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2674 sizeof(u64) / sizeof(btree_path_idx_t)));
2676 array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
2678 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2679 trans->paths[trans->sorted[i]].sorted_idx = i;
2682 static inline void btree_path_list_add(struct btree_trans *trans,
2683 btree_path_idx_t pos,
2684 btree_path_idx_t path_idx)
2686 struct btree_path *path = trans->paths + path_idx;
2688 path->sorted_idx = pos ? trans->paths[pos].sorted_idx + 1 : trans->nr_sorted;
2690 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2691 memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
2692 trans->sorted + path->sorted_idx,
2693 DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx,
2694 sizeof(u64) / sizeof(btree_path_idx_t)));
2696 trans->sorted[path->sorted_idx] = path_idx;
2698 array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path_idx);
2701 for (unsigned i = path->sorted_idx; i < trans->nr_sorted; i++)
2702 trans->paths[trans->sorted[i]].sorted_idx = i;
2704 btree_trans_verify_sorted_refs(trans);
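/*
 * Worked illustration (hypothetical values): btree_path_list_add()/remove()
 * must preserve the invariant trans->paths[trans->sorted[i]].sorted_idx == i
 * for all i < trans->nr_sorted - exactly what
 * btree_trans_verify_sorted_refs() checks. E.g. inserting path 7 at sorted
 * position 2:
 *
 *	trans->sorted:        [3 5 9]  ->  [3 5 7 9]
 *	paths[7].sorted_idx:            =  2
 *	paths[9].sorted_idx:  2         -> 3  (fixed up by the final loop)
 */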
2707 void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
2709 if (iter->update_path)
2710 bch2_path_put_nokeep(trans, iter->update_path,
2711 iter->flags & BTREE_ITER_INTENT);
2713 bch2_path_put(trans, iter->path,
2714 iter->flags & BTREE_ITER_INTENT);
2715 if (iter->key_cache_path)
2716 bch2_path_put(trans, iter->key_cache_path,
2717 iter->flags & BTREE_ITER_INTENT);
2719 iter->update_path = 0;
2720 iter->key_cache_path = 0;
2724 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
2725 struct btree_iter *iter,
2726 enum btree_id btree_id, struct bpos pos,
2729 bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
2730 bch2_btree_iter_flags(trans, btree_id, flags),
2734 void bch2_trans_node_iter_init(struct btree_trans *trans,
2735 struct btree_iter *iter,
2736 enum btree_id btree_id,
2738 unsigned locks_want,
2742 flags |= BTREE_ITER_NOT_EXTENTS;
2743 flags |= __BTREE_ITER_ALL_SNAPSHOTS;
2744 flags |= BTREE_ITER_ALL_SNAPSHOTS;
2746 bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
2747 __bch2_btree_iter_flags(trans, btree_id, flags),
2750 iter->min_depth = depth;
2752 struct btree_path *path = btree_iter_path(trans, iter);
2753 BUG_ON(path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
2754 BUG_ON(path->level != depth);
2755 BUG_ON(iter->min_depth != depth);
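/*
 * Example (a minimal sketch): node iterators walk whole btree nodes rather
 * than keys; btree_id and level here are illustrative. Real callers normally
 * use the for_each_btree_node() macro:
 *
 *	struct btree *b;
 *
 *	bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
 *				  0, level, 0);
 *	for (b = bch2_btree_iter_peek_node(&iter);
 *	     !(ret = PTR_ERR_OR_ZERO(b)) && b;
 *	     b = bch2_btree_iter_next_node(&iter)) {
 *		// inspect b
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */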
2758 void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
2760 struct btree_trans *trans = src->trans;
2764 __btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
2765 if (src->update_path)
2766 __btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
2767 dst->key_cache_path = 0;
2770 void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
2772 struct bch_fs *c = trans->c;
2773 unsigned new_top = trans->mem_top + size;
2774 unsigned old_bytes = trans->mem_bytes;
2775 unsigned new_bytes = roundup_pow_of_two(new_top);
2780 WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX);
2782 struct btree_transaction_stats *s = btree_trans_stats(trans);
2783 s->max_mem = max(s->max_mem, new_bytes);
2785 new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);
2786 if (unlikely(!new_mem)) {
2787 bch2_trans_unlock(trans);
2789 new_mem = krealloc(trans->mem, new_bytes, GFP_KERNEL);
2790 if (!new_mem && new_bytes <= BTREE_TRANS_MEM_MAX) {
2791 new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
2792 new_bytes = BTREE_TRANS_MEM_MAX;
2797 return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
2799 trans->mem = new_mem;
2800 trans->mem_bytes = new_bytes;
2802 ret = bch2_trans_relock(trans);
2804 return ERR_PTR(ret);
2807 trans->mem = new_mem;
2808 trans->mem_bytes = new_bytes;
2811 trace_and_count(c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
2812 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
2815 p = trans->mem + trans->mem_top;
2816 trans->mem_top += size;
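/*
 * Example (a minimal sketch): callers allocate through the
 * bch2_trans_kmalloc() wrapper and must cope with both ERR_PTR() failures
 * and the transaction restart raised when reallocation dropped locks:
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)	// possibly a transaction restart; unwind and retry
 *		return ret;
 *
 * Memory is bump-allocated out of trans->mem and reclaimed wholesale when
 * the transaction is reset or freed, so there is no matching kfree().
 */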
2821 static inline void check_srcu_held_too_long(struct btree_trans *trans)
2823 WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
2824 "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
2825 (jiffies - trans->srcu_lock_time) / HZ);
2828 void bch2_trans_srcu_unlock(struct btree_trans *trans)
2830 if (trans->srcu_held) {
2831 struct bch_fs *c = trans->c;
2832 struct btree_path *path;
2835 trans_for_each_path(trans, path, i)
2836 if (path->cached && !btree_node_locked(path, 0))
2837 path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
2839 check_srcu_held_too_long(trans);
2840 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
2841 trans->srcu_held = false;
2845 static void bch2_trans_srcu_lock(struct btree_trans *trans)
2847 if (!trans->srcu_held) {
2848 trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
2849 trans->srcu_lock_time = jiffies;
2850 trans->srcu_held = true;
2855 * bch2_trans_begin() - reset a transaction after an interrupted attempt
2856 * @trans: transaction to reset
2858 * Returns: current restart counter, to be used with trans_was_restarted()
2860 * While iterating over nodes or updating nodes, an attempt to lock a btree
2861 * node may return BCH_ERR_transaction_restart when the trylock fails. When
2862 * this occurs, bch2_trans_begin() should be called and the transaction retried.
2864 u32 bch2_trans_begin(struct btree_trans *trans)
2866 struct btree_path *path;
2870 bch2_trans_reset_updates(trans);
2872 trans->restart_count++;
2874 trans->journal_entries = NULL;
2876 trans_for_each_path(trans, path, i) {
2877 path->should_be_locked = false;
2880 * If the transaction wasn't restarted, we're presuming to be
2881 * doing something new: don't keep iterators except the ones that
2882 * are in use - except for the subvolumes btree:
2884 if (!trans->restarted && path->btree_id != BTREE_ID_subvolumes)
2885 path->preserve = false;
2888 * XXX: we probably shouldn't be doing this if the transaction
2889 * was restarted, but currently we still overflow transaction
2890 * iterators if we do that
2892 if (!path->ref && !path->preserve)
2893 __bch2_path_free(trans, i);
2895 path->preserve = false;
2898 now = local_clock();
2900 if (!IS_ENABLED(CONFIG_BCACHEFS_NO_LATENCY_ACCT) &&
2901 time_after64(now, trans->last_begin_time + 10))
2902 __bch2_time_stats_update(&btree_trans_stats(trans)->duration,
2903 trans->last_begin_time, now);
2905 if (!trans->restarted &&
2907 time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
2908 drop_locks_do(trans, (cond_resched(), 0));
2909 now = local_clock();
2911 trans->last_begin_time = now;
2913 if (unlikely(trans->srcu_held &&
2914 time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
2915 bch2_trans_srcu_unlock(trans);
2917 trans->last_begin_ip = _RET_IP_;
2918 if (trans->restarted) {
2919 bch2_btree_path_traverse_all(trans);
2920 trans->notrace_relock_fail = false;
2923 return trans->restart_count;
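/*
 * Example (a minimal sketch): the canonical retry loop around a transaction
 * body, where do_stuff() is a hypothetical helper:
 *
 *	int ret;
 *
 *	do {
 *		bch2_trans_begin(trans);
 *		ret = do_stuff(trans);
 *	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));
 *
 * This is roughly what the lockrestart_do()/commit_do() helpers expand to.
 */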
2926 const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR] = { "(unknown)" };
2928 unsigned bch2_trans_get_fn_idx(const char *fn)
2930 for (unsigned i = 0; i < ARRAY_SIZE(bch2_btree_transaction_fns); i++)
2931 if (!bch2_btree_transaction_fns[i] ||
2932 bch2_btree_transaction_fns[i] == fn) {
2933 bch2_btree_transaction_fns[i] = fn;
2937 pr_warn_once("BCH_TRANSACTIONS_NR not big enough!");
2941 struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
2942 __acquires(&c->btree_trans_barrier)
2944 struct btree_trans *trans;
2946 if (IS_ENABLED(__KERNEL__)) {
2947 trans = this_cpu_xchg(c->btree_trans_bufs->trans, NULL);
2949 memset(trans, 0, offsetof(struct btree_trans, list));
2954 trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
2955 memset(trans, 0, sizeof(*trans));
2956 closure_init_stack(&trans->ref);
2958 seqmutex_lock(&c->btree_trans_lock);
2959 if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
2960 struct btree_trans *pos;
2961 pid_t pid = current->pid;
2963 trans->locking_wait.task = current;
2965 list_for_each_entry(pos, &c->btree_trans_list, list) {
2966 struct task_struct *pos_task = READ_ONCE(pos->locking_wait.task);
2968 * We'd much prefer to be stricter here and completely
2969 * disallow multiple btree_trans in the same thread -
2970 * but the data move path calls bch2_write when we
2971 * already have a btree_trans initialized.
2974 pid == pos_task->pid &&
2975 bch2_trans_locked(pos));
2977 if (pos_task && pid < pos_task->pid) {
2978 list_add_tail(&trans->list, &pos->list);
2983 list_add_tail(&trans->list, &c->btree_trans_list);
2985 seqmutex_unlock(&c->btree_trans_lock);
2988 trans->last_begin_time = local_clock();
2989 trans->fn_idx = fn_idx;
2990 trans->locking_wait.task = current;
2991 trans->journal_replay_not_finished =
2992 unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
2993 atomic_inc_not_zero(&c->journal_keys.ref);
2994 trans->nr_paths = ARRAY_SIZE(trans->_paths);
2995 trans->paths_allocated = trans->_paths_allocated;
2996 trans->sorted = trans->_sorted;
2997 trans->paths = trans->_paths;
2998 trans->updates = trans->_updates;
3000 *trans_paths_nr(trans->paths) = BTREE_ITER_INITIAL;
3002 trans->paths_allocated[0] = 1;
3004 if (fn_idx < BCH_TRANSACTIONS_NR) {
3005 trans->fn = bch2_btree_transaction_fns[fn_idx];
3007 struct btree_transaction_stats *s = &c->btree_transaction_stats[fn_idx];
3010 unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
3012 trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
3013 if (likely(trans->mem))
3014 trans->mem_bytes = expected_mem_bytes;
3017 trans->nr_paths_max = s->nr_max_paths;
3018 trans->journal_entries_size = s->journal_entries_size;
3021 trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
3022 trans->srcu_lock_time = jiffies;
3023 trans->srcu_held = true;
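/*
 * Example (a minimal sketch): transactions come from the bch2_trans_get()
 * wrapper and must be paired with bch2_trans_put(); do_stuff() is a
 * hypothetical transaction body:
 *
 *	struct btree_trans *trans = bch2_trans_get(c);
 *	int ret = lockrestart_do(trans, do_stuff(trans));
 *
 *	bch2_trans_put(trans);
 */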
3027 static void check_btree_paths_leaked(struct btree_trans *trans)
3029 #ifdef CONFIG_BCACHEFS_DEBUG
3030 struct bch_fs *c = trans->c;
3031 struct btree_path *path;
3034 trans_for_each_path(trans, path, i)
3039 bch_err(c, "btree paths leaked from %s!", trans->fn);
3040 trans_for_each_path(trans, path, i)
3042 printk(KERN_ERR " btree %s %pS\n",
3043 bch2_btree_id_str(path->btree_id),
3044 (void *) path->ip_allocated);
3045 /* Be noisy about this: */
3046 bch2_fatal_error(c);
3050 void bch2_trans_put(struct btree_trans *trans)
3051 __releases(&c->btree_trans_barrier)
3053 struct bch_fs *c = trans->c;
3055 bch2_trans_unlock(trans);
3057 trans_for_each_update(trans, i)
3058 __btree_path_put(trans->paths + i->path, true);
3059 trans->nr_updates = 0;
3060 trans->locking_wait.task = NULL;
3062 check_btree_paths_leaked(trans);
3064 if (trans->srcu_held) {
3065 check_srcu_held_too_long(trans);
3066 srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
3069 if (trans->fs_usage_deltas) {
3070 if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
3071 REPLICAS_DELTA_LIST_MAX)
3072 mempool_free(trans->fs_usage_deltas,
3073 &c->replicas_delta_pool);
3075 kfree(trans->fs_usage_deltas);
3078 if (unlikely(trans->journal_replay_not_finished))
3079 bch2_journal_keys_put(c);
3081 unsigned long *paths_allocated = trans->paths_allocated;
3082 trans->paths_allocated = NULL;
3083 trans->paths = NULL;
3085 if (paths_allocated != trans->_paths_allocated)
3086 kfree_rcu_mightsleep(paths_allocated);
3088 if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
3089 mempool_free(trans->mem, &c->btree_trans_mem_pool);
3093 /* Userspace doesn't have a real percpu implementation: */
3094 if (IS_ENABLED(__KERNEL__))
3095 trans = this_cpu_xchg(c->btree_trans_bufs->trans, trans);
3098 closure_sync(&trans->ref);
3100 seqmutex_lock(&c->btree_trans_lock);
3101 list_del(&trans->list);
3102 seqmutex_unlock(&c->btree_trans_lock);
3104 mempool_free(trans, &c->btree_trans_pool);
3108 static void __maybe_unused
3109 bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
3110 struct btree_bkey_cached_common *b)
3112 struct six_lock_count c = six_lock_counts(&b->lock);
3113 struct task_struct *owner;
3117 owner = READ_ONCE(b->lock.owner);
3118 pid = owner ? owner->pid : 0;
3122 prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
3123 b->level, bch2_btree_id_str(b->btree_id));
3124 bch2_bpos_to_text(out, btree_node_pos(b));
3127 prt_printf(out, " locks %u:%u:%u held by pid %u",
3128 c.n[0], c.n[1], c.n[2], pid);
3131 void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
3133 struct btree_bkey_cached_common *b;
3134 static char lock_types[] = { 'r', 'i', 'w' };
3135 struct task_struct *task = READ_ONCE(trans->locking_wait.task);
3138 /* before rcu_read_lock(): */
3139 bch2_printbuf_make_room(out, 4096);
3141 if (!out->nr_tabstops) {
3142 printbuf_tabstop_push(out, 16);
3143 printbuf_tabstop_push(out, 32);
3146 prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
3148 /* trans->paths is rcu protected vs. freeing */
3152 struct btree_path *paths = rcu_dereference(trans->paths);
3156 unsigned long *paths_allocated = trans_paths_allocated(paths);
3158 trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths), idx, 1) {
3159 struct btree_path *path = paths + idx;
3160 if (!path->nodes_locked)
3163 prt_printf(out, " path %u %c l=%u %s:",
3165 path->cached ? 'c' : 'b',
3167 bch2_btree_id_str(path->btree_id));
3168 bch2_bpos_to_text(out, path->pos);
3171 for (l = 0; l < BTREE_MAX_DEPTH; l++) {
3172 if (btree_node_locked(path, l) &&
3173 !IS_ERR_OR_NULL(b = (void *) READ_ONCE(path->l[l].b))) {
3174 prt_printf(out, " %c l=%u ",
3175 lock_types[btree_node_locked_type(path, l)], l);
3176 bch2_btree_bkey_cached_common_to_text(out, b);
3182 b = READ_ONCE(trans->locking);
3184 prt_printf(out, " blocked for %lluus on",
3185 div_u64(local_clock() - trans->locking_wait.start_time,
3188 prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
3189 bch2_btree_bkey_cached_common_to_text(out, b);
3197 void bch2_fs_btree_iter_exit(struct bch_fs *c)
3199 struct btree_transaction_stats *s;
3200 struct btree_trans *trans;
3203 if (c->btree_trans_bufs)
3204 for_each_possible_cpu(cpu) {
3205 struct btree_trans *trans =
3206 per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
3209 closure_sync(&trans->ref);
3211 seqmutex_lock(&c->btree_trans_lock);
3212 list_del(&trans->list);
3213 seqmutex_unlock(&c->btree_trans_lock);
3217 free_percpu(c->btree_trans_bufs);
3219 trans = list_first_entry_or_null(&c->btree_trans_list, struct btree_trans, list);
3221 panic("%s leaked btree_trans\n", trans->fn);
3223 for (s = c->btree_transaction_stats;
3224 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3226 kfree(s->max_paths_text);
3227 bch2_time_stats_exit(&s->lock_hold_times);
3230 if (c->btree_trans_barrier_initialized)
3231 cleanup_srcu_struct(&c->btree_trans_barrier);
3232 mempool_exit(&c->btree_trans_mem_pool);
3233 mempool_exit(&c->btree_trans_pool);
3236 void bch2_fs_btree_iter_init_early(struct bch_fs *c)
3238 struct btree_transaction_stats *s;
3240 for (s = c->btree_transaction_stats;
3241 s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
3243 bch2_time_stats_init(&s->duration);
3244 bch2_time_stats_init(&s->lock_hold_times);
3245 mutex_init(&s->lock);
3248 INIT_LIST_HEAD(&c->btree_trans_list);
3249 seqmutex_init(&c->btree_trans_lock);
3252 int bch2_fs_btree_iter_init(struct bch_fs *c)
3256 c->btree_trans_bufs = alloc_percpu(struct btree_trans_buf);
3257 if (!c->btree_trans_bufs)
3260 ret = mempool_init_kmalloc_pool(&c->btree_trans_pool, 1,
3261 sizeof(struct btree_trans)) ?:
3262 mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
3263 BTREE_TRANS_MEM_MAX) ?:
3264 init_srcu_struct(&c->btree_trans_barrier);
3266 c->btree_trans_barrier_initialized = true;