/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"

#include <trace/events/bcachefs.h>

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @path might have taken a write lock on @b, and we don't want to skip
	 * the linked path if the sequence numbers were equal before taking that
	 * write lock. The lock sequence number is incremented by taking and
	 * releasing write locks and is even when unlocked:
	 */
	return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

/* Iterate over paths within a transaction: */

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}

void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);

#define trans_for_each_path_from(_trans, _path, _start)			\
	for (_path = __trans_next_path((_trans), _start);		\
	     (_path);							\
	     _path = __trans_next_path((_trans), (_path)->idx + 1))

#define trans_for_each_path(_trans, _path)				\
	trans_for_each_path_from(_trans, _path, 0)

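/*
 * Example (illustrative sketch, not part of the original interface): walking
 * every btree_path held by a transaction. Only names declared above are
 * assumed:
 *
 *	struct btree_path *path;
 *
 *	trans_for_each_path(trans, path)
 *		if (path->nodes_locked)
 *			... inspect or release this path's locks ...
 */
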
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
	return path->sorted_idx
		? trans->paths + trans->sorted[path->sorted_idx - 1]
		: NULL;
}

#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) < (_trans)->nr_sorted;\
	     _i++)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))

struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
					      bool, unsigned long);

static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
			 unsigned long ip)
{
	if (path->ref > 1 || path->preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	path->should_be_locked = false;
	return path;
}

struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
			  struct bpos, bool, unsigned long, int);

static inline struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			struct btree_path *path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, path->pos);

	return cmp
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
		: path;
}

int __must_check bch2_btree_path_traverse(struct btree_trans *,
					  struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					    struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

int bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);

static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count;
}

void bch2_trans_verify_not_restarted(struct btree_trans *, u32);

static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	return -err;
}

static inline int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(iter->trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = NULL;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

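/*
 * Example (illustrative, assuming an iterator already initialized elsewhere):
 * repointing an iterator at the same inode:offset in a different snapshot
 * before re-peeking:
 *
 *	bch2_btree_iter_set_snapshot(&iter, snapshot);
 *	k = bch2_btree_iter_peek_slot(&iter);
 */
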
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (flags & BTREE_ITER_ALL_LEVELS)
		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_WITH_JOURNAL;

	return flags;
}

static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}

static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned locks_want,
					       unsigned depth,
					       unsigned flags,
					       unsigned long ip)
{
	memset(iter, 0, sizeof(*iter));
	iter->trans	= trans;
	iter->btree_id	= btree_id;
	iter->flags	= flags;
	iter->snapshot	= pos.snapshot;
	iter->pos	= pos;
	iter->k.p	= pos;

#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
				   unsigned, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
					struct btree_iter *iter,
					unsigned btree_id, struct bpos pos,
					unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}

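/*
 * Example (illustrative sketch; the btree, position and flags are
 * placeholders): initializing an iterator, peeking a single slot, and
 * tearing the iterator down again:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
 *			     POS(0, inum), BTREE_ITER_CACHED);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	...
 *	bch2_trans_iter_exit(trans, &iter);
 */
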
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	if (!iter->trans->restarted)
		iter->path->preserve = false;
}

void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}

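/*
 * Example (illustrative): memory from bch2_trans_kmalloc() lives until the
 * transaction is torn down, so callers only check for an error pointer and
 * never free. A common pattern is copying a key before modifying it, which
 * is what bch2_bkey_make_mut() below wraps up:
 *
 *	struct bkey_i *new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
 *	int ret = PTR_ERR_OR_ZERO(new);
 *	if (ret)
 *		return ret;
 *	bkey_reassemble(new, k);
 */
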
static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));

	if (!IS_ERR(mut))
		bkey_reassemble(mut, k);
	return mut;
}

static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
					       struct btree_iter *iter)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	return unlikely(IS_ERR(k.k))
		? ERR_CAST(k.k)
		: bch2_bkey_make_mut(trans, k);
}

#define bch2_bkey_get_mut_typed(_trans, _iter, _type)			\
({									\
	struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter);		\
	struct bkey_i_##_type *_ret;					\
									\
	if (IS_ERR(_k))							\
		_ret = ERR_CAST(_k);					\
	else if (unlikely(_k->k.type != KEY_TYPE_##_type))		\
		_ret = ERR_PTR(-ENOENT);				\
	else								\
		_ret = bkey_i_to_##_type(_k);				\
	_ret;								\
})

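/*
 * Example (illustrative; alloc_v4 stands in for any key type with a
 * KEY_TYPE_##_type variant): fetching the key at an iterator as a specific
 * mutable type, getting -ENOENT if the key is of a different type:
 *
 *	struct bkey_i_alloc_v4 *a =
 *		bch2_bkey_get_mut_typed(trans, &iter, alloc_v4);
 *	int ret = PTR_ERR_OR_ZERO(a);
 *	if (ret)
 *		return ret;
 */
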
#define bch2_bkey_alloc(_trans, _iter, _type)				\
({									\
	struct bkey_i_##_type *_k = bch2_trans_kmalloc_nomemzero(_trans, sizeof(*_k));\
	if (!IS_ERR(_k)) {						\
		bkey_##_type##_init(&_k->k_i);				\
		_k->k.p = (_iter)->pos;					\
	}								\
	_k;								\
})

u32 bch2_trans_begin(struct btree_trans *);

static inline struct btree *
__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(trans);

	return b;
}

/*
 * XXX: this does not handle transaction restarts from
 * bch2_btree_iter_next_node() correctly
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)

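/*
 * Example (illustrative): walking the nodes of one btree. _ret is set on
 * error, and the iterator must still be exited afterwards:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *		pr_info("node at level %u", b->c.level);
 *	bch2_trans_iter_exit(trans, &iter);
 */
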
static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}

static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	BUG_ON(flags & BTREE_ITER_ALL_LEVELS);

	return  flags & BTREE_ITER_SLOTS      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_ALL_LEVELS ? bch2_btree_iter_peek_all_levels(iter) :
		flags & BTREE_ITER_SLOTS      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_SLOTS))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
		trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
	}

	return 0;
}

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

static inline struct bkey_s_c
__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos end, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}

#define lockrestart_do(_trans, _do)					\
({									\
	u32 _restart_count;						\
	int _ret;							\
									\
	do {								\
		_restart_count = bch2_trans_begin(_trans);		\
		_ret = (_do);						\
	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret;								\
})

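/*
 * Example (illustrative; do_thing() is a hypothetical helper returning a
 * bch2 error code): retrying a transactional operation until it completes
 * without a transaction restart:
 *
 *	ret = lockrestart_do(trans, do_thing(trans, args));
 */
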
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
		_ret = -BCH_ERR_transaction_restart_nested;		\
									\
	_ret;								\
})

#define for_each_btree_key2(_trans, _iter, _btree_id,			\
			    _start, _flags, _k, _do)			\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
									\
		_ret = 0;						\
		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
		if (!(_k).k)						\
			break;						\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})

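/*
 * Example (illustrative; check_key() is a hypothetical helper): _do is
 * evaluated once per key, and transaction restarts are handled by
 * re-peeking at the current position:
 *
 *	ret = for_each_btree_key2(trans, iter, BTREE_ID_extents,
 *				  POS_MIN, BTREE_ITER_PREFETCH, k,
 *				  check_key(trans, &iter, k));
 */
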
#define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
			    _start, _end, _flags, _k, _do)		\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
									\
		_ret = 0;						\
		(_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
		if (!(_k).k)						\
			break;						\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})

#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
		(_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
		if (!(_k).k) {						\
			_ret = 0;					\
			break;						\
		}							\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_rewind(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})

#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

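/*
 * Example (illustrative; update_key() is a hypothetical helper, and
 * BTREE_INSERT_NOFAIL comes from the update interface, not this header):
 * iterating with a commit after each key, so each _do runs in its own
 * transaction commit:
 *
 *	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 *			POS_MIN, BTREE_ITER_PREFETCH, k,
 *			NULL, NULL, BTREE_INSERT_NOFAIL,
 *			update_key(trans, &iter, k));
 */
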
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans),	\
						&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

/* new multiple iterator interface: */

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
void __bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned);
void bch2_trans_exit(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);

#define bch2_trans_init(_trans, _c, _nr_iters, _mem)			\
do {									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
									\
	__bch2_trans_init(_trans, _c, trans_fn_idx);			\
} while (0)

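/*
 * Example (illustrative; do_thing() is a hypothetical helper): the typical
 * transaction lifecycle around the machinery above:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	ret = lockrestart_do(&trans, do_thing(&trans));
 *	bch2_trans_exit(&trans);
 */
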
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */