/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "btree_types.h"

#include <trace/events/bcachefs.h>
static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}
static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}
static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}
static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @path might have taken a write lock on @b, and we don't want to skip
	 * the linked path if the sequence numbers were equal before taking that
	 * write lock. The lock sequence number is incremented by taking and
	 * releasing write locks and is even when unlocked:
	 */
	return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}
/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);
static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}
static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}
#define trans_for_each_path_from(_trans, _path, _start)		\
	for (_path = __trans_next_path((_trans), _start);		\
	     (_path);							\
	     _path = __trans_next_path((_trans), (_path)->idx + 1))

#define trans_for_each_path(_trans, _path)				\
	trans_for_each_path_from(_trans, _path, 0)
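/*
 * Illustrative sketch (not part of the upstream header): walking every
 * allocated path in a transaction, e.g. for debug output. @trans is assumed
 * valid and @buf is a hypothetical printbuf:
 *
 *	struct btree_path *path;
 *
 *	trans_for_each_path(trans, path)
 *		if (path->nodes_locked)
 *			bch2_btree_path_to_text(&buf, path);
 */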
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}
static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}
#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) < (_trans)->nr_sorted;\
	     _i++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = trans->nr_sorted - 1;					\
	     ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) >= 0;\
	     --_i)
static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}
static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}
#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))
struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
					      bool, unsigned long);
static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
			 unsigned long ip)
{
	if (path->ref > 1 || path->preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	path->should_be_locked = false;
	return path;
}
struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
			  struct bpos, bool, unsigned long, int);
static inline struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			struct btree_path *path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, path->pos);

	return cmp
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
		: path;
}
int __must_check bch2_btree_path_traverse_one(struct btree_trans *, struct btree_path *,
					      unsigned, unsigned long);
static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					struct btree_path *path, unsigned flags)
{
	if (path->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					    struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif
void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);
static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count;
}

void bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}
static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

static inline int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}
bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);
static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}
void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);
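/*
 * Rough guide to the peek variants above (editorial summary):
 * bch2_btree_iter_peek() returns the first key at or after the iterator's
 * position, bch2_btree_iter_peek_slot() returns the key at the current
 * position (synthesizing a hole if nothing is present),
 * bch2_btree_iter_peek_upto() stops at an end position, and the _prev
 * variants search backwards.
 */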
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size		= 0;
}
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(iter->trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = NULL;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}
static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (flags & BTREE_ITER_ALL_LEVELS)
		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_WITH_JOURNAL;

	return flags;
}
static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}
static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					       struct btree_iter *iter,
					       unsigned btree_id, struct bpos pos,
					       unsigned locks_want,
					       unsigned depth,
					       unsigned flags,
					       unsigned long ip)
{
	memset(iter, 0, sizeof(*iter));
	iter->trans	= trans;
	iter->btree_id	= btree_id;
	iter->flags	= flags;
	iter->snapshot	= pos.snapshot;
	iter->pos	= pos;
	iter->k.p	= pos;

#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}
void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
				   unsigned, struct bpos, unsigned);
static inline void bch2_trans_iter_init(struct btree_trans *trans,
					struct btree_iter *iter,
					unsigned btree_id, struct bpos pos,
					unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
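/*
 * Illustrative usage sketch (not part of the upstream header): look up a
 * single key with an on-stack iterator. Transaction restarts are the caller's
 * responsibility here; see lockrestart_do() and the for_each_btree_key*()
 * macros below. The btree, position and flags are example choices:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
 *			     BTREE_ITER_SLOTS);
 *	k = bch2_btree_iter_peek_slot(&iter);
 *	ret = bkey_err(k);
 *	if (!ret)
 *		... use k ...
 *	bch2_trans_iter_exit(trans, &iter);
 */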
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	if (!iter->trans->restarted)
		iter->path->preserve = false;
}
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
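/*
 * Illustrative sketch (not part of the upstream header): bch2_trans_kmalloc()
 * allocations come from the transaction's bump allocator and live until the
 * transaction restarts or exits; there is no corresponding free. A common
 * pattern is allocating a mutable copy of a key:
 *
 *	struct bkey_i *copy = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
 *	int ret = PTR_ERR_OR_ZERO(copy);
 *
 *	if (ret)
 *		return ret;
 *	bkey_reassemble(copy, k);
 */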
static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));

	if (!IS_ERR(mut))
		bkey_reassemble(mut, k);
	return mut;
}
static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
					       struct btree_iter *iter)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	return unlikely(IS_ERR(k.k))
		? ERR_CAST(k.k)
		: bch2_bkey_make_mut(trans, k);
}
#define bch2_bkey_get_mut_typed(_trans, _iter, _type)			\
({									\
	struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter);		\
	struct bkey_i_##_type *_ret;					\
									\
	if (IS_ERR(_k))							\
		_ret = ERR_CAST(_k);					\
	else if (unlikely(_k->k.type != KEY_TYPE_##_type))		\
		_ret = ERR_PTR(-ENOENT);				\
	else								\
		_ret = bkey_i_to_##_type(_k);				\
	_ret;								\
})
#define bch2_bkey_alloc(_trans, _iter, _type)				\
({									\
	struct bkey_i_##_type *_k = bch2_trans_kmalloc_nomemzero(_trans, sizeof(*_k));\
									\
	if (!IS_ERR(_k)) {						\
		bkey_##_type##_init(&_k->k_i);				\
		_k->k.p = (_iter)->pos;					\
	}								\
	_k;								\
})
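/*
 * Illustrative sketch (not part of the upstream header), using the test-only
 * cookie key type as the example; bch2_trans_update() is declared in
 * btree_update.h:
 *
 *	struct bkey_i_cookie *new_k = bch2_bkey_alloc(trans, &iter, cookie);
 *	int ret = PTR_ERR_OR_ZERO(new_k);
 *
 *	if (ret)
 *		return ret;
 *	new_k->v.cookie = cpu_to_le64(42);
 *	ret = bch2_trans_update(trans, &iter, &new_k->k_i, 0);
 */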
u32 bch2_trans_begin(struct btree_trans *);
static inline struct btree *
__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(trans);

	return b;
}
/*
 * Note: this does not handle transaction restarts from
 * bch2_btree_iter_next_node() correctly.
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _ret)
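/*
 * Illustrative sketch (not part of the upstream header): walking the nodes of
 * a btree, e.g. for debug output; @buf is a hypothetical printbuf and the
 * btree id is an example choice:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *		bch2_btree_node_to_text(&buf, trans->c, b);
 *	bch2_trans_iter_exit(trans, &iter);
 */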
static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							      unsigned flags)
{
	BUG_ON(flags & BTREE_ITER_ALL_LEVELS);

	return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							 unsigned flags)
{
	return flags & BTREE_ITER_ALL_LEVELS ? bch2_btree_iter_peek_all_levels(iter) :
	       flags & BTREE_ITER_SLOTS	     ? bch2_btree_iter_peek_slot(iter) :
					       bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							      struct bpos end,
							      unsigned flags)
{
	if (!(flags & BTREE_ITER_SLOTS))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
		trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
	}

	return 0;
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos end, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}
#define lockrestart_do(_trans, _do)					\
({									\
	u32 _restart_count;						\
	int _ret;							\
									\
	do {								\
		_restart_count = bch2_trans_begin(_trans);		\
		_ret = (_do);						\
	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret;								\
})
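/*
 * Illustrative sketch (not part of the upstream header): retry an operation
 * until it completes without a transaction restart; bch2_trans_begin() is
 * called at the top of each attempt by the macro itself. do_thing() is a
 * placeholder:
 *
 *	ret = lockrestart_do(trans, do_thing(trans, arg));
 *
 * commit_do(), mentioned in the comment below, wraps the same retry loop
 * around an update followed by bch2_trans_commit().
 */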
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
		_ret = -BCH_ERR_transaction_restart_nested;		\
									\
	_ret;								\
})
#define for_each_btree_key2(_trans, _iter, _btree_id,			\
			    _start, _flags, _k, _do)			\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
									\
		_ret = 0;						\
		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
		if (!(_k).k)						\
			break;						\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
			    _start, _end, _flags, _k, _do)		\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
									\
		_ret = 0;						\
		(_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
		if (!(_k).k)						\
			break;						\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
			    _start, _flags, _k, _do)			\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
		(_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
		if (!(_k).k) {						\
			_ret = 0;					\
			break;						\
		}							\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_rewind(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))

#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
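/*
 * Illustrative sketch (not part of the upstream header): visit every key in a
 * btree and commit an update per key, transparently handling transaction
 * restarts. do_one_key() is a placeholder; btree id and flags are example
 * choices:
 *
 *	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 *			POS_MIN, BTREE_ITER_PREFETCH, k,
 *			NULL, NULL, BTREE_INSERT_NOFAIL,
 *		do_one_key(trans, &iter, k));
 */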
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans),	\
						&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
/* new multiple iterator interface: */

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
void __bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned);
void bch2_trans_exit(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);
#define bch2_trans_init(_trans, _c, _nr_iters, _mem)			\
do {									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
									\
	__bch2_trans_init(_trans, _c, trans_fn_idx);			\
} while (0)
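/*
 * Illustrative sketch (not part of the upstream header): typical transaction
 * lifecycle. do_something() is a placeholder; note that _nr_iters and _mem
 * are accepted but unused by the wrapper above:
 *
 *	struct btree_trans trans;
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret = 0;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *
 *	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret)
 *		do_something(k);
 *	bch2_trans_iter_exit(&trans, &iter);
 *
 *	bch2_trans_exit(&trans);
 */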
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */