/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"
#include "trace.h"

static inline int __bkey_err(const struct bkey *k)
{
	return PTR_ERR_OR_ZERO(k);
}

#define bkey_err(_k)	__bkey_err((_k).k)
static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}
static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	return path->l[level].lock_seq == six_lock_seq(&b->c.lock);
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}
/* Iterate over paths within a transaction: */

void __bch2_btree_trans_sort_paths(struct btree_trans *);

static inline void btree_trans_sort_paths(struct btree_trans *trans)
{
	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    trans->paths_sorted)
		return;
	__bch2_btree_trans_sort_paths(trans);
}
static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}
#define trans_for_each_path_from(_trans, _path, _start)			\
	for (_path = __trans_next_path((_trans), _start);		\
	     (_path);							\
	     _path = __trans_next_path((_trans), (_path)->idx + 1))

#define trans_for_each_path(_trans, _path)				\
	trans_for_each_path_from(_trans, _path, 0)
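/*
 * Example (illustrative sketch, not part of this interface): walk every
 * allocated path in a transaction, e.g. to count intent-locked paths:
 *
 *	struct btree_path *path;
 *	unsigned nr_intent = 0;
 *
 *	trans_for_each_path(trans, path)
 *		nr_intent += path->intent_ref != 0;
 */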
static inline struct btree_path *
__trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
{
	u64 l;

	if (*idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> *idx;
	if (!l)
		return NULL;

	*idx += __ffs64(l);
	EBUG_ON(*idx >= BTREE_ITER_MAX);
	return &trans->paths[*idx];
}
/*
 * This version is intended to be safe for use on a btree_trans that is owned
 * by another thread, e.g. for bch2_btree_trans_to_text():
 */
#define trans_for_each_path_safe_from(_trans, _path, _idx, _start)	\
	for (_idx = _start;						\
	     (_path = __trans_next_path_safe((_trans), &_idx));		\
	     _idx++)

#define trans_for_each_path_safe(_trans, _path, _idx)			\
	trans_for_each_path_safe_from(_trans, _path, _idx, 0)
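/*
 * Example (illustrative sketch): the _safe variant keeps the index in a
 * caller-owned variable and revalidates it each step, so it won't walk off
 * the paths array if the owning thread frees paths concurrently:
 *
 *	struct btree_path *path;
 *	unsigned idx;
 *
 *	trans_for_each_path_safe(trans, path, idx)
 *		bch2_btree_path_to_text(out, path);
 */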
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx : trans->nr_sorted;

	return idx
		? trans->paths + trans->sorted[idx - 1]
		: NULL;
}
#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) < (_trans)->nr_sorted;\
	     _i++)

#define trans_for_each_path_inorder_reverse(_trans, _path, _i)		\
	for (_i = (_trans)->nr_sorted - 1;				\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) >= 0;\
	     --_i)
static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}
#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))
struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
			 bool, unsigned long);

static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
			 unsigned long ip)
{
	if (path->ref > 1 || path->preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	path->should_be_locked = false;
	return path;
}
struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
			struct bpos, bool, unsigned long, int);

static inline struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
			struct btree_path *path, struct bpos new_pos,
			bool intent, unsigned long ip)
{
	int cmp = bpos_cmp(new_pos, path->pos);

	return cmp
		? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
		: path;
}
int __must_check bch2_btree_path_traverse_one(struct btree_trans *, struct btree_path *,
					      unsigned, unsigned long);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
					  struct btree_path *path, unsigned flags)
{
	if (path->uptodate < BTREE_ITER_NEED_RELOCK)
		return 0;

	return bch2_btree_path_traverse_one(trans, path, flags, _RET_IP_);
}
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);

static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
{
	return mutex_trylock(lock)
		? 0
		: __bch2_trans_mutex_lock(trans, lock);
}
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif
void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);
static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count;
}

void bch2_trans_restart_error(struct btree_trans *, u32);

static inline void bch2_trans_verify_not_restarted(struct btree_trans *trans,
						   u32 restart_count)
{
	if (trans_was_restarted(trans, restart_count))
		bch2_trans_restart_error(trans, restart_count);
}

void bch2_trans_in_restart_error(struct btree_trans *);

static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
{
	if (trans->restarted)
		bch2_trans_in_restart_error(trans);
}
__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(err <= 0);
	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	trans->last_restarted_ip = _THIS_IP_;
	return -err;
}

__always_inline
static int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}
bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}
void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size = 0;
}
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(iter->trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = NULL;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);

static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
					       unsigned btree_id,
					       unsigned flags)
{
	if (flags & BTREE_ITER_ALL_LEVELS)
		flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
	    btree_node_type_is_extents(btree_id))
		flags |= BTREE_ITER_IS_EXTENTS;

	if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	    !btree_type_has_snapshots(btree_id))
		flags &= ~BTREE_ITER_ALL_SNAPSHOTS;

	if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	    btree_type_has_snapshots(btree_id))
		flags |= BTREE_ITER_FILTER_SNAPSHOTS;

	if (trans->journal_replay_not_finished)
		flags |= BTREE_ITER_WITH_JOURNAL;

	return flags;
}
static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
					     unsigned btree_id,
					     unsigned flags)
{
	if (!btree_id_cached(trans->c, btree_id)) {
		flags &= ~BTREE_ITER_CACHED;
		flags &= ~BTREE_ITER_WITH_KEY_CACHE;
	} else if (!(flags & BTREE_ITER_CACHED))
		flags |= BTREE_ITER_WITH_KEY_CACHE;

	return __bch2_btree_iter_flags(trans, btree_id, flags);
}
static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
					  struct btree_iter *iter,
					  unsigned btree_id, struct bpos pos,
					  unsigned locks_want,
					  unsigned depth,
					  unsigned flags,
					  unsigned long ip)
{
	memset(iter, 0, sizeof(*iter));
	iter->trans	= trans;
	iter->btree_id	= btree_id;
	iter->flags	= flags;
	iter->snapshot	= pos.snapshot;
	iter->pos	= pos;
	iter->k.p	= pos;

#ifdef CONFIG_BCACHEFS_DEBUG
	iter->ip_allocated = ip;
#endif
	iter->path = bch2_path_get(trans, btree_id, iter->pos,
				   locks_want, depth, flags, ip);
}
void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
			  unsigned, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
			  struct btree_iter *iter,
			  unsigned btree_id, struct bpos pos,
			  unsigned flags)
{
	if (__builtin_constant_p(btree_id) &&
	    __builtin_constant_p(flags))
		bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
				bch2_btree_iter_flags(trans, btree_id, flags),
				_THIS_IP_);
	else
		bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	if (!iter->trans->restarted)
		iter->path->preserve = false;
}
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);

static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		memset(p, 0, size);
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
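/*
 * Example (illustrative sketch): transaction-lifetime allocation; the
 * memory is freed at bch2_trans_exit(), and failures come back as an
 * ERR_PTR:
 *
 *	struct bkey_i *update = bch2_trans_kmalloc(trans, sizeof(*update));
 *	int ret = PTR_ERR_OR_ZERO(update);
 *	if (ret)
 *		return ret;
 */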
static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
{
	size = roundup(size, 8);

	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
		void *p = trans->mem + trans->mem_top;

		trans->mem_top += size;
		return p;
	} else {
		return __bch2_trans_kmalloc(trans, size);
	}
}
static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
				struct btree_iter *iter,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type)
{
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
	k = bch2_btree_iter_peek_slot(iter);

	if (!bkey_err(k) && type && k.k->type != type)
		k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
	if (unlikely(bkey_err(k)))
		bch2_trans_iter_exit(trans, iter);
	return k;
}
static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
					struct btree_iter *iter,
					unsigned btree_id, struct bpos pos,
					unsigned flags)
{
	return __bch2_bkey_get_iter(trans, iter, btree_id, pos, flags, 0);
}

#define bch2_bkey_get_iter_typed(_trans, _iter, _btree_id, _pos, _flags, _type)\
	bkey_s_c_to_##_type(__bch2_bkey_get_iter(_trans, _iter,		\
				_btree_id, _pos, _flags, KEY_TYPE_##_type))
static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
				unsigned btree_id, struct bpos pos,
				unsigned flags, unsigned type,
				unsigned val_size, void *val)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = __bch2_bkey_get_iter(trans, &iter, btree_id, pos, flags, type);
	ret = bkey_err(k);
	if (!ret) {
		unsigned b = min_t(unsigned, bkey_val_bytes(k.k), val_size);

		memcpy(val, k.v, b);
		if (unlikely(b < val_size))
			memset((void *) val + b, 0, val_size - b);
		bch2_trans_iter_exit(trans, &iter);
	}

	return ret;
}
#define bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags, _type, _val)\
	__bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,	\
				  KEY_TYPE_##_type, sizeof(*_val), _val)
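/*
 * Example (illustrative sketch, mirroring how snapshot lookup uses this):
 * copy out the value of a key of known type, with the tail zero-filled if
 * the on-disk value is smaller than the buffer:
 *
 *	struct bch_snapshot s;
 *	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
 *					  POS(0, id), 0, snapshot, &s);
 */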
u32 bch2_trans_begin(struct btree_trans *);

/*
 * XXX
 * this does not handle transaction restarts from bch2_btree_iter_next_node()
 * correctly
 */
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)),	\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))
#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)
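/*
 * Example (illustrative sketch): walk the nodes of a btree, e.g. for a
 * consistency check; _ret receives any traversal error:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret = 0;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret)
 *		pr_info("node at level %u", b->c.level);
 *	bch2_trans_iter_exit(trans, &iter);
 */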
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	BUG_ON(flags & BTREE_ITER_ALL_LEVELS);

	return  flags & BTREE_ITER_SLOTS      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return  flags & BTREE_ITER_ALL_LEVELS ? bch2_btree_iter_peek_all_levels(iter) :
		flags & BTREE_ITER_SLOTS      ? bch2_btree_iter_peek_slot(iter) :
						bch2_btree_iter_peek(iter);
}
static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_SLOTS))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_gt(iter->pos, end))
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
		trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
	}

	return 0;
}
struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
					struct btree_iter *iter,
					struct bpos end,
					unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}
#define lockrestart_do(_trans, _do)					\
({									\
	u32 _restart_count;						\
	int _ret;							\
									\
	do {								\
		_restart_count = bch2_trans_begin(_trans);		\
		_ret = (_do);						\
	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret;								\
})
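/*
 * Example (illustrative sketch; do_foo() is a stand-in for any
 * transactional operation): retry until the operation stops returning a
 * transaction restart error:
 *
 *	ret = lockrestart_do(trans, do_foo(trans));
 */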
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
		_ret = -BCH_ERR_transaction_restart_nested;		\
									\
	_ret;								\
})
#define for_each_btree_key2(_trans, _iter, _btree_id,			\
			    _start, _flags, _k, _do)			\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
									\
		_ret = 0;						\
		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
		if (!(_k).k)						\
			break;						\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
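/*
 * Example (illustrative sketch): sum extent sizes across a btree; _do is
 * an expression evaluated with _k valid, and must evaluate to an int
 * error code (0 here, via a statement expression):
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	u64 sectors = 0;
 *
 *	ret = for_each_btree_key2(trans, iter, BTREE_ID_extents,
 *				  POS_MIN, 0, k,
 *				  ({ sectors += k.k->size; 0; }));
 */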
#define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
			    _start, _end, _flags, _k, _do)		\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
									\
		_ret = 0;						\
		(_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
		if (!(_k).k)						\
			break;						\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
		(_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
		if (!(_k).k) {						\
			_ret = 0;					\
			break;						\
		}							\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_rewind(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
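/*
 * Example (illustrative sketch; do_update() is a stand-in returning 0 or
 * an error): visit every key with an intent lock and commit after each
 * iteration:
 *
 *	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 *			POS_MIN, BTREE_ITER_INTENT, k,
 *			NULL, NULL, BTREE_INSERT_NOFAIL,
 *		do_update(trans, &iter, k));
 */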
#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
				  _start, _end, _iter_flags, _k,	\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
				_start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans),	\
						&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
	for (;								\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	_do ?: bch2_trans_relock(_trans);				\
})
#define allocate_dropping_locks_errcode(_trans, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	int _ret = _do;							\
									\
	if (bch2_err_matches(_ret, ENOMEM)) {				\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, _do);			\
	}								\
	_ret;								\
})
#define allocate_dropping_locks(_trans, _ret, _do)			\
({									\
	gfp_t _gfp = GFP_NOWAIT|__GFP_NOWARN;				\
	typeof(_do) _p = _do;						\
									\
	_ret = 0;							\
	if (unlikely(!_p)) {						\
		_gfp = GFP_KERNEL;					\
		_ret = drop_locks_do(_trans, ((_p = _do), 0));		\
	}								\
	_p;								\
})
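/*
 * Example (illustrative sketch; struct foo is a stand-in): attempt an
 * allocation with GFP_NOWAIT while btree locks are held, then drop locks
 * and retry with GFP_KERNEL; the expression must allocate with the
 * provided _gfp variable:
 *
 *	int ret;
 *	struct foo *f = allocate_dropping_locks(trans, ret,
 *				kzalloc(sizeof(*f), _gfp));
 *	if (!f && !ret)
 *		ret = -ENOMEM;
 *	if (ret)
 *		return ret;
 */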
/* new multiple iterator interface: */

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

void __bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned);
void bch2_trans_exit(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);
#define bch2_trans_init(_trans, _c, _nr_iters, _mem)			\
do {									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
									\
	__bch2_trans_init(_trans, _c, trans_fn_idx);			\
} while (0)
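/*
 * Example (illustrative sketch): the usual transaction lifecycle around
 * the helpers above; _nr_iters and _mem are accepted but unused by the
 * macro body:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	ret = lockrestart_do(&trans, do_foo(&trans));
 *	bch2_trans_exit(&trans);
 */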
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */