/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "btree_types.h"

#include <trace/events/bcachefs.h>
static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}
static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}
static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}
static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @path might have taken a write lock on @b, and we don't want to skip
	 * the linked path if the sequence numbers were equal before taking that
	 * write lock. The lock sequence number is incremented by taking and
	 * releasing write locks and is even when unlocked:
	 */
	return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}
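
/*
 * Worked example (illustrative only, not from the original source): a node
 * that is unlocked at lock sequence 4 goes to 5 when a write lock is taken
 * and to 6 when it is released.  Since 4 >> 1 == 5 >> 1 == 2, a path that
 * recorded lock_seq 4 still matches while the write lock it took is held,
 * but no longer matches once that lock has been dropped (6 >> 1 == 3).
 */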
static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}
/* Iterate over paths within a transaction: */
static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}
void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);
#define trans_for_each_path_from(_trans, _path, _start)		\
	for (_path = __trans_next_path((_trans), _start);		\
	     (_path);							\
	     _path = __trans_next_path((_trans), (_path)->idx + 1))

#define trans_for_each_path(_trans, _path)				\
	trans_for_each_path_from(_trans, _path, 0)
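
/*
 * Usage sketch (an assumption about typical callers, not something defined
 * here): walking every btree_path currently allocated in a transaction, e.g.
 * for debugging or validation:
 *
 *	struct btree_path *path;
 *
 *	trans_for_each_path(trans, path)
 *		pr_debug("path %u: btree %u level %u\n",
 *			 path->idx, path->btree_id, path->level);
 */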
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}
static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
	return path->sorted_idx
		? trans->paths + trans->sorted[path->sorted_idx - 1]
		: NULL;
}
#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + trans->sorted[_i]), (_i) < (_trans)->nr_sorted;\
	     _i++)
static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}
static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}
#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))
struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
			 bool, unsigned long);
static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
			 struct btree_path *path, bool intent,
			 unsigned long ip)
{
	if (path->ref > 1 || path->preserve)
		path = __bch2_btree_path_make_mut(trans, path, intent, ip);
	path->should_be_locked = false;
	return path;
}
struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
			struct bpos, bool, unsigned long);
int __must_check bch2_btree_path_traverse(struct btree_trans *,
					  struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					struct btree_iter *, struct bpos);

inline void bch2_btree_path_level_init(struct btree_trans *,
					struct btree_path *, struct btree *);
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif
void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

int bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

int bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);
static inline bool trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
	return restart_count != trans->restart_count;
}

void bch2_trans_verify_not_restarted(struct btree_trans *, u32);
static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));

	trans->restarted = err;
	return -err;
}

static inline int btree_trans_restart(struct btree_trans *trans, int err)
{
	btree_trans_restart_nounlock(trans, err);
	return -err;
}
bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

void __bch2_btree_path_downgrade(struct btree_trans *, struct btree_path *, unsigned);
static inline void bch2_btree_path_downgrade(struct btree_trans *trans,
					     struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(trans, path, new_locks_want);
}
void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *);
static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}
struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size		= 0;
}
static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(iter->trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = NULL;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}
static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}
static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
			  unsigned, struct bpos, unsigned);
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	if (!iter->trans->restarted)
		iter->path->preserve = false;
}
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	unsigned new_top = trans->mem_top + size;
	void *p = trans->mem + trans->mem_top;

	if (likely(new_top <= trans->mem_bytes)) {
		trans->mem_top += size;
		return p;
	}

	return __bch2_trans_kmalloc(trans, size);
}
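
/*
 * Usage sketch (hedged; the calling convention below is an assumption based
 * on how the bump allocator above is laid out): allocations live for the
 * duration of the transaction and are reclaimed when it is reset, so callers
 * never free them individually:
 *
 *	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
 *	if (IS_ERR(k))
 *		return PTR_ERR(k);
 */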
u32 bch2_trans_begin(struct btree_trans *);
static inline struct btree *
__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
		bch2_trans_begin(trans);

	return b;
}
#define __for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))
#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,	\
			      0, 0, _flags, _b, _ret)
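
/*
 * Usage sketch (hedged example, not from this file): iterating over btree
 * nodes rather than individual keys, e.g. to inspect the tree structure:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN, 0, b, ret) {
 *		... inspect b ...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */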
static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
							     unsigned flags)
{
	BUG_ON(flags & BTREE_ITER_ALL_LEVELS);

	return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek_prev(iter);
}
static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return flags & BTREE_ITER_ALL_LEVELS ? bch2_btree_iter_peek_all_levels(iter) :
	       flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
					  bch2_btree_iter_peek(iter);
}
static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							      struct bpos end,
							      unsigned flags)
{
	if (!(flags & BTREE_ITER_SLOTS))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_cmp(iter->pos, end) > 0)
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}
static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
		trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
	}

	return 0;
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
		bch2_trans_begin(trans);

	return k;
}
#define lockrestart_do(_trans, _do)					\
({									\
	u32 _restart_count;						\
	int _ret;							\
									\
	do {								\
		_restart_count = bch2_trans_begin(_trans);		\
		_ret = (_do);						\
	} while (bch2_err_matches(_ret, BCH_ERR_transaction_restart));	\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	_ret;								\
})
/*
 * nested_lockrestart_do(), nested_commit_do():
 *
 * These are like lockrestart_do() and commit_do(), with two differences:
 *
 * - We don't call bch2_trans_begin() unless we had a transaction restart
 * - We return -BCH_ERR_transaction_restart_nested if we succeeded after a
 *   transaction restart
 */
#define nested_lockrestart_do(_trans, _do)				\
({									\
	u32 _restart_count, _orig_restart_count;			\
	int _ret;							\
									\
	_restart_count = _orig_restart_count = (_trans)->restart_count;\
									\
	while (bch2_err_matches(_ret = (_do), BCH_ERR_transaction_restart))\
		_restart_count = bch2_trans_begin(_trans);		\
									\
	if (!_ret)							\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
									\
	if (!_ret && trans_was_restarted(_trans, _orig_restart_count))	\
		_ret = -BCH_ERR_transaction_restart_nested;		\
									\
	_ret;								\
})
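
/*
 * Sketch of the difference described above (lookup_foo() is a hypothetical
 * helper, not part of this API): at the top level a restartable operation is
 * usually wrapped in lockrestart_do(), which begins/retries the transaction
 * itself; when the same operation runs inside an outer transaction context,
 * nested_lockrestart_do() is used so the caller can see, via
 * -BCH_ERR_transaction_restart_nested, that a restart happened underneath it:
 *
 *	ret = lockrestart_do(trans, lookup_foo(trans, &iter));
 *
 *	ret = nested_lockrestart_do(trans, lookup_foo(trans, &iter));
 */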
#define for_each_btree_key2(_trans, _iter, _btree_id,			\
			    _start, _flags, _k, _do)			\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
		(_k) = bch2_btree_iter_peek_type(&(_iter), (_flags));	\
		if (!(_k).k) {						\
			_ret = 0;					\
			break;						\
		}							\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_advance(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
				   _start, _flags, _k, _do)		\
({									\
	int _ret = 0;							\
									\
	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
			     (_start), (_flags));			\
									\
	while (1) {							\
		u32 _restart_count = bch2_trans_begin(_trans);		\
		(_k) = bch2_btree_iter_peek_prev_type(&(_iter), (_flags));\
		if (!(_k).k) {						\
			_ret = 0;					\
			break;						\
		}							\
									\
		_ret = bkey_err(_k) ?: (_do);				\
		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
			continue;					\
		if (_ret)						\
			break;						\
		bch2_trans_verify_not_restarted(_trans, _restart_count);\
		if (!bch2_btree_iter_rewind(&(_iter)))			\
			break;						\
	}								\
									\
	bch2_trans_iter_exit((_trans), &(_iter));			\
	_ret;								\
})
#define for_each_btree_key_commit(_trans, _iter, _btree_id,		\
				  _start, _iter_flags, _k,		\
				  _disk_res, _journal_seq, _commit_flags,\
				  _do)					\
	for_each_btree_key2(_trans, _iter, _btree_id, _start, _iter_flags, _k,\
			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
					(_journal_seq), (_commit_flags)))
#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
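
/*
 * Usage sketch (hedged; the btree id, flags and loop body are illustrative):
 * for_each_btree_key() retries the peek transparently on transaction restart,
 * but the iterator must still be torn down by the caller:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	for_each_btree_key(trans, iter, BTREE_ID_extents, POS_MIN,
 *			   BTREE_ITER_PREFETCH, k, ret) {
 *		... process k ...
 *	}
 *	bch2_trans_iter_exit(trans, &iter);
 */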
#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
			   _start, _end, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))
/* new multiple iterator interface: */

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_path *);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
void __bch2_trans_init(struct btree_trans *, struct bch_fs *, unsigned);
void bch2_trans_exit(struct btree_trans *);

extern const char *bch2_btree_transaction_fns[BCH_TRANSACTIONS_NR];
unsigned bch2_trans_get_fn_idx(const char *);
#define bch2_trans_init(_trans, _c, _nr_iters, _mem)			\
do {									\
	static unsigned trans_fn_idx;					\
									\
	if (unlikely(!trans_fn_idx))					\
		trans_fn_idx = bch2_trans_get_fn_idx(__func__);		\
									\
	__bch2_trans_init(_trans, _c, trans_fn_idx);			\
} while (0)
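
/*
 * Usage sketch (assumption; do_something() is hypothetical): a transaction is
 * stack allocated, initialized against a filesystem, used, and torn down:
 *
 *	struct btree_trans trans;
 *	int ret;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	ret = lockrestart_do(&trans, do_something(&trans));
 *	bch2_trans_exit(&trans);
 */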
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */