/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include "bset.h"
#include "btree_types.h"

static inline void __btree_path_get(struct btree_path *path, bool intent)
{
	path->ref++;
	path->intent_ref += intent;
}

static inline bool __btree_path_put(struct btree_path *path, bool intent)
{
	EBUG_ON(!path->ref);
	EBUG_ON(!path->intent_ref && intent);
	path->intent_ref -= intent;
	return --path->ref == 0;
}

static inline void btree_path_set_dirty(struct btree_path *path,
					enum btree_path_uptodate u)
{
	path->uptodate = max_t(unsigned, path->uptodate, u);
}

static inline struct btree *btree_path_node(struct btree_path *path,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? path->l[level].b : NULL;
}

static inline bool btree_node_lock_seq_matches(const struct btree_path *path,
					const struct btree *b, unsigned level)
{
	/*
	 * We don't compare the low bits of the lock sequence numbers because
	 * @path might have taken a write lock on @b, and we don't want to skip
	 * the linked path if the sequence numbers were equal before taking that
	 * write lock. The lock sequence number is incremented by taking and
	 * releasing write locks and is even when unlocked:
	 */
	return path->l[level].lock_seq >> 1 == b->c.lock.state.seq >> 1;
}

static inline struct btree *btree_node_parent(struct btree_path *path,
					      struct btree *b)
{
	return btree_path_node(path, b->c.level + 1);
}

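/*
 * Worked example of the scheme described above (illustrative only): a node
 * that has been write locked and unlocked twice sits at lock_seq 4; taking a
 * third write lock moves it to 5. Comparing seq >> 1 (2 == 2) still treats
 * the path that took that write lock as matching, while a node whose lock
 * cycled again without us (seq 6, 6 >> 1 == 3) no longer matches.
 */
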
/* Iterate over paths within a transaction: */

static inline struct btree_path *
__trans_next_path(struct btree_trans *trans, unsigned idx)
{
	u64 l;

	if (idx == BTREE_ITER_MAX)
		return NULL;

	l = trans->paths_allocated >> idx;
	if (!l)
		return NULL;

	idx += __ffs64(l);
	EBUG_ON(idx >= BTREE_ITER_MAX);
	EBUG_ON(trans->paths[idx].idx != idx);
	return &trans->paths[idx];
}

void bch2_btree_path_check_sort(struct btree_trans *, struct btree_path *, int);

#define trans_for_each_path(_trans, _path)				\
	for (_path = __trans_next_path((_trans), 0);			\
	     (_path);							\
	     _path = __trans_next_path((_trans), (_path)->idx + 1))

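/*
 * Example usage of trans_for_each_path() (a sketch, not part of this
 * interface): count how many of a transaction's paths currently hold
 * intent references:
 *
 *	struct btree_path *path;
 *	unsigned nr_intent = 0;
 *
 *	trans_for_each_path(trans, path)
 *		nr_intent += path->intent_ref != 0;
 */
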
static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	unsigned idx = path ? path->sorted_idx + 1 : 0;

	EBUG_ON(idx > trans->nr_sorted);

	return idx < trans->nr_sorted
		? trans->paths + trans->sorted[idx]
		: NULL;
}

static inline struct btree_path *prev_btree_path(struct btree_trans *trans, struct btree_path *path)
{
	EBUG_ON(path->sorted_idx >= trans->nr_sorted);
	return path->sorted_idx
		? trans->paths + trans->sorted[path->sorted_idx - 1]
		: NULL;
}

#define trans_for_each_path_inorder(_trans, _path, _i)			\
	for (_i = 0;							\
	     ((_path) = (_trans)->paths + (_trans)->sorted[_i]), (_i) < (_trans)->nr_sorted;\
	     _i++)

static inline bool __path_has_node(const struct btree_path *path,
				   const struct btree *b)
{
	return path->l[b->c.level].b == b &&
		btree_node_lock_seq_matches(path, b, b->c.level);
}

static inline struct btree_path *
__trans_next_path_with_node(struct btree_trans *trans, struct btree *b,
			    unsigned idx)
{
	struct btree_path *path = __trans_next_path(trans, idx);

	while (path && !__path_has_node(path, b))
		path = __trans_next_path(trans, path->idx + 1);

	return path;
}

#define trans_for_each_path_with_node(_trans, _b, _path)		\
	for (_path = __trans_next_path_with_node((_trans), (_b), 0);	\
	     (_path);							\
	     _path = __trans_next_path_with_node((_trans), (_b),	\
						 (_path)->idx + 1))

struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
			 bool, unsigned long);
struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
			struct bpos, bool, unsigned long);
int __must_check bch2_btree_path_traverse(struct btree_trans *,
					  struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
				 unsigned, unsigned, unsigned, unsigned long);
inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
					    struct btree_iter *, struct bpos);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_trans_verify_paths(struct btree_trans *);
void bch2_trans_verify_locks(struct btree_trans *);
void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
			    struct bpos, bool);
#else
static inline void bch2_trans_verify_paths(struct btree_trans *trans) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
static inline void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
					  struct bpos pos, bool key_cache) {}
#endif

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *, struct bkey_packed *);
void bch2_btree_node_iter_fix(struct btree_trans *trans, struct btree_path *,
			      struct btree *, struct btree_node_iter *,
			      struct bkey_packed *, unsigned, unsigned);

bool bch2_btree_path_relock_intent(struct btree_trans *, struct btree_path *);

void bch2_path_put(struct btree_trans *, struct btree_path *, bool);

bool bch2_trans_relock(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);

__always_inline
static inline int btree_trans_restart(struct btree_trans *trans)
{
	trans->restarted = true;
	bch2_trans_unlock(trans);
	return -EINTR;
}

bool bch2_btree_node_upgrade(struct btree_trans *,
			     struct btree_path *, unsigned);

bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned);

static inline bool bch2_btree_path_upgrade(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	return path->locks_want < new_locks_want
		? __bch2_btree_path_upgrade(trans, path, new_locks_want)
		: path->uptodate == BTREE_ITER_UPTODATE;
}

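/*
 * Example usage (a sketch): callers that need stronger locks typically
 * restart the transaction when the upgrade fails, e.g.:
 *
 *	if (!bch2_btree_path_upgrade(trans, path, path->level + 1))
 *		return btree_trans_restart(trans);
 */
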
void __bch2_btree_path_downgrade(struct btree_path *, unsigned);

static inline void bch2_btree_path_downgrade(struct btree_path *path)
{
	unsigned new_locks_want = path->level + !!path->intent_ref;

	if (path->locks_want > new_locks_want)
		__bch2_btree_path_downgrade(path, new_locks_want);
}

void bch2_trans_downgrade(struct btree_trans *);

void bch2_trans_node_add(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);

int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *, struct bpos);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *);

static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	return bch2_btree_iter_peek_upto(iter, SPOS_MAX);
}

struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);

bool bch2_btree_iter_advance(struct btree_iter *);
bool bch2_btree_iter_rewind(struct btree_iter *);

static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	iter->k.type = KEY_TYPE_deleted;
	iter->k.p.inode		= iter->pos.inode	= new_pos.inode;
	iter->k.p.offset	= iter->pos.offset	= new_pos.offset;
	iter->k.p.snapshot	= iter->pos.snapshot	= new_pos.snapshot;
	iter->k.size		= 0;
}

static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	if (unlikely(iter->update_path))
		bch2_path_put(iter->trans, iter->update_path,
			      iter->flags & BTREE_ITER_INTENT);
	iter->update_path = NULL;

	if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
		new_pos.snapshot = iter->snapshot;

	__bch2_btree_iter_set_pos(iter, new_pos);
}

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
	iter->pos = bkey_start_pos(&iter->k);
}

static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
{
	struct bpos pos = iter->pos;

	iter->snapshot = snapshot;
	pos.snapshot = snapshot;
	bch2_btree_iter_set_pos(iter, pos);
}

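/*
 * Example usage (a sketch): re-aim an existing iterator at the same
 * position in a different snapshot, then peek again:
 *
 *	bch2_btree_iter_set_snapshot(iter, snapshot);
 *	k = bch2_btree_iter_peek_slot(iter);
 */
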
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
			  unsigned, struct bpos, unsigned);
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
			       enum btree_id, struct bpos,
			       unsigned, unsigned, unsigned);
void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);

static inline void set_btree_iter_dontneed(struct btree_iter *iter)
{
	iter->path->preserve = false;
}

void *bch2_trans_kmalloc(struct btree_trans *, size_t);
void bch2_trans_begin(struct btree_trans *);

static inline struct btree *
__btree_iter_peek_node_and_restart(struct btree_trans *trans, struct btree_iter *iter)
{
	struct btree *b;

	while (b = bch2_btree_iter_peek_node(iter),
	       PTR_ERR_OR_ZERO(b) == -EINTR)
		bch2_trans_begin(trans);

	return b;
}

#define __for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      _locks_want, _depth, _flags, _b, _ret)	\
	for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id),	\
				_start, _locks_want, _depth, _flags);	\
	     (_b) = __btree_iter_peek_node_and_restart((_trans), &(_iter)),\
	     !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b);			\
	     (_b) = bch2_btree_iter_next_node(&(_iter)))

#define for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			    _flags, _b, _ret)				\
	__for_each_btree_node(_trans, _iter, _btree_id, _start,		\
			      0, 0, _flags, _b, _ret)

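/*
 * Example usage (a sketch; btree id and loop body are illustrative): walk
 * every node of a btree. Transaction restarts are handled internally by
 * __btree_iter_peek_node_and_restart():
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *	int ret;
 *
 *	for_each_btree_node(trans, iter, BTREE_ID_extents, POS_MIN,
 *			    0, b, ret)
 *		pr_info("node at level %u\n", b->c.level);
 *	bch2_trans_iter_exit(trans, &iter);
 */
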
static inline int bkey_err(struct bkey_s_c k)
{
	return PTR_ERR_OR_ZERO(k.k);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
							unsigned flags)
{
	return flags & BTREE_ITER_ALL_LEVELS ? bch2_btree_iter_peek_all_levels(iter) :
	       flags & BTREE_ITER_SLOTS      ? bch2_btree_iter_peek_slot(iter) :
					       bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *iter,
							     struct bpos end,
							     unsigned flags)
{
	if (!(flags & BTREE_ITER_SLOTS))
		return bch2_btree_iter_peek_upto(iter, end);

	if (bkey_cmp(iter->pos, end) > 0)
		return bkey_s_c_null;

	return bch2_btree_iter_peek_slot(iter);
}

static inline int btree_trans_too_many_iters(struct btree_trans *trans)
{
	return hweight64(trans->paths_allocated) > BTREE_ITER_MAX / 2
		? -EINTR : 0;
}

static inline struct bkey_s_c
__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
				   struct btree_iter *iter, unsigned flags)
{
	struct bkey_s_c k;

	while (btree_trans_too_many_iters(trans) ||
	       (k = bch2_btree_iter_peek_type(iter, flags),
		bkey_err(k) == -EINTR))
		bch2_trans_begin(trans);

	return k;
}

#define for_each_btree_key(_trans, _iter, _btree_id,			\
			   _start, _flags, _k, _ret)			\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

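/*
 * Example usage (a sketch; btree id and loop body are illustrative): the
 * canonical lookup loop. Restarts are handled internally: on -EINTR,
 * __bch2_btree_iter_peek_and_restart() calls bch2_trans_begin() and
 * retries from the iterator's current position:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *	int ret;
 *
 *	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN, 0, k, ret)
 *		pr_info("key at %llu:%llu\n", k.k->p.inode, k.k->p.offset);
 *	bch2_trans_iter_exit(trans, &iter);
 */
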
#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
				     _start, _flags, _k, _ret)		\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
					  _start, _end, _flags, _k, _ret)\
	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
				  (_start), (_flags));			\
	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
	for (;								\
	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
	     !((_ret) = bkey_err(_k)) && (_k).k;			\
	     bch2_btree_iter_advance(&(_iter)))

/* new multiple iterator interface: */

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
void __bch2_trans_init(struct btree_trans *, struct bch_fs *,
		       unsigned, size_t, const char *);
void bch2_trans_exit(struct btree_trans *);

#define bch2_trans_init(...)	__bch2_trans_init(__VA_ARGS__, __func__)

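/*
 * Example usage (a sketch): transaction lifecycle. The wrapper macro
 * appends __func__, which __bch2_trans_init() takes as its final argument;
 * the two zeros below are the expected number of iterators and expected
 * memory to preallocate, both of which may be zero:
 *
 *	struct btree_trans trans;
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	...
 *	bch2_trans_exit(&trans);
 */
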
void bch2_btree_trans_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_btree_iter_exit(struct bch_fs *);
int bch2_fs_btree_iter_init(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_ITER_H */