#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include <linux/dynamic_fault.h>

#include "btree_types.h"

static inline void btree_iter_set_dirty(struct btree_iter *iter,
                                        enum btree_iter_uptodate u)
{
        iter->uptodate = max_t(unsigned, iter->uptodate, u);
}

static inline struct btree *btree_iter_node(struct btree_iter *iter,
                                            unsigned level)
{
        return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}

static inline struct btree *btree_node_parent(struct btree_iter *iter,
                                              struct btree *b)
{
        return btree_iter_node(iter, b->level + 1);
}

static inline bool btree_iter_linked(const struct btree_iter *iter)
{
        return iter->next != iter;
}

static inline bool __iter_has_node(const struct btree_iter *iter,
                                   const struct btree *b)
{
        /*
         * We don't compare the low bits of the lock sequence numbers because
         * @iter might have taken a write lock on @b, and we don't want to skip
         * the linked iterator if the sequence numbers were equal before taking
         * that write lock. The lock sequence number is incremented by taking
         * and releasing write locks and is even when unlocked:
         */

        return iter->l[b->level].b == b &&
                iter->l[b->level].lock_seq >> 1 == b->lock.state.seq >> 1;
}

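/*
 * Worked example (illustrative): a node's lock sequence number is even while
 * unlocked, say 4. If @iter records lock_seq 4 and then takes the write lock,
 * the node's seq becomes 5; comparing seq >> 1 (4 >> 1 == 5 >> 1 == 2) still
 * matches. After a full write lock/unlock cycle by someone else (seq 6), the
 * comparison fails (6 >> 1 == 3) and the iterator must relock the node.
 */
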
static inline struct btree_iter *
__next_linked_iter(struct btree_iter *iter, struct btree_iter *linked)
{
        return linked->next != iter ? linked->next : NULL;
}

static inline struct btree_iter *
__next_iter_with_node(struct btree_iter *iter, struct btree *b,
                      struct btree_iter *linked)
{
        while (linked && !__iter_has_node(linked, b))
                linked = __next_linked_iter(iter, linked);

        return linked;
}

/**
 * for_each_btree_iter - iterate over all iterators linked with @_iter,
 * including @_iter
 */
#define for_each_btree_iter(_iter, _linked)                             \
        for ((_linked) = (_iter); (_linked);                            \
             (_linked) = __next_linked_iter(_iter, _linked))

/**
 * for_each_btree_iter_with_node - iterate over all iterators linked with @_iter
 * that also point to @_b
 *
 * @_b is assumed to be locked by @_iter
 *
 * Filters out iterators that don't have a valid btree_node iterator for @_b -
 * i.e. iterators for which bch2_btree_node_relock() would not succeed.
 */
#define for_each_btree_iter_with_node(_iter, _b, _linked)               \
        for ((_linked) = (_iter);                                       \
             ((_linked) = __next_iter_with_node(_iter, _b, _linked));   \
             (_linked) = __next_linked_iter(_iter, _linked))

/**
 * for_each_linked_btree_iter - iterate over all iterators linked with @_iter,
 * _not_ including @_iter
 */
#define for_each_linked_btree_iter(_iter, _linked)                      \
        for ((_linked) = (_iter)->next;                                 \
             (_linked) != (_iter);                                      \
             (_linked) = (_linked)->next)

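/*
 * Usage sketch (hypothetical helper, not part of this interface): mark every
 * iterator that currently points to @b as needing to re-peek. Relies on
 * btree_iter_set_dirty() above and BTREE_ITER_NEED_PEEK from btree_types.h.
 */
static inline void example_dirty_iters_with_node(struct btree_iter *iter,
                                                 struct btree *b)
{
        struct btree_iter *linked;

        for_each_btree_iter_with_node(iter, b, linked)
                btree_iter_set_dirty(linked, BTREE_ITER_NEED_PEEK);
}
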
#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
void bch2_btree_iter_verify_locks(struct btree_iter *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
                                          struct btree *b) {}
static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
#endif

void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
                              struct btree_node_iter *, struct bkey_packed *,
                              unsigned, unsigned);

int bch2_btree_iter_unlock(struct btree_iter *);

bool __bch2_btree_iter_upgrade(struct btree_iter *, unsigned);
bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *, unsigned);

static inline bool bch2_btree_iter_upgrade(struct btree_iter *iter,
                                           unsigned new_locks_want,
                                           bool may_drop_locks)
{
        new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

        return iter->locks_want < new_locks_want
                ? (may_drop_locks
                   ? __bch2_btree_iter_upgrade(iter, new_locks_want)
                   : __bch2_btree_iter_upgrade_nounlock(iter, new_locks_want))
                : iter->uptodate <= BTREE_ITER_NEED_PEEK;
}

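/*
 * Usage sketch (hypothetical caller): take intent locks on the leaf and its
 * parent before a node split. On failure the iterator may have dropped locks
 * and must be re-traversed before use; -EINTR is assumed here as the usual
 * "restart the transaction" convention.
 */
static inline int example_upgrade_for_split(struct btree_iter *iter)
{
        if (!bch2_btree_iter_upgrade(iter, 2, true))
                return -EINTR;

        return 0;
}
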
void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);

static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
{
        /* downgrade if we hold more locks than the minimum we need: */
        if (iter->locks_want > ((iter->flags & BTREE_ITER_INTENT) ? 1 : 0))
                __bch2_btree_iter_downgrade(iter, 0);
}

void bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);

int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);

void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
                            enum btree_id, struct bpos,
                            unsigned, unsigned, unsigned);

static inline void bch2_btree_iter_init(struct btree_iter *iter,
                        struct bch_fs *c, enum btree_id btree_id,
                        struct bpos pos, unsigned flags)
{
        __bch2_btree_iter_init(iter, c, btree_id, pos,
                               flags & BTREE_ITER_INTENT ? 1 : 0, 0,
                               (btree_id == BTREE_ID_EXTENTS
                                ? BTREE_ITER_IS_EXTENTS : 0)|flags);
}

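/*
 * Usage sketch (hypothetical helper): initialize an iterator on the stack,
 * peek a single slot, and drop locks. Any traversal error is carried in the
 * returned key and extracted with PTR_ERR_OR_ZERO().
 */
static inline int example_lookup_slot(struct bch_fs *c, struct bpos pos)
{
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;

        bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, pos, BTREE_ITER_SLOTS);
        k = bch2_btree_iter_peek_slot(&iter);
        ret = PTR_ERR_OR_ZERO(k.k);
        bch2_btree_iter_unlock(&iter);

        return ret;
}
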
void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
void bch2_btree_iter_unlink(struct btree_iter *);
void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *);

static inline struct bpos btree_type_successor(enum btree_id id,
                                               struct bpos pos)
{
        if (id == BTREE_ID_INODES) {
                pos.inode++;
                pos.offset = 0;
        } else if (id != BTREE_ID_EXTENTS) {
                pos = bkey_successor(pos);
        }

        return pos;
}

static inline struct bpos btree_type_predecessor(enum btree_id id,
                                                 struct bpos pos)
{
        if (id == BTREE_ID_INODES) {
                --pos.inode;
                pos.offset = 0;
        } else /* if (id != BTREE_ID_EXTENTS) */ {
                pos = bkey_predecessor(pos);
        }

        return pos;
}

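/*
 * Worked example (illustrative): in BTREE_ID_INODES the successor of
 * POS(1, 5) is POS(2, 0) - the inodes btree advances a whole inode at a
 * time. Most other btrees use bkey_successor(), which advances offset with
 * carry into inode; the extents btree is left unchanged by these helpers.
 */
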
static inline int __btree_iter_cmp(enum btree_id id,
                                   struct bpos pos,
                                   const struct btree_iter *r)
{
        if (id != r->btree_id)
                return id < r->btree_id ? -1 : 1;
        return bkey_cmp(pos, r->pos);
}

static inline int btree_iter_cmp(const struct btree_iter *l,
                                 const struct btree_iter *r)
{
        return __btree_iter_cmp(l->btree_id, l->pos, r);
}

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
{
        if (need_resched()) {
                bch2_btree_iter_unlock(iter);
                schedule();
        } else if (race_fault()) {
                bch2_btree_iter_unlock(iter);
        }
}

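/*
 * Usage sketch (hypothetical helper): because bch2_btree_iter_cond_resched()
 * may unlock the iterator, a previously returned key must not be used across
 * it - re-peek instead:
 */
static inline struct bkey_s_c example_peek_with_resched(struct btree_iter *iter)
{
        bch2_btree_iter_cond_resched(iter);

        /* peek re-traverses if the iterator was unlocked: */
        return bch2_btree_iter_peek(iter);
}
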
#define __for_each_btree_node(_iter, _c, _btree_id, _start,             \
                              _locks_want, _depth, _flags, _b)          \
        for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), _start, \
                                    _locks_want, _depth,                \
                                    _flags|BTREE_ITER_NODES),           \
             _b = bch2_btree_iter_peek_node(_iter);                     \
             (_b);                                                      \
             (_b) = bch2_btree_iter_next_node(_iter, _depth))

#define for_each_btree_node(_iter, _c, _btree_id, _start, _flags, _b)   \
        __for_each_btree_node(_iter, _c, _btree_id, _start, 0, 0, _flags, _b)

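/*
 * Usage sketch (hypothetical helper): walk every node in one btree. As with
 * the key iterators, the caller unlocks when done and collects any error
 * from bch2_btree_iter_unlock().
 */
static inline int example_walk_nodes(struct bch_fs *c, enum btree_id id)
{
        struct btree_iter iter;
        struct btree *b;

        for_each_btree_node(&iter, c, id, POS_MIN, 0, b)
                pr_debug("node at level %u", b->level);

        return bch2_btree_iter_unlock(&iter);
}
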
static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
                                                     unsigned flags)
{
        return flags & BTREE_ITER_SLOTS
                ? bch2_btree_iter_peek_slot(iter)
                : bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
                                                     unsigned flags)
{
        bch2_btree_iter_cond_resched(iter);

        return flags & BTREE_ITER_SLOTS
                ? bch2_btree_iter_next_slot(iter)
                : bch2_btree_iter_next(iter);
}

#define for_each_btree_key(_iter, _c, _btree_id, _start, _flags, _k)    \
        for (bch2_btree_iter_init((_iter), (_c), (_btree_id),           \
                                  (_start), (_flags)),                  \
             (_k) = __bch2_btree_iter_peek(_iter, _flags);              \
             !IS_ERR_OR_NULL((_k).k);                                   \
             (_k) = __bch2_btree_iter_next(_iter, _flags))

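/*
 * Usage sketch (hypothetical helper): iterate every key in a btree. The loop
 * ends on error or at the end of the btree; bch2_btree_iter_unlock() returns
 * the error, if any.
 */
static inline int example_count_keys(struct bch_fs *c, enum btree_id id,
                                     u64 *nr)
{
        struct btree_iter iter;
        struct bkey_s_c k;

        *nr = 0;

        for_each_btree_key(&iter, c, id, POS_MIN, 0, k)
                (*nr)++;

        return bch2_btree_iter_unlock(&iter);
}
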
#define for_each_btree_key_continue(_iter, _flags, _k)                  \
        for ((_k) = __bch2_btree_iter_peek(_iter, _flags);              \
             !IS_ERR_OR_NULL((_k).k);                                   \
             (_k) = __bch2_btree_iter_next(_iter, _flags))

static inline int btree_iter_err(struct bkey_s_c k)
{
        return PTR_ERR_OR_ZERO(k.k);
}

/* new multiple iterator interface: */

void bch2_trans_preload_iters(struct btree_trans *);
void bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
void bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);

struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
                                         struct bpos, unsigned, u64);
struct btree_iter *__bch2_trans_copy_iter(struct btree_trans *,
                                          struct btree_iter *, u64);

static __always_inline u64 __btree_iter_id(void)
{
        /*
         * Identify the iterator by its call site: the caller's return
         * address in the high 32 bits, this instruction pointer in the
         * low 32 bits:
         */
        u64 ret = 0;

        ret |= _RET_IP_ & U32_MAX;
        ret <<= 32;
        ret |= _THIS_IP_ & U32_MAX;
        return ret;
}

static __always_inline struct btree_iter *
bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
                    struct bpos pos, unsigned flags)
{
        return __bch2_trans_get_iter(trans, btree_id, pos, flags,
                                     __btree_iter_id());
}

static __always_inline struct btree_iter *
bch2_trans_copy_iter(struct btree_trans *trans, struct btree_iter *src)
{
        return __bch2_trans_copy_iter(trans, src, __btree_iter_id());
}

void __bch2_trans_begin(struct btree_trans *);

static inline void bch2_trans_begin_updates(struct btree_trans *trans)
{
        trans->nr_updates = 0;
}

void *bch2_trans_kmalloc(struct btree_trans *, size_t);
int bch2_trans_unlock(struct btree_trans *);
void bch2_trans_init(struct btree_trans *, struct bch_fs *);
int bch2_trans_exit(struct btree_trans *);

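/*
 * Usage sketch (hypothetical helper) for the transaction interface:
 * iterators are allocated out of @trans, identified by call site, and freed
 * - with locks dropped - by bch2_trans_exit(). The ERR_PTR return from
 * bch2_trans_get_iter() is an assumption of this sketch.
 */
static inline int example_trans_lookup(struct bch_fs *c, struct bpos pos)
{
        struct btree_trans trans;
        struct btree_iter *iter;
        struct bkey_s_c k;
        int ret;

        bch2_trans_init(&trans, c);

        iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, pos,
                                   BTREE_ITER_SLOTS);
        if (IS_ERR(iter)) {
                ret = PTR_ERR(iter);
                goto out;
        }

        k = bch2_btree_iter_peek_slot(iter);
        ret = btree_iter_err(k);
out:
        return bch2_trans_exit(&trans) ?: ret;
}
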
#ifdef TRACE_TRANSACTION_RESTARTS
#define bch2_trans_begin(_trans)                                        \
do {                                                                    \
        if (is_power_of_2((_trans)->nr_restarts) &&                     \
            (_trans)->nr_restarts >= 8)                                 \
                pr_info("nr restarts: %zu", (_trans)->nr_restarts);     \
                                                                        \
        (_trans)->nr_restarts++;                                        \
        __bch2_trans_begin(_trans);                                     \
} while (0)
#else
#define bch2_trans_begin(_trans)        __bch2_trans_begin(_trans)
#endif

#ifdef TRACE_TRANSACTION_RESTARTS_ALL
#define trans_restart(...) pr_info("transaction restart" __VA_ARGS__)
#else
#define trans_restart(...) no_printk("transaction restart" __VA_ARGS__)
#endif

#endif /* _BCACHEFS_BTREE_ITER_H */