#ifndef _BCACHEFS_BTREE_ITER_H
#define _BCACHEFS_BTREE_ITER_H

#include <linux/dynamic_fault.h>

#include "btree_types.h"
#include "bset.h"

#define BTREE_ITER_SLOTS                (1 << 0)
#define BTREE_ITER_INTENT               (1 << 1)
#define BTREE_ITER_PREFETCH             (1 << 2)
/*
 * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
 * @pos or the first key strictly greater than @pos
 */
#define BTREE_ITER_IS_EXTENTS           (1 << 3)
/*
 * indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
 */
#define BTREE_ITER_AT_END_OF_LEAF       (1 << 4)
#define BTREE_ITER_ERROR                (1 << 5)

enum btree_iter_uptodate {
        BTREE_ITER_UPTODATE             = 0,
        BTREE_ITER_NEED_PEEK            = 1,
        BTREE_ITER_NEED_RELOCK          = 2,
        BTREE_ITER_NEED_TRAVERSE        = 3,
        BTREE_ITER_END                  = 4,
};
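
/*
 * These values are ordered by increasing amounts of work needed to make the
 * iterator usable again: btree_iter_set_dirty() below combines states with
 * max_t(), so an iterator's state can only get dirtier, never cleaner.
 */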

/*
 * @pos                 - iterator's current position
 * @level               - current btree depth
 * @locks_want          - btree level below which we start taking intent locks
 * @nodes_locked        - bitmask indicating which nodes in @l are locked
 * @nodes_intent_locked - bitmask indicating which locks are intent locks
 */
struct btree_iter {
        struct bch_fs           *c;
        struct bpos             pos;

        u8                      flags;
        unsigned                uptodate:4;
        enum btree_id           btree_id:4;
        unsigned                level:4,
                                locks_want:4,
                                nodes_locked:4,
                                nodes_intent_locked:4;

        struct btree_iter_level {
                struct btree    *b;
                struct btree_node_iter iter;
        }                       l[BTREE_MAX_DEPTH];

        u32                     lock_seq[BTREE_MAX_DEPTH];

        /*
         * Current unpacked key - so that bch2_btree_iter_next()/
         * bch2_btree_iter_next_slot() can correctly advance pos.
         */
        struct bkey             k;

        /*
         * Circular linked list of linked iterators: linked iterators share
         * locks (e.g. two linked iterators may have the same node intent
         * locked, or read and write locked, at the same time), and insertions
         * through one iterator won't invalidate the other linked iterators.
         */

        /* Must come last: */
        struct btree_iter       *next;
};

static inline void btree_iter_set_dirty(struct btree_iter *iter,
                                        enum btree_iter_uptodate u)
{
        iter->uptodate = max_t(unsigned, iter->uptodate, u);
}
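
/*
 * Example (a sketch, not taken from actual callers): after releasing a node
 * lock, a caller would do
 *
 *      btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
 *
 * and a previously recorded BTREE_ITER_NEED_TRAVERSE would survive, thanks
 * to the max_t() above.
 */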

static inline struct btree *btree_iter_node(struct btree_iter *iter,
                                            unsigned level)
{
        return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}

static inline struct btree *btree_node_parent(struct btree_iter *iter,
                                              struct btree *b)
{
        return btree_iter_node(iter, b->level + 1);
}

static inline bool btree_iter_linked(const struct btree_iter *iter)
{
        return iter->next != iter;
}

/**
 * for_each_linked_btree_iter - iterate over all iterators linked with @_iter
 */
#define for_each_linked_btree_iter(_iter, _linked)                      \
        for ((_linked) = (_iter)->next;                                 \
             (_linked) != (_iter);                                      \
             (_linked) = (_linked)->next)
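
/*
 * Usage sketch (illustrative, not from the original source): @_iter itself
 * is excluded, and because the list is circular an unlinked iterator
 * (next == itself) makes the loop body run zero times:
 *
 *      struct btree_iter *linked;
 *
 *      for_each_linked_btree_iter(iter, linked)
 *              bch2_btree_iter_unlock(linked);
 */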

static inline struct btree_iter *
__next_linked_btree_node(struct btree_iter *iter, struct btree *b,
                         struct btree_iter *linked)
{
        do {
                linked = linked->next;

                if (linked == iter)
                        return NULL;

                /*
                 * We don't compare the low bits of the lock sequence numbers
                 * because @iter might have taken a write lock on @b, and we
                 * don't want to skip the linked iterator if the sequence
                 * numbers were equal before taking that write lock. The lock
                 * sequence number is incremented by taking and releasing write
                 * locks and is even when unlocked:
                 */
        } while (linked->l[b->level].b != b ||
                 linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);

        return linked;
}
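
/*
 * Worked example of the seq comparison above (ours): suppose @linked saved
 * lock_seq 6 (even: node unlocked at the time) and @iter has since taken a
 * write lock, bumping b->lock.state.seq to 7. Comparing full values, 6 != 7
 * would wrongly skip @linked; comparing 6 >> 1 == 7 >> 1 (3 == 3) correctly
 * keeps it. Once that write lock is released the seq becomes 8, and
 * 6 >> 1 != 8 >> 1 filters @linked out - its view of the node is now stale.
 */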

/**
 * for_each_linked_btree_node - iterate over all iterators linked with @_iter
 * that also point to @_b
 *
 * @_b is assumed to be locked by @_iter
 *
 * Filters out iterators that don't have a valid btree_node iterator for @_b -
 * i.e. iterators for which bch2_btree_node_relock() would not succeed.
 */
#define for_each_linked_btree_node(_iter, _b, _linked)                  \
        for ((_linked) = (_iter);                                       \
             ((_linked) = __next_linked_btree_node(_iter, _b, _linked));)
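
/*
 * Usage sketch (illustrative; the loop body is an assumption, not taken from
 * callers): after modifying @b through @iter, mark every linked iterator
 * that still has a valid view of @b as needing to re-peek:
 *
 *      struct btree_iter *linked;
 *
 *      for_each_linked_btree_node(iter, b, linked)
 *              btree_iter_set_dirty(linked, BTREE_ITER_NEED_PEEK);
 */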

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
                                          struct btree *b) {}
#endif

void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
                              struct btree_node_iter *, struct bset_tree *,
                              struct bkey_packed *, unsigned, unsigned);

int bch2_btree_iter_unlock(struct btree_iter *);
bool __bch2_btree_iter_set_locks_want(struct btree_iter *, unsigned);

static inline bool bch2_btree_iter_set_locks_want(struct btree_iter *iter,
                                                  unsigned new_locks_want)
{
        new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

        if (iter->locks_want == new_locks_want &&
            iter->nodes_intent_locked == (1 << new_locks_want) - 1)
                return true;

        return __bch2_btree_iter_set_locks_want(iter, new_locks_want);
}
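
/*
 * Example (a sketch; the retry convention is our assumption, not taken from
 * this header): before modifying a leaf, upgrade the iterator so it holds an
 * intent lock on level 0. On failure, callers typically re-traverse:
 *
 *      if (!bch2_btree_iter_set_locks_want(iter, 1)) {
 *              int ret = bch2_btree_iter_traverse(iter);
 *              if (ret)
 *                      return ret;
 *      }
 */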

bool bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop_linked(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);

int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);

struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);

void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
                            enum btree_id, struct bpos,
                            unsigned, unsigned, unsigned);

static inline void bch2_btree_iter_init(struct btree_iter *iter,
                        struct bch_fs *c, enum btree_id btree_id,
                        struct bpos pos, unsigned flags)
{
        __bch2_btree_iter_init(iter, c, btree_id, pos,
                               flags & BTREE_ITER_INTENT ? 1 : 0, 0,
                               (btree_id == BTREE_ID_EXTENTS
                                ? BTREE_ITER_IS_EXTENTS : 0) | flags);
}

void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
void bch2_btree_iter_unlink(struct btree_iter *);
void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *);
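
/*
 * Sketch of linking two iterators (ours; the exact setup/teardown protocol
 * is an assumption - consult the callers before relying on it). Linked
 * iterators share locks, so an operation that needs two positions in the
 * same btree at once might do:
 *
 *      struct btree_iter iter, other;
 *
 *      bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, pos, 0);
 *      bch2_btree_iter_init(&other, c, BTREE_ID_EXTENTS, other_pos, 0);
 *      bch2_btree_iter_link(&iter, &other);
 *      ...
 *      bch2_btree_iter_unlock(&other);
 *      bch2_btree_iter_unlink(&other);
 *      bch2_btree_iter_unlock(&iter);
 */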

static inline struct bpos btree_type_successor(enum btree_id id,
                                               struct bpos pos)
{
        if (id == BTREE_ID_INODES) {
                pos.inode++;
                pos.offset = 0;
        } else if (id != BTREE_ID_EXTENTS) {
                pos = bkey_successor(pos);
        }

        return pos;
}
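
/*
 * Concrete examples of the three cases above:
 *
 *   BTREE_ID_INODES:  POS(5, 123) -> POS(6, 0)   (one key per inode)
 *   BTREE_ID_EXTENTS: POS(5, 123) -> POS(5, 123) (extents cover ranges of
 *                                                 offsets, so pos is not
 *                                                 advanced here)
 *   anything else:    POS(5, 123) -> POS(5, 124) (via bkey_successor())
 */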

static inline int __btree_iter_cmp(enum btree_id id,
                                   struct bpos pos,
                                   const struct btree_iter *r)
{
        if (id != r->btree_id)
                return id < r->btree_id ? -1 : 1;
        return bkey_cmp(pos, r->pos);
}

static inline int btree_iter_cmp(const struct btree_iter *l,
                                 const struct btree_iter *r)
{
        return __btree_iter_cmp(l->btree_id, l->pos, r);
}

#define __for_each_btree_node(_iter, _c, _btree_id, _start,             \
                              _locks_want, _depth, _flags, _b)          \
        for (__bch2_btree_iter_init((_iter), (_c), (_btree_id), _start, \
                                    _locks_want, _depth, _flags),       \
             _b = bch2_btree_iter_peek_node(_iter);                     \
             (_b);                                                      \
             (_b) = bch2_btree_iter_next_node(_iter, _depth))

#define for_each_btree_node(_iter, _c, _btree_id, _start, _flags, _b)   \
        __for_each_btree_node(_iter, _c, _btree_id, _start, 0, 0, _flags, _b)
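
/*
 * Usage sketch (illustrative): walk every leaf node of the extents btree.
 * The iterator must still be unlocked by the caller when the walk is done:
 *
 *      struct btree_iter iter;
 *      struct btree *b;
 *
 *      for_each_btree_node(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, b) {
 *              ... examine b ...
 *      }
 *      bch2_btree_iter_unlock(&iter);
 */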

static inline struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter,
                                                     unsigned flags)
{
        return flags & BTREE_ITER_SLOTS
                ? bch2_btree_iter_peek_slot(iter)
                : bch2_btree_iter_peek(iter);
}

static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
                                                     unsigned flags)
{
        return flags & BTREE_ITER_SLOTS
                ? bch2_btree_iter_next_slot(iter)
                : bch2_btree_iter_next(iter);
}

#define for_each_btree_key(_iter, _c, _btree_id, _start, _flags, _k)    \
        for (bch2_btree_iter_init((_iter), (_c), (_btree_id),           \
                                  (_start), (_flags)),                  \
             (_k) = __bch2_btree_iter_peek(_iter, _flags);              \
             !IS_ERR_OR_NULL((_k).k);                                   \
             (_k) = __bch2_btree_iter_next(_iter, _flags))

#define for_each_btree_key_continue(_iter, _flags, _k)                  \
        for ((_k) = __bch2_btree_iter_peek(_iter, _flags);              \
             !IS_ERR_OR_NULL((_k).k);                                   \
             (_k) = __bch2_btree_iter_next(_iter, _flags))

static inline int btree_iter_err(struct bkey_s_c k)
{
        return PTR_ERR_OR_ZERO(k.k);
}
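
/*
 * Usage sketch (illustrative): scan all extents of one inode. The loop exits
 * on either the end of the btree or an error, so distinguish the two with
 * btree_iter_err() afterwards:
 *
 *      struct btree_iter iter;
 *      struct bkey_s_c k;
 *      int ret;
 *
 *      for_each_btree_key(&iter, c, BTREE_ID_EXTENTS,
 *                         POS(inum, 0), 0, k) {
 *              if (k.k->p.inode != inum)
 *                      break;
 *              ... process k ...
 *      }
 *      ret = btree_iter_err(k);
 *      bch2_btree_iter_unlock(&iter);
 */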

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
{
        if (need_resched()) {
                bch2_btree_iter_unlock(iter);
                schedule();
        } else if (race_fault()) {
                bch2_btree_iter_unlock(iter);
        }
}
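
/*
 * Usage sketch (ours): call this periodically from a long-running scan.
 * Since the iterator may have been unlocked and is not revalidated for you,
 * either re-traverse explicitly or let the next peek do it:
 *
 *      bch2_btree_iter_cond_resched(iter);
 *      ret = bch2_btree_iter_traverse(iter);
 *      if (ret)
 *              break;
 */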

#endif /* _BCACHEFS_BTREE_ITER_H */