#ifndef _BCACHE_BTREE_ITER_H
#define _BCACHE_BTREE_ITER_H

#include "btree_types.h"

struct btree_iter {
        /* Current btree depth */
        u8                      level;

        /*
         * Used in bch2_btree_iter_traverse(), to indicate whether we're
         * searching for @pos or the first key strictly greater than @pos
         */
        u8                      is_extents;

        /* Bitmasks for read/intent locks held per level */
        u8                      nodes_locked;
        u8                      nodes_intent_locked;

        /* Btree level below which we start taking intent locks */
        u8                      locks_want;

        enum btree_id           btree_id:8;

        /*
         * indicates we need to call bch2_btree_iter_traverse() to revalidate
         * iterator:
         */
        u8                      at_end_of_leaf;

        s8                      error;

        struct bch_fs   *c;

        /* Current position of the iterator */
        struct bpos             pos;

        u32                     lock_seq[BTREE_MAX_DEPTH];

        /*
         * NOTE: Never set iter->nodes to NULL except in btree_iter_lock_root().
         *
         * This is because iter->nodes[iter->level] == NULL is how
         * btree_iter_next_node() knows that it's finished with a depth first
         * traversal. Just unlocking a node (with btree_node_unlock()) is fine,
         * and if you really don't want that node used again (e.g. btree_split()
         * freed it) decrementing lock_seq will cause bch2_btree_node_relock() to
         * always fail (but since freeing a btree node takes a write lock on the
         * node, which increments the node's lock seq, that's not actually
         * necessary in that example).
         *
         * One extra slot for a sentinel NULL:
         */
        struct btree            *nodes[BTREE_MAX_DEPTH + 1];
        struct btree_node_iter  node_iters[BTREE_MAX_DEPTH];

        /*
         * Current unpacked key - so that bch2_btree_iter_advance_pos() can
         * correctly advance pos after bch2_btree_iter_peek()/
         * bch2_btree_iter_peek_with_holes():
         */
        struct bkey             k;

        /*
         * Circular linked list of linked iterators: linked iterators share
         * locks (e.g. two linked iterators may have the same node intent
         * locked, or read and write locked, at the same time), and insertions
         * through one iterator won't invalidate the other linked iterators.
         */

        /* Must come last: */
        struct btree_iter       *next;
};
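
/*
 * Rough usage sketch (illustrative only, not a canonical calling convention;
 * POS() is assumed to be the usual bcachefs position constructor): initialize
 * an iterator, walk keys with bch2_btree_iter_peek() and
 * bch2_btree_iter_advance_pos() - or use the for_each_btree_key() wrappers
 * below - then drop all locks with bch2_btree_iter_unlock():
 *
 *        struct btree_iter iter;
 *        struct bkey_s_c k;
 *
 *        bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS(inum, 0));
 *
 *        while (!IS_ERR_OR_NULL((k = bch2_btree_iter_peek(&iter)).k)) {
 *                ... use k ...
 *                bch2_btree_iter_advance_pos(&iter);
 *        }
 *
 *        bch2_btree_iter_unlock(&iter);
 */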

static inline bool btree_iter_linked(const struct btree_iter *iter)
{
        return iter->next != iter;
}

/**
 * for_each_linked_btree_iter - iterate over all iterators linked with @_iter
 */
#define for_each_linked_btree_iter(_iter, _linked)                      \
        for ((_linked) = (_iter)->next;                                 \
             (_linked) != (_iter);                                      \
             (_linked) = (_linked)->next)
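
/*
 * Usage sketch: linked iterators share locks, so code that drops or retakes
 * locks generally has to visit every iterator on the list. Note that the
 * macro does not visit @_iter itself (compare bch2_btree_iter_cond_resched()
 * at the bottom of this header):
 *
 *        struct btree_iter *linked;
 *
 *        for_each_linked_btree_iter(iter, linked)
 *                bch2_btree_iter_unlock(linked);
 *        bch2_btree_iter_unlock(iter);
 */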

static inline struct btree_iter *
__next_linked_btree_node(struct btree_iter *iter, struct btree *b,
                         struct btree_iter *linked)
{
        do {
                linked = linked->next;

                if (linked == iter)
                        return NULL;

                /*
                 * We don't compare the low bits of the lock sequence numbers
                 * because @iter might have taken a write lock on @b, and we
                 * don't want to skip the linked iterator if the sequence
                 * numbers were equal before taking that write lock. The lock
                 * sequence number is incremented by taking and releasing write
                 * locks and is even when unlocked:
                 */
        } while (linked->nodes[b->level] != b ||
                 linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);

        return linked;
}

/**
 * for_each_linked_btree_node - iterate over all iterators linked with @_iter
 * that also point to @_b
 *
 * @_b is assumed to be locked by @_iter
 *
 * Filters out iterators that don't have a valid btree_node iterator for @_b -
 * i.e. iterators for which bch2_btree_node_relock() would not succeed.
 */
#define for_each_linked_btree_node(_iter, _b, _linked)                  \
        for ((_linked) = (_iter);                                       \
             ((_linked) = __next_linked_btree_node(_iter, _b, _linked));)
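
/*
 * Hypothetical usage sketch (the real callers live in the .c code): after
 * modifying @b through @iter, visit every linked iterator that also has @b
 * locked so its node iterator can be brought back in sync:
 *
 *        struct btree_iter *linked;
 *
 *        for_each_linked_btree_node(iter, b, linked)
 *                ... fix up linked->node_iters[b->level] ...
 */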

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify(struct btree_iter *, struct btree *);
#else
static inline void bch2_btree_iter_verify(struct btree_iter *iter,
                                          struct btree *b) {}
#endif

void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
                              struct btree_node_iter *, struct bset_tree *,
                              struct bkey_packed *, unsigned, unsigned);

int bch2_btree_iter_unlock(struct btree_iter *);
bool __bch2_btree_iter_set_locks_want(struct btree_iter *, unsigned);

static inline bool bch2_btree_iter_set_locks_want(struct btree_iter *iter,
                                                  unsigned new_locks_want)
{
        new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

        if (iter->locks_want == new_locks_want &&
            iter->nodes_intent_locked == (1 << new_locks_want) - 1)
                return true;

        return __bch2_btree_iter_set_locks_want(iter, new_locks_want);
}
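
/*
 * Plausible calling pattern (an assumption, not lifted from the .c code): an
 * iterator that only holds read locks is about to be used for an update, so
 * ask for an intent lock on the leaf and, if the upgrade fails, revalidate
 * with bch2_btree_iter_traverse():
 *
 *        if (!bch2_btree_iter_set_locks_want(iter, 1)) {
 *                int ret = bch2_btree_iter_traverse(iter);
 *                if (ret)
 *                        return ret;
 *        }
 */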

bool bch2_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop_linked(struct btree_iter *, struct btree *);
void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);

int __must_check bch2_btree_iter_traverse(struct btree_iter *);

struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
struct btree *bch2_btree_iter_next_node(struct btree_iter *, unsigned);

struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch2_btree_iter_peek_with_holes(struct btree_iter *);
void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
void bch2_btree_iter_advance_pos(struct btree_iter *);
void bch2_btree_iter_rewind(struct btree_iter *, struct bpos);

void __bch2_btree_iter_init(struct btree_iter *, struct bch_fs *,
                            enum btree_id, struct bpos, unsigned, unsigned);

static inline void bch2_btree_iter_init(struct btree_iter *iter,
                                        struct bch_fs *c,
                                        enum btree_id btree_id,
                                        struct bpos pos)
{
        __bch2_btree_iter_init(iter, c, btree_id, pos, 0, 0);
}

static inline void bch2_btree_iter_init_intent(struct btree_iter *iter,
                                               struct bch_fs *c,
                                               enum btree_id btree_id,
                                               struct bpos pos)
{
        __bch2_btree_iter_init(iter, c, btree_id, pos, 1, 0);
}

void bch2_btree_iter_link(struct btree_iter *, struct btree_iter *);
void bch2_btree_iter_copy(struct btree_iter *, struct btree_iter *);

static inline struct bpos btree_type_successor(enum btree_id id,
                                               struct bpos pos)
{
        if (id == BTREE_ID_INODES) {
                pos.inode++;
                pos.offset = 0;
        } else if (id != BTREE_ID_EXTENTS) {
                pos = bkey_successor(pos);
        }

        return pos;
}
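
/*
 * Worked examples, following the cases above (POS() being the usual bcachefs
 * position constructor):
 *
 *        btree_type_successor(BTREE_ID_INODES,  POS(7, 123)) == POS(8, 0)
 *        btree_type_successor(BTREE_ID_EXTENTS, POS(7, 123)) == POS(7, 123)
 *
 * and for any other btree the position simply advances by one via
 * bkey_successor().
 */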

static inline int __btree_iter_cmp(enum btree_id id,
                                   struct bpos pos,
                                   const struct btree_iter *r)
{
        if (id != r->btree_id)
                return id < r->btree_id ? -1 : 1;
        return bkey_cmp(pos, r->pos);
}

static inline int btree_iter_cmp(const struct btree_iter *l,
                                 const struct btree_iter *r)
{
        return __btree_iter_cmp(l->btree_id, l->pos, r);
}

#define __for_each_btree_node(_iter, _c, _btree_id, _start, _depth,     \
                              _b, _locks_want)                          \
        for (__bch2_btree_iter_init((_iter), (_c), (_btree_id),         \
                                   _start, _locks_want, _depth),        \
             (_iter)->is_extents = false,                               \
             _b = bch2_btree_iter_peek_node(_iter);                     \
             (_b);                                                      \
             (_b) = bch2_btree_iter_next_node(_iter, _depth))

#define for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b)   \
        __for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b, 0)
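
/*
 * Usage sketch: walk every node of a btree at a given depth (0 = leaves),
 * e.g. for a debug dump or a consistency check; POS_MIN is assumed to be the
 * smallest possible position:
 *
 *        struct btree_iter iter;
 *        struct btree *b;
 *
 *        for_each_btree_node(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, b)
 *                ... inspect b ...
 *        bch2_btree_iter_unlock(&iter);
 */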

#define __for_each_btree_key(_iter, _c, _btree_id,  _start,             \
                             _k, _locks_want)                           \
        for (__bch2_btree_iter_init((_iter), (_c), (_btree_id),         \
                                   _start, _locks_want, 0);             \
             !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek(_iter)).k);   \
             bch2_btree_iter_advance_pos(_iter))

#define for_each_btree_key(_iter, _c, _btree_id,  _start, _k)           \
        __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0)

#define for_each_btree_key_intent(_iter, _c, _btree_id,  _start, _k)    \
        __for_each_btree_key(_iter, _c, _btree_id, _start, _k, 1)

#define __for_each_btree_key_with_holes(_iter, _c, _btree_id,           \
                                        _start, _k, _locks_want)        \
        for (__bch2_btree_iter_init((_iter), (_c), (_btree_id),         \
                                   _start, _locks_want, 0);             \
             !IS_ERR_OR_NULL(((_k) = bch2_btree_iter_peek_with_holes(_iter)).k);\
             bch2_btree_iter_advance_pos(_iter))

#define for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k) \
        __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0)

#define for_each_btree_key_with_holes_intent(_iter, _c, _btree_id,      \
                                             _start, _k)                \
        __for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 1)

static inline int btree_iter_err(struct bkey_s_c k)
{
        return IS_ERR(k.k) ? PTR_ERR(k.k) : 0;
}
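
/*
 * Usage sketch combining the loop macros above with btree_iter_err(): the
 * loops terminate when bch2_btree_iter_peek() returns either a NULL key (end
 * of btree) or an error pointer, so the error, if any, can be picked up after
 * the loop (error handling shown here is illustrative only):
 *
 *        struct btree_iter iter;
 *        struct bkey_s_c k;
 *        int ret;
 *
 *        for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, k)
 *                ... use k ...
 *
 *        ret = btree_iter_err(k);
 *        bch2_btree_iter_unlock(&iter);
 */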

/*
 * Unlocks before scheduling
 * Note: does not revalidate iterator
 */
static inline void bch2_btree_iter_cond_resched(struct btree_iter *iter)
{
        struct btree_iter *linked;

        if (need_resched()) {
                for_each_linked_btree_iter(iter, linked)
                        bch2_btree_iter_unlock(linked);
                bch2_btree_iter_unlock(iter);
                schedule();
        } else if (race_fault()) {
                for_each_linked_btree_iter(iter, linked)
                        bch2_btree_iter_unlock(linked);
                bch2_btree_iter_unlock(iter);
        }
}

#endif /* _BCACHE_BTREE_ITER_H */