#ifndef _BCACHE_BTREE_ITER_H
#define _BCACHE_BTREE_ITER_H

#include "btree_types.h"
struct btree_iter {
	/* Current btree depth */
	u8			level;

	/*
	 * Used in bch_btree_iter_traverse(), to indicate whether we're
	 * searching for @pos or the first key strictly greater than @pos
	 */
	u8			is_extents;

	/* Bitmasks for read/intent locks held per level */
	u8			nodes_locked;
	u8			nodes_intent_locked;

	/* Btree level below which we start taking intent locks */
	u8			locks_want;

	enum btree_id		btree_id:8;

	/*
	 * Indicates we need to call bch_btree_iter_traverse() to revalidate
	 * the iterator:
	 */
	u8			at_end_of_leaf;

	s8			error;

	struct cache_set	*c;

	/* Current position of the iterator */
	struct bpos		pos;

	u32			lock_seq[BTREE_MAX_DEPTH];

	/*
	 * NOTE: Never set iter->nodes to NULL except in btree_iter_lock_root().
	 *
	 * This is because iter->nodes[iter->level] == NULL is how
	 * btree_iter_next_node() knows that it's finished with a depth-first
	 * traversal. Just unlocking a node (with btree_node_unlock()) is fine,
	 * and if you really don't want that node used again (e.g. btree_split()
	 * freed it), decrementing lock_seq will cause btree_node_relock() to
	 * always fail (but since freeing a btree node takes a write lock on the
	 * node, which increments the node's lock seq, that's not actually
	 * necessary in that example).
	 *
	 * One extra slot for a sentinel NULL:
	 */
	struct btree		*nodes[BTREE_MAX_DEPTH + 1];
	struct btree_node_iter	node_iters[BTREE_MAX_DEPTH];

	/*
	 * Current unpacked key - so that bch_btree_iter_next()/
	 * bch_btree_iter_next_with_holes() can correctly advance pos.
	 */
	struct bkey		k;

	/*
	 * Circular linked list of linked iterators: linked iterators share
	 * locks (e.g. two linked iterators may have the same node intent
	 * locked, or read and write locked, at the same time), and insertions
	 * through one iterator won't invalidate the other linked iterators.
	 */
	struct btree_iter	*next;
};

static inline bool btree_iter_linked(const struct btree_iter *iter)
{
	return iter->next != iter;
}

/**
 * for_each_linked_btree_iter - iterate over all iterators linked with @_iter
 */
#define for_each_linked_btree_iter(_iter, _linked)			\
	for ((_linked) = (_iter)->next;					\
	     (_linked) != (_iter);					\
	     (_linked) = (_linked)->next)

static inline struct btree_iter *
__next_linked_btree_node(struct btree_iter *iter, struct btree *b,
			 struct btree_iter *linked)
{
	do {
		linked = linked->next;

		if (linked == iter)
			return NULL;

		/*
		 * We don't compare the low bits of the lock sequence numbers
		 * because @iter might have taken a write lock on @b, and we
		 * don't want to skip the linked iterator if the sequence
		 * numbers were equal before taking that write lock. The lock
		 * sequence number is incremented by taking and releasing write
		 * locks and is even when unlocked:
		 */
	} while (linked->nodes[b->level] != b ||
		 linked->lock_seq[b->level] >> 1 != b->lock.state.seq >> 1);

	return linked;
}

/**
 * for_each_linked_btree_node - iterate over all iterators linked with @_iter
 * that also point to @_b
 *
 * @_b is assumed to be locked by @_iter
 *
 * Filters out iterators that don't have a valid btree_node iterator for @_b -
 * i.e. iterators for which btree_node_relock() would not succeed.
 */
#define for_each_linked_btree_node(_iter, _b, _linked)			\
	for ((_linked) = (_iter);					\
	     ((_linked) = __next_linked_btree_node(_iter, _b, _linked));)
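
/*
 * Example (illustrative sketch): after modifying node @b through @iter, each
 * linked iterator still holding @b may need its btree_node_iter repositioned.
 * fix_one_linked_iter() is a hypothetical helper, not something declared in
 * this header:
 *
 *	struct btree_iter *linked;
 *
 *	for_each_linked_btree_node(iter, b, linked)
 *		fix_one_linked_iter(linked, b);
 */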

#ifdef CONFIG_BCACHE_DEBUG
void bch_btree_iter_verify(struct btree_iter *, struct btree *);
#else
static inline void bch_btree_iter_verify(struct btree_iter *iter,
					 struct btree *b) {}
#endif

void bch_btree_node_iter_fix(struct btree_iter *, struct btree *,
			     struct btree_node_iter *, struct bset_tree *,
			     struct bkey_packed *, unsigned, unsigned);

int bch_btree_iter_unlock(struct btree_iter *);
bool __bch_btree_iter_set_locks_want(struct btree_iter *, unsigned);

static inline bool bch_btree_iter_set_locks_want(struct btree_iter *iter,
						 unsigned new_locks_want)
{
	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (iter->locks_want == new_locks_want &&
	    iter->nodes_intent_locked == (1 << new_locks_want) - 1)
		return true;

	return __bch_btree_iter_set_locks_want(iter, new_locks_want);
}
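
/*
 * Example (illustrative sketch): an iterator initialized for read locks can
 * be upgraded before an update; if the upgrade fails, the iterator is assumed
 * to need re-traversing before it is used again. The error handling shown is
 * the caller's, not this header's:
 *
 *	if (!bch_btree_iter_set_locks_want(&iter, 1)) {
 *		ret = bch_btree_iter_traverse(&iter);
 *		if (ret)
 *			goto err;
 *	}
 */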

bool bch_btree_iter_node_replace(struct btree_iter *, struct btree *);
void bch_btree_iter_node_drop_linked(struct btree_iter *, struct btree *);
void bch_btree_iter_node_drop(struct btree_iter *, struct btree *);

void bch_btree_iter_reinit_node(struct btree_iter *, struct btree *);

int __must_check bch_btree_iter_traverse(struct btree_iter *);

struct btree *bch_btree_iter_peek_node(struct btree_iter *);
struct btree *bch_btree_iter_next_node(struct btree_iter *, unsigned);

struct bkey_s_c bch_btree_iter_peek(struct btree_iter *);
struct bkey_s_c bch_btree_iter_peek_with_holes(struct btree_iter *);
void bch_btree_iter_set_pos_same_leaf(struct btree_iter *, struct bpos);
void bch_btree_iter_set_pos(struct btree_iter *, struct bpos);
void bch_btree_iter_advance_pos(struct btree_iter *);
void bch_btree_iter_rewind(struct btree_iter *, struct bpos);

void __bch_btree_iter_init(struct btree_iter *, struct cache_set *,
			   enum btree_id, struct bpos, unsigned, unsigned);

static inline void bch_btree_iter_init(struct btree_iter *iter,
				       struct cache_set *c,
				       enum btree_id btree_id,
				       struct bpos pos)
{
	__bch_btree_iter_init(iter, c, btree_id, pos, 0, 0);
}

static inline void bch_btree_iter_init_intent(struct btree_iter *iter,
					      struct cache_set *c,
					      enum btree_id btree_id,
					      struct bpos pos)
{
	__bch_btree_iter_init(iter, c, btree_id, pos, 1, 0);
}
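
/*
 * Example (illustrative sketch): look up the key at a single position and
 * drop all locks when done. Here c is the caller's struct cache_set, and
 * BTREE_ID_EXTENTS and POS() are assumed to come from btree_types.h / the
 * bkey headers:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	bch_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS(inode, offset));
 *	k = bch_btree_iter_peek(&iter);
 *	if (!IS_ERR_OR_NULL(k.k))
 *		use_key(k);
 *	bch_btree_iter_unlock(&iter);
 *
 * use_key() is a hypothetical consumer of the returned key.
 */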

void bch_btree_iter_link(struct btree_iter *, struct btree_iter *);
void bch_btree_iter_copy(struct btree_iter *, struct btree_iter *);
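
/*
 * Example (illustrative sketch): two iterators that must coexist - say, one
 * reading a source position while another inserts at a destination in the
 * same btree - can be linked so they share locks and insertions through one
 * do not invalidate the other. src_pos and dst_pos are assumed to be struct
 * bpos values supplied by the caller:
 *
 *	struct btree_iter src, dst;
 *
 *	bch_btree_iter_init(&src, c, BTREE_ID_EXTENTS, src_pos);
 *	bch_btree_iter_init_intent(&dst, c, BTREE_ID_EXTENTS, dst_pos);
 *	bch_btree_iter_link(&src, &dst);
 */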

static inline struct bpos btree_type_successor(enum btree_id id,
					       struct bpos pos)
{
	if (id == BTREE_ID_INODES) {
		pos.inode++;
		pos.offset = 0;
	} else if (id != BTREE_ID_EXTENTS) {
		pos = bkey_successor(pos);
	}

	return pos;
}

static inline int __btree_iter_cmp(enum btree_id id,
				   struct bpos pos,
				   const struct btree_iter *r)
{
	if (id != r->btree_id)
		return id < r->btree_id ? -1 : 1;

	return bkey_cmp(pos, r->pos);
}

static inline int btree_iter_cmp(const struct btree_iter *l,
				 const struct btree_iter *r)
{
	return __btree_iter_cmp(l->btree_id, l->pos, r);
}

#define __for_each_btree_node(_iter, _c, _btree_id, _start, _depth,	\
			      _b, _locks_want)				\
	for (__bch_btree_iter_init((_iter), (_c), (_btree_id),		\
				   _start, _locks_want, _depth),	\
	     (_iter)->is_extents = false,				\
	     _b = bch_btree_iter_peek_node(_iter);			\
	     (_b);							\
	     (_b) = bch_btree_iter_next_node(_iter, _depth))

#define for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b)	\
	__for_each_btree_node(_iter, _c, _btree_id, _start, _depth, _b, 0)
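
/*
 * Example (illustrative sketch): walk every leaf node of the extents btree,
 * e.g. for a consistency check. POS_MIN is assumed to be the smallest
 * possible struct bpos, and process_node() is a hypothetical callback:
 *
 *	struct btree_iter iter;
 *	struct btree *b;
 *
 *	for_each_btree_node(&iter, c, BTREE_ID_EXTENTS, POS_MIN, 0, b)
 *		process_node(c, b);
 *
 *	bch_btree_iter_unlock(&iter);
 */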

#define __for_each_btree_key(_iter, _c, _btree_id, _start,		\
			     _k, _locks_want)				\
	for (__bch_btree_iter_init((_iter), (_c), (_btree_id),		\
				   _start, _locks_want, 0);		\
	     !IS_ERR_OR_NULL(((_k) = bch_btree_iter_peek(_iter)).k);	\
	     bch_btree_iter_advance_pos(_iter))

#define for_each_btree_key(_iter, _c, _btree_id, _start, _k)		\
	__for_each_btree_key(_iter, _c, _btree_id, _start, _k, 0)

#define for_each_btree_key_intent(_iter, _c, _btree_id, _start, _k)	\
	__for_each_btree_key(_iter, _c, _btree_id, _start, _k, 1)
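
/*
 * Example (illustrative sketch): iterate over the existing keys of one inode
 * in the extents btree. POS() and the bpos field names are assumed from the
 * bkey headers, and inum is the caller's inode number:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS(inum, 0), k) {
 *		if (k.k->p.inode != inum)
 *			break;
 *		pr_info("extent at %llu:%llu\n",
 *			k.k->p.inode, k.k->p.offset);
 *	}
 *	bch_btree_iter_unlock(&iter);
 */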

#define __for_each_btree_key_with_holes(_iter, _c, _btree_id,		\
					_start, _k, _locks_want)	\
	for (__bch_btree_iter_init((_iter), (_c), (_btree_id),		\
				   _start, _locks_want, 0);		\
	     !IS_ERR_OR_NULL(((_k) = bch_btree_iter_peek_with_holes(_iter)).k);\
	     bch_btree_iter_advance_pos(_iter))

#define for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k)\
	__for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 0)

#define for_each_btree_key_with_holes_intent(_iter, _c, _btree_id,	\
					     _start, _k)		\
	__for_each_btree_key_with_holes(_iter, _c, _btree_id, _start, _k, 1)
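
/*
 * Example (illustrative sketch): unlike bch_btree_iter_peek(), the
 * _with_holes variant also returns a key covering ranges where nothing is
 * stored, so every position in the range gets visited. handle_key() is a
 * hypothetical callback:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	for_each_btree_key_with_holes(&iter, c, BTREE_ID_EXTENTS,
 *				      POS(inum, 0), k) {
 *		if (k.k->p.inode != inum)
 *			break;
 *		handle_key(k);
 *	}
 *	bch_btree_iter_unlock(&iter);
 */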

static inline int btree_iter_err(struct bkey_s_c k)
{
	return IS_ERR(k.k) ? PTR_ERR(k.k) : 0;
}
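
/*
 * Example (illustrative sketch): checking the result of a single lookup on an
 * already-initialized iterator; any error is carried in the ERR_PTR-encoded
 * key pointer, and the error label is the caller's:
 *
 *	k = bch_btree_iter_peek_with_holes(&iter);
 *	ret = btree_iter_err(k);
 *	if (ret)
 *		goto err;
 */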

/*
 * Unlocks before scheduling.
 * Note: does not revalidate the iterator.
 */
static inline void bch_btree_iter_cond_resched(struct btree_iter *iter)
{
	struct btree_iter *linked;

	if (need_resched()) {
		for_each_linked_btree_iter(iter, linked)
			bch_btree_iter_unlock(linked);
		bch_btree_iter_unlock(iter);
		schedule();
	} else if (race_fault()) {
		for_each_linked_btree_iter(iter, linked)
			bch_btree_iter_unlock(linked);
		bch_btree_iter_unlock(iter);
	}
}
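
/*
 * Example (illustrative sketch): a long scan can yield the CPU between keys
 * without holding btree locks across the reschedule. Since the iterator is
 * not revalidated here, the next bch_btree_iter_peek() is expected to
 * re-traverse and re-take locks as needed. process() is a hypothetical
 * per-key worker:
 *
 *	struct btree_iter iter;
 *	struct bkey_s_c k;
 *
 *	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN, k) {
 *		process(k);
 *		bch_btree_iter_cond_resched(&iter);
 *	}
 *	bch_btree_iter_unlock(&iter);
 */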

#endif /* _BCACHE_BTREE_ITER_H */