1 #ifndef _BCACHEFS_BTREE_LOCKING_H
2 #define _BCACHEFS_BTREE_LOCKING_H
5 * Only for internal btree use:
7 * The btree iterator tracks what locks it wants to take, and what locks it
8 * currently has - here we have wrappers for locking/unlocking btree nodes and
9 * updating the iterator state
12 #include <linux/six.h>
14 #include "btree_iter.h"
16 /* matches six lock types */
17 enum btree_node_locked_type {
/*
 * BTREE_NODE_UNLOCKED is -1 so that READ/INTENT line up with
 * SIX_LOCK_read (0) and SIX_LOCK_intent (1);
 * btree_node_locked_type() relies on this to compute the lock state
 * arithmetically, without branches.
 */
18 BTREE_NODE_UNLOCKED = -1,
19 BTREE_NODE_READ_LOCKED = SIX_LOCK_read,
20 BTREE_NODE_INTENT_LOCKED = SIX_LOCK_intent,
/*
 * Returns the lock state held at @level as an enum btree_node_locked_type:
 * UNLOCKED (-1), READ_LOCKED (0) or INTENT_LOCKED (1).
 */
23 static inline int btree_node_locked_type(struct btree_iter *iter,
27 * We're relying on the fact that if nodes_intent_locked is set
28 * nodes_locked must be set as well, so that we can compute without
 * branches: -1 + locked bit + intent bit yields the enum value directly.
31 return BTREE_NODE_UNLOCKED +
32 ((iter->nodes_locked >> level) & 1) +
33 ((iter->nodes_intent_locked >> level) & 1);
/* true iff we hold an intent lock on the node at @level */
36 static inline bool btree_node_intent_locked(struct btree_iter *iter,
39 return btree_node_locked_type(iter, level) == BTREE_NODE_INTENT_LOCKED;
/* true iff we hold a read (not intent) lock on the node at @level */
42 static inline bool btree_node_read_locked(struct btree_iter *iter,
45 return btree_node_locked_type(iter, level) == BTREE_NODE_READ_LOCKED;
/* true iff we hold any lock (read or intent) on the node at @level */
48 static inline bool btree_node_locked(struct btree_iter *iter, unsigned level)
50 return iter->nodes_locked & (1 << level);
/*
 * Clear the iterator's lock-tracking bits for @level.  Bookkeeping only —
 * this does not drop the underlying six lock (see __btree_node_unlock()).
 */
53 static inline void mark_btree_node_unlocked(struct btree_iter *iter,
56 iter->nodes_locked &= ~(1 << level);
57 iter->nodes_intent_locked &= ~(1 << level);
/*
 * Record in the iterator that @level is now locked with @type.
 * Bookkeeping only — the six lock must already be held.
 */
60 static inline void mark_btree_node_locked(struct btree_iter *iter,
62 enum six_lock_type type)
64 /* relying on this to avoid a branch */
65 BUILD_BUG_ON(SIX_LOCK_read != 0);
66 BUILD_BUG_ON(SIX_LOCK_intent != 1);
/* intent bit is set directly from the lock type's numeric value */
68 iter->nodes_locked |= 1 << level;
69 iter->nodes_intent_locked |= type << level;
/* convenience wrapper: mark @level as intent locked */
72 static inline void mark_btree_node_intent_locked(struct btree_iter *iter,
75 mark_btree_node_locked(iter, level, SIX_LOCK_intent);
/*
 * Lock type to take when acquiring the node at @level: intent for levels
 * below locks_want (presumably SIX_LOCK_read otherwise — the other arm of
 * the conditional is not visible here; confirm against the full source).
 */
78 static inline enum six_lock_type __btree_lock_want(struct btree_iter *iter, int level)
80 return level < iter->locks_want
/*
 * Lock state this iterator wants at @level:
 *  - below iter->level (nodes we've descended past): unlocked
 *  - iter->level up to locks_want: intent locked
 *  - exactly iter->level (when not covered by locks_want): read locked
 *  - anything above: unlocked
 * Note the order of the checks matters: locks_want takes precedence over
 * the read lock at iter->level.
 */
85 static inline enum btree_node_locked_type
86 btree_lock_want(struct btree_iter *iter, int level)
88 if (level < iter->level)
89 return BTREE_NODE_UNLOCKED;
90 if (level < iter->locks_want)
91 return BTREE_NODE_INTENT_LOCKED;
92 if (level == iter->level)
93 return BTREE_NODE_READ_LOCKED;
94 return BTREE_NODE_UNLOCKED;
/*
 * Drop whatever lock is held at @level (if any) and clear the iterator's
 * bookkeeping.  Safe to call when the level is already unlocked.
 */
97 static inline void __btree_node_unlock(struct btree_iter *iter, unsigned level)
99 int lock_type = btree_node_locked_type(iter, level);
101 EBUG_ON(level >= BTREE_MAX_DEPTH);
/* UNLOCKED (-1) is not a valid six lock type, so guard before unlocking */
103 if (lock_type != BTREE_NODE_UNLOCKED)
104 six_unlock_type(&iter->l[level].b->lock, lock_type);
105 mark_btree_node_unlocked(iter, level);
/*
 * Like __btree_node_unlock(), but asserts we aren't dropping the leaf
 * (level 0) lock while BTREE_ITER_NOUNLOCK forbids it.
 */
108 static inline void btree_node_unlock(struct btree_iter *iter, unsigned level)
110 BUG_ON(!level && iter->flags & BTREE_ITER_NOUNLOCK);
112 __btree_node_unlock(iter, level);
/*
 * Drop every lock this iterator holds and mark it as needing a relock
 * before it can be used again.
 */
115 static inline void __bch2_btree_iter_unlock(struct btree_iter *iter)
117 btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
/* __ffs finds the lowest locked level; loop until the bitmask is empty */
119 while (iter->nodes_locked)
120 btree_node_unlock(iter, __ffs(iter->nodes_locked));
/*
 * Map a six lock type to the time-stats counter used to track how long
 * we blocked waiting for that lock type.
 */
123 static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
127 return BCH_TIME_btree_lock_contended_read;
128 case SIX_LOCK_intent:
129 return BCH_TIME_btree_lock_contended_intent;
131 return BCH_TIME_btree_lock_contended_write;
138 * wrapper around six locks that just traces lock contended time
/*
 * Slow path: blocks on the lock, recording the time spent waiting into
 * the filesystem's contended-lock time stats.
 */
140 static inline void __btree_node_lock_type(struct bch_fs *c, struct btree *b,
141 enum six_lock_type type)
143 u64 start_time = local_clock();
145 six_lock_type(&b->lock, type);
146 bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
/*
 * Take @type lock on @b: trylock fast path first, falling back to the
 * time-tracking slow path on contention.
 */
149 static inline void btree_node_lock_type(struct bch_fs *c, struct btree *b,
150 enum six_lock_type type)
152 if (!six_trylock_type(&b->lock, type))
153 __btree_node_lock_type(c, b, type);
157 * Lock a btree node if we already have it locked on one of our linked
/*
 * iterators: if another iterator in the same transaction already holds
 * @b at @level with a lock at least as strong as @want, just bump the
 * six lock's count instead of taking it again (six locks are not
 * recursive through a plain lock call).
 */
160 static inline bool btree_node_lock_increment(struct btree_iter *iter,
161 struct btree *b, unsigned level,
162 enum btree_node_locked_type want)
164 struct btree_iter *linked;
166 trans_for_each_iter(iter->trans, linked)
167 if (linked != iter &&
168 linked->l[level].b == b &&
169 btree_node_locked_type(linked, level) >= want) {
170 six_lock_increment(&b->lock, want);
177 bool __bch2_btree_node_lock(struct btree *, struct bpos, unsigned,
178 struct btree_iter *, enum six_lock_type, bool);
/*
 * Lock @b at @level with @type, trying in order of increasing cost:
 * trylock, piggyback on a linked iterator's existing lock, then the
 * full slow path (which may drop other locks if @may_drop_locks).
 * Returns false if the lock could not be taken.
 */
180 static inline bool btree_node_lock(struct btree *b, struct bpos pos,
182 struct btree_iter *iter,
183 enum six_lock_type type,
186 EBUG_ON(level >= BTREE_MAX_DEPTH);
188 return likely(six_trylock_type(&b->lock, type)) ||
189 btree_node_lock_increment(iter, b, level, type) ||
190 __bch2_btree_node_lock(b, pos, level, iter,
191 type, may_drop_locks);
194 bool __bch2_btree_node_relock(struct btree_iter *, unsigned);
/*
 * Ensure @level is locked with the type the iterator wants: fast path if
 * already locked, otherwise attempt to retake the lock via the slow path.
 * Returns false if the node could not be relocked (e.g. it was modified).
 */
196 static inline bool bch2_btree_node_relock(struct btree_iter *iter,
/* if we hold a lock here, it must already be of the wanted type */
199 EBUG_ON(btree_node_locked(iter, level) &&
200 btree_node_locked_type(iter, level) !=
201 __btree_lock_want(iter, level));
203 return likely(btree_node_locked(iter, level)) ||
204 __bch2_btree_node_relock(iter, level);
207 void bch2_btree_node_unlock_write(struct btree *, struct btree_iter *);
209 void __bch2_btree_node_lock_write(struct btree *, struct btree_iter *);
/*
 * Take the write lock on @b (caller must already hold an intent lock —
 * the seq assertion below checks our lock on this node is still valid).
 * Falls back to the slow path if the write lock is contended.
 */
211 static inline void bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
213 EBUG_ON(iter->l[b->level].b != b);
214 EBUG_ON(iter->l[b->level].lock_seq != b->lock.state.seq);
216 if (!six_trylock_write(&b->lock))
217 __bch2_btree_node_lock_write(b, iter);
220 #endif /* _BCACHEFS_BTREE_LOCKING_H */