/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

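/*
 * Per level lock state is tracked in two bitfields: nodes_locked has a bit
 * set for every level where we hold a lock, nodes_intent_locked for every
 * level where that lock is an intent lock. Summing the two bits for a level,
 * with BTREE_NODE_UNLOCKED == -1 as the base, gives the lock type directly -
 * e.g. a read lock contributes -1 + 1 + 0 == SIX_LOCK_read:
 */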
static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((path->nodes_locked >> level) & 1) +
		((path->nodes_intent_locked >> level) & 1);
}

static inline bool btree_node_intent_locked(struct btree_path *path,
					    unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path,
					  unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return path->nodes_locked & (1 << level);
}

static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	path->nodes_locked &= ~(1 << level);
	path->nodes_intent_locked &= ~(1 << level);
}

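/*
 * Record a lock the caller just took in the path's lock bitfields: this only
 * updates iterator state, the six lock itself must already be held.
 */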
static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	BUG_ON(trans->in_traverse_all && path->sorted_idx > trans->traverse_all_idx);

	path->nodes_locked |= 1 << level;
	path->nodes_intent_locked |= type << level;
}

static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
						 struct btree_path *path,
						 unsigned level)
{
	mark_btree_node_locked(trans, path, level, SIX_LOCK_intent);
}

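/*
 * The lock type we should be holding at @level: intent for levels covered by
 * path->locks_want (nodes we may have to modify), read otherwise:
 */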
static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

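/* Unlock a single level, if locked, and clear it from the lock bitfields: */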
static inline void btree_node_unlock(struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED)
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
	mark_btree_node_unlocked(path, level);
}

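/*
 * Drop every lock @path holds, and mark the path as needing relock so the
 * next traversal knows its locks have to be retaken or revalidated:
 */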
static inline void __bch2_btree_path_unlock(struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(path, __ffs(path->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

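/*
 * Lock a node, blocking if necessary: on the contended path, we record what
 * we're blocked on in the transaction - for debugging - and time how long we
 * waited:
 */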
static inline bool btree_node_lock_type(struct btree_trans *trans,
					struct btree_path *path,
					struct btree *b,
					struct bpos pos, unsigned level,
					enum six_lock_type type,
					six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	struct bch_fs *c = trans->c;
	u64 start_time;
	bool ret;

	if (six_trylock_type(&b->c.lock, type))
		return true;

	start_time = local_clock();

	trans->locking_path_idx = path->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= path->btree_id;
	trans->locking_level	= level;
	trans->locking_lock_type = type;
	trans->locking		= b;
	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
	trans->locking = NULL;

	if (ret)
		bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);

	return ret;
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->l[level].b == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}

bool __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
			    struct btree *, struct bpos, unsigned,
			    enum six_lock_type,
			    six_lock_should_sleep_fn, void *,
			    unsigned long);

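/*
 * Three strategies, cheapest first: the uncontended trylock fastpath, then
 * taking another reference on a lock some other path in this transaction
 * already holds, then the out of line slowpath - which may block and, note
 * the bool return, may fail:
 */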
static inline bool btree_node_lock(struct btree_trans *trans,
			struct btree_path *path,
			struct btree *b, struct bpos pos, unsigned level,
			enum six_lock_type type,
			six_lock_should_sleep_fn should_sleep_fn, void *p,
			unsigned long ip)
{
	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

	return likely(six_trylock_type(&b->c.lock, type)) ||
		btree_node_lock_increment(trans, b, level, type) ||
		__bch2_btree_node_lock(trans, path, b, pos, level, type,
				       should_sleep_fn, p, ip);
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);

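/*
 * Relock fastpath: a no-op if we're still locked; the out of line slowpath
 * attempts to retake the lock from the saved sequence number, and fails if
 * the node has since been write locked:
 */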
static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		btree_node_locked_type(path, level) !=
		__btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		__bch2_btree_node_relock(trans, path, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

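	/*
	 * The six lock sequence number was incremented once when the write
	 * lock was taken (hence the lock_seq + 1 check above) and will be
	 * incremented again by six_unlock_write(): bump the saved sequence
	 * numbers by 2 so they remain current.
	 */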
	trans_for_each_path_with_node(trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
				  struct btree_path *, struct btree *);

void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

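/*
 * Take the write lock on a node we already have intent locked: trylock
 * fastpath, with an out of line slowpath when the lock is contended:
 */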
static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
					      struct btree_path *path,
					      struct btree *b)
{
	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
	EBUG_ON(!btree_node_intent_locked(path, b->c.level));

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(trans, b);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */