/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include <linux/six.h>

#include "btree_iter.h"

extern struct lock_class_key bch2_btree_node_lock_key;

static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
}

static inline struct btree_transaction_stats *btree_trans_stats(struct btree_trans *trans)
{
	return trans->fn_idx < ARRAY_SIZE(trans->c->btree_transaction_stats)
		? &trans->c->btree_transaction_stats[trans->fn_idx]
		: NULL;
}

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	return BTREE_NODE_UNLOCKED + ((path->nodes_locked >> (level << 1)) & 3);
}

static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_WRITE_LOCKED;
}

static inline bool btree_node_intent_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path, unsigned l)
{
	return btree_node_locked_type(path, l) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return btree_node_locked_type(path, level) != BTREE_NODE_UNLOCKED;
}

static inline void mark_btree_node_locked_noreset(struct btree_path *path,
						  unsigned level,
						  enum btree_node_locked_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked &= ~(3U << (level << 1));
	path->nodes_locked |= (type + 1) << (level << 1);
}

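/*
 * Worked example: nodes_locked packs two bits per level, storing type + 1
 * so that zero means unlocked.  With nodes_locked == 0b0110,
 * btree_node_locked_type() yields:
 *
 *	level 0: ((0b0110 >> 0) & 3) - 1 ==  1 -> BTREE_NODE_INTENT_LOCKED
 *	level 1: ((0b0110 >> 2) & 3) - 1 ==  0 -> BTREE_NODE_READ_LOCKED
 *	level 2: ((0b0110 >> 4) & 3) - 1 == -1 -> BTREE_NODE_UNLOCKED
 */
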
static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	EBUG_ON(btree_node_write_locked(path, level));
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_UNLOCKED);
}

static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
	mark_btree_node_locked_noreset(path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = ktime_get_ns();
#endif
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

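/*
 * Example: a path with path->level == 2 and path->locks_want == 1 wants no
 * locks below level 2, a read lock on level 2 itself, and no locks above.
 * Raising locks_want to 4 would make it want intent locks on levels 2 and 3
 * instead.
 */
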
static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
					      struct btree_path *path, unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	struct btree_transaction_stats *s = btree_trans_stats(trans);

	if (s)
		__bch2_time_stats_update(&s->lock_hold_times,
					 path->l[level].lock_taken_time,
					 ktime_get_ns());
#endif
}

/* unlock: */

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
		btree_trans_lock_hold_time_update(trans, path, level);
	}
	mark_btree_node_unlocked(path, level);
}

static inline int btree_path_lowest_level_locked(struct btree_path *path)
{
	return __ffs(path->nodes_locked) >> 1;
}

static inline int btree_path_highest_level_locked(struct btree_path *path)
{
	return __fls(path->nodes_locked) >> 1;
}

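/*
 * nodes_locked uses two bits per level, so the lowest/highest set bit
 * positions found by __ffs()/__fls() above are halved to recover the level.
 */
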
static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, btree_path_lowest_level_locked(path));
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);

	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);

	trans_for_each_path_with_node(trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

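/*
 * The six lock sequence number is bumped once when the write lock is taken
 * and again when it's dropped (hence the assertion above that lock_seq is
 * exactly one behind while write locked): adding 2 to each path's saved
 * lock_seq accounts for the full lock/unlock cycle, so that
 * bch2_btree_node_relock() still sees a matching sequence number.
 */
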
void bch2_btree_node_unlock_write(struct btree_trans *,
			struct btree_path *, struct btree *);

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

static inline int __btree_node_lock_nopath(struct btree_trans *trans,
					   struct btree_bkey_cached_common *b,
					   enum six_lock_type type,
					   bool lock_may_not_fail)
{
	int ret;

	trans->lock_may_not_fail = lock_may_not_fail;
	trans->lock_must_abort	= false;
	trans->locking		= b;

	ret = six_lock_type_waiter(&b->lock, type, &trans->locking_wait,
				   bch2_six_check_for_deadlock, trans);
	WRITE_ONCE(trans->locking, NULL);
	WRITE_ONCE(trans->locking_wait.start_time, 0);
	return ret;
}

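/*
 * Note that trans->locking is published before we block and cleared once
 * the lock is acquired: this is what lets bch2_six_check_for_deadlock()
 * walk the graph of which transaction is waiting on which lock, and break
 * any cycle it finds.
 */
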
static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type)
{
	return __btree_node_lock_nopath(trans, b, type, false);
}

static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
						 struct btree_bkey_cached_common *b,
						 enum six_lock_type type)
{
	int ret = __btree_node_lock_nopath(trans, b, type, true);

	BUG_ON(ret);
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree_bkey_cached_common *b,
					     unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (&path->l[level].b->c == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->lock, want);
			return true;
		}

	return false;
}

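/*
 * For example, if another path in this transaction already holds an intent
 * lock on the node, a request for a read lock (a weaker type) is satisfied
 * by just taking another reference via six_lock_increment(), with no risk
 * of blocking.
 */
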
static inline int btree_node_lock(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_bkey_cached_common *b,
				  unsigned level,
				  enum six_lock_type type,
				  unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

	if (likely(six_trylock_type(&b->lock, type)) ||
	    btree_node_lock_increment(trans, b, level, type) ||
	    !(ret = btree_node_lock_nopath(trans, b, type))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->level].lock_taken_time = ktime_get_ns();
#endif
	}

	return ret;
}

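/*
 * Usage sketch (hypothetical caller, error handling abbreviated; b here is
 * a struct btree, whose embedded common part is b->c): a nonzero return is
 * a transaction restart error, which must be propagated so the transaction
 * can unwind and retry:
 *
 *	ret = btree_node_lock(trans, path, &b->c, level, SIX_LOCK_read, _THIS_IP_);
 *	if (ret)
 *		return ret;
 */
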
int __bch2_btree_node_lock_write(struct btree_trans *, struct btree_path *,
				 struct btree_bkey_cached_common *b, bool);

static inline int __btree_node_lock_write(struct btree_trans *trans,
					  struct btree_path *path,
					  struct btree_bkey_cached_common *b,
					  bool lock_may_not_fail)
{
	EBUG_ON(&path->l[b->level].b->c != b);
	EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
	EBUG_ON(!btree_node_intent_locked(path, b->level));

	/*
	 * six locks are unfair, and read locks block while a thread wants a
	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
	 */
	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);

	return likely(six_trylock_write(&b->lock))
		? 0
		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
}

static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
						     struct btree_path *path,
						     struct btree_bkey_cached_common *b)
{
	int ret = __btree_node_lock_write(trans, path, b, true);

	BUG_ON(ret);
}

static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
			   struct btree_path *path,
			   struct btree_bkey_cached_common *b)
{
	return __btree_node_lock_write(trans, path, b, false);
}

/* relock: */

bool bch2_btree_path_relock_norestart(struct btree_trans *,
				      struct btree_path *, unsigned long);
bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);

static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, true));
}

static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
						  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		!btree_node_write_locked(path, level) &&
		btree_node_locked_type(path, level) != __btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		(!IS_ERR_OR_NULL(path->l[level].b) &&
		 __bch2_btree_node_relock(trans, path, level, false));
}

static inline int bch2_btree_path_relock(struct btree_trans *trans,
					 struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

/* upgrade: */

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
					    struct btree_path *, unsigned);
bool __bch2_btree_path_upgrade(struct btree_trans *,
			       struct btree_path *, unsigned);

static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned new_locks_want)
{
	unsigned old_locks_want = path->locks_want;

	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

	if (path->locks_want < new_locks_want
	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
	    : path->uptodate == BTREE_ITER_UPTODATE)
		return 0;

	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
			old_locks_want, new_locks_want);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
}

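/*
 * Usage sketch (hypothetical caller): upgrading is how a path traversed
 * with read locks acquires the intent locks needed before an update; on
 * failure the transaction has been marked for restart and the error must
 * be returned up:
 *
 *	ret = bch2_btree_path_upgrade(trans, path, level + 2);
 *	if (ret)
 *		return ret;
 */
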
/* misc: */

static inline void btree_path_set_should_be_locked(struct btree_path *path)
{
	EBUG_ON(!btree_node_locked(path, path->level));
	EBUG_ON(path->uptodate);

	path->should_be_locked = true;
}

static inline void __btree_path_set_level_up(struct btree_trans *trans,
					     struct btree_path *path,
					     unsigned l)
{
	btree_node_unlock(trans, path, l);
	path->l[l].b = ERR_PTR(-BCH_ERR_no_btree_node_up);
}

static inline void btree_path_set_level_up(struct btree_trans *trans,
					   struct btree_path *path)
{
	__btree_path_set_level_up(trans, path, path->level++);
	btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
}

/* debug: */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
						  struct btree_path *,
						  struct btree_bkey_cached_common *b,
						  unsigned);

int bch2_check_for_deadlock(struct btree_trans *, struct printbuf *);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif

#endif /* _BCACHEFS_BTREE_LOCKING_H */