X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_locking.h;h=a49f1dd1d223613c5f9c59b28a6a53f1410a5143;hb=8d8a9f3e9bdd1d84fbbe0531e81977cc9044654a;hp=bf8d1880673b224a09390e911dc50dfff03bb3b5;hpb=e0a51ccce8533a91c7cc0cd0adc5662697c9bcfa;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_locking.h b/libbcachefs/btree_locking.h
index bf8d188..a49f1dd 100644
--- a/libbcachefs/btree_locking.h
+++ b/libbcachefs/btree_locking.h
@@ -10,11 +10,18 @@
  * updating the iterator state
  */
 
-#include <linux/six.h>
-
 #include "btree_iter.h"
+#include "six.h"
+
+void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
 
-extern struct lock_class_key bch2_btree_node_lock_key;
+#ifdef CONFIG_LOCKDEP
+void bch2_assert_btree_nodes_not_locked(void);
+#else
+static inline void bch2_assert_btree_nodes_not_locked(void) {}
+#endif
+
+void bch2_trans_unlock_noassert(struct btree_trans *);
 
 static inline bool is_btree_node(struct btree_path *path, unsigned l)
 {
@@ -84,9 +91,9 @@ static inline void mark_btree_node_unlocked(struct btree_path *path,
 static inline void mark_btree_node_locked(struct btree_trans *trans,
 					  struct btree_path *path,
 					  unsigned level,
-					  enum six_lock_type type)
+					  enum btree_node_locked_type type)
 {
-	mark_btree_node_locked_noreset(path, level, type);
+	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 	path->l[level].lock_taken_time = local_clock();
 #endif
@@ -170,13 +177,13 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 	struct btree_path *linked;
 
 	EBUG_ON(path->l[b->c.level].b != b);
-	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);
+	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
 	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
 
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 
 	trans_for_each_path_with_node(trans, b, linked)
-		linked->l[b->c.level].lock_seq += 2;
+		linked->l[b->c.level].lock_seq++;
 
 	six_unlock_write(&b->c.lock);
 }
@@ -191,7 +198,8 @@ int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);
 static inline int __btree_node_lock_nopath(struct btree_trans *trans,
 					  struct btree_bkey_cached_common *b,
 					  enum six_lock_type type,
-					  bool lock_may_not_fail)
+					  bool lock_may_not_fail,
+					  unsigned long ip)
 {
 	int ret;
 
@@ -199,8 +207,8 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
 	trans->lock_must_abort = false;
 	trans->locking = b;
 
-	ret = six_lock_type_waiter(&b->lock, type, &trans->locking_wait,
-				   bch2_six_check_for_deadlock, trans);
+	ret = six_lock_ip_waiter(&b->lock, type, &trans->locking_wait,
+				 bch2_six_check_for_deadlock, trans, ip);
 	WRITE_ONCE(trans->locking, NULL);
 	WRITE_ONCE(trans->locking_wait.start_time, 0);
 	return ret;
@@ -209,16 +217,17 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
 static inline int __must_check
 btree_node_lock_nopath(struct btree_trans *trans,
 		       struct btree_bkey_cached_common *b,
-		       enum six_lock_type type)
+		       enum six_lock_type type,
+		       unsigned long ip)
 {
-	return __btree_node_lock_nopath(trans, b, type, false);
+	return __btree_node_lock_nopath(trans, b, type, false, ip);
 }
 
 static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
 						 struct btree_bkey_cached_common *b,
 						 enum six_lock_type type)
 {
-	int ret = __btree_node_lock_nopath(trans, b, type, true);
+	int ret = __btree_node_lock_nopath(trans, b, type, true, _THIS_IP_);
 
 	BUG_ON(ret);
 }
@@ -237,7 +246,7 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
 	trans_for_each_path(trans, path)
 		if (&path->l[level].b->c == b &&
 		    btree_node_locked_type(path, level) >= want) {
-			six_lock_increment(&b->lock, want);
+			six_lock_increment(&b->lock, (enum six_lock_type) want);
 			return true;
 		}
 
@@ -254,11 +263,11 @@ static inline int btree_node_lock(struct btree_trans *trans,
 	int ret = 0;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
-	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
+	EBUG_ON(!test_bit(path->idx, trans->paths_allocated));
 
 	if (likely(six_trylock_type(&b->lock, type)) ||
-	    btree_node_lock_increment(trans, b, level, type) ||
-	    !(ret = btree_node_lock_nopath(trans, b, type))) {
+	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
+	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 		path->l[b->level].lock_taken_time = local_clock();
 #endif
@@ -276,7 +285,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
 					  bool lock_may_not_fail)
 {
 	EBUG_ON(&path->l[b->level].b->c != b);
-	EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+	EBUG_ON(path->l[b->level].lock_seq != six_lock_seq(&b->lock));
 	EBUG_ON(!btree_node_intent_locked(path, b->level));
 
 	/*
@@ -284,21 +293,13 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
 	 * write lock: thus, we need to tell the cycle detector we have a write
 	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
+	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
 
 	return likely(six_trylock_write(&b->lock))
 		? 0
 		: __bch2_btree_node_lock_write(trans, path, b, lock_may_not_fail);
 }
 
-static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
-						     struct btree_path *path,
-						     struct btree_bkey_cached_common *b)
-{
-	int ret = __btree_node_lock_write(trans, path, b, true);
-
-	BUG_ON(ret);
-}
-
 static inline int __must_check
 bch2_btree_node_lock_write(struct btree_trans *trans,
 			   struct btree_path *path,
@@ -307,10 +308,25 @@ bch2_btree_node_lock_write(struct btree_trans *trans,
 	return __btree_node_lock_write(trans, path, b, false);
 }
 
+void bch2_btree_node_lock_write_nofail(struct btree_trans *,
+				       struct btree_path *,
+				       struct btree_bkey_cached_common *);
+
 /* relock: */
 
 bool bch2_btree_path_relock_norestart(struct btree_trans *,
 			struct btree_path *, unsigned long);
+int __bch2_btree_path_relock(struct btree_trans *,
+			     struct btree_path *, unsigned long);
+
+static inline int bch2_btree_path_relock(struct btree_trans *trans,
+					 struct btree_path *path, unsigned long trace_ip)
+{
+	return btree_node_locked(path, path->level)
+		? 0
+		: __bch2_btree_path_relock(trans, path, trace_ip);
+}
+
 bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned, bool trace);
 
 static inline bool bch2_btree_node_relock(struct btree_trans *trans,
@@ -337,39 +353,38 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
 		__bch2_btree_node_relock(trans, path, level, false));
 }
 
-static inline int bch2_btree_path_relock(struct btree_trans *trans,
-					 struct btree_path *path, unsigned long trace_ip)
-{
-	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
-		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
-		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
-	}
+/* upgrade */
 
-	return 0;
-}
 
-/* upgrade */
+struct get_locks_fail {
+	unsigned	l;
+	struct btree	*b;
+};
 
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
-			       struct btree_path *, unsigned);
+			       struct btree_path *, unsigned,
+			       struct get_locks_fail *);
+
 bool __bch2_btree_path_upgrade(struct btree_trans *,
-			       struct btree_path *, unsigned);
+			       struct btree_path *, unsigned,
+			       struct get_locks_fail *);
 
 static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
 					  struct btree_path *path,
 					  unsigned new_locks_want)
 {
+	struct get_locks_fail f;
 	unsigned old_locks_want = path->locks_want;
 
 	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
 
 	if (path->locks_want < new_locks_want
-	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
 	    : path->uptodate == BTREE_ITER_UPTODATE)
 		return 0;
 
 	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
-			old_locks_want, new_locks_want);
+			old_locks_want, new_locks_want, &f);
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 }
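
Two notes on the API changes above, with a sketch for the trickier one.

First, the lock-sequence hunks: the old code read `b->c.lock.state.seq` directly, a counter bumped on both write lock and write unlock (odd while write-locked), which is why the old assertion compared `lock_seq + 1` and why `bch2_btree_node_unlock_write_inlined()` advanced each path's cached `lock_seq` by 2. After this patch the counter is read through the `six_lock_seq()` accessor and advances once per write cycle, so a cached sequence number is valid exactly when it equals the current one. The standalone sketch below models that relock check; `mock_six_lock`, `mock_six_lock_seq()`, `mock_unlock_write()`, `can_relock()` and the rest are invented names for illustration, not bcachefs or six-lock APIs, and only the sequence logic mirrors the diff.

/*
 * Standalone sketch of the sequence-number relock check; all names here
 * are hypothetical stand-ins. Compile with: cc -Wall sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_six_lock {
	unsigned seq;		/* advances once per write unlock (new scheme) */
};

/* stands in for six_lock_seq(&b->c.lock) */
static unsigned mock_six_lock_seq(const struct mock_six_lock *l)
{
	return l->seq;
}

static void mock_unlock_write(struct mock_six_lock *l)
{
	l->seq++;		/* the old even/odd encoding bumped this by 2 */
}

/* a btree_path level caches the seq it saw while it held the lock */
struct mock_path_level {
	unsigned lock_seq;
};

/*
 * Relocking without re-traversing is safe only if nobody write-locked
 * the node in between, i.e. the cached seq still matches:
 */
static bool can_relock(const struct mock_path_level *pl,
		       const struct mock_six_lock *l)
{
	return pl->lock_seq == mock_six_lock_seq(l);
}

int main(void)
{
	struct mock_six_lock lock = { .seq = 0 };
	struct mock_path_level pl = { .lock_seq = mock_six_lock_seq(&lock) };

	printf("before any write: %s\n", can_relock(&pl, &lock) ? "ok" : "stale");

	mock_unlock_write(&lock);	/* another path modified the node */
	printf("after a write:    %s\n", can_relock(&pl, &lock) ? "ok" : "stale");

	/*
	 * As in bch2_btree_node_unlock_write_inlined() above, a path that
	 * itself held the write lock keeps its cached seq in step (the
	 * "linked->l[...].lock_seq++" hunk), so it stays relockable:
	 */
	pl.lock_seq++;
	printf("lock holder:      %s\n", can_relock(&pl, &lock) ? "ok" : "stale");
	return 0;
}

Second, the plumbing changes: lock acquisition now threads a caller instruction pointer (`_THIS_IP_`, or the path's allocation site via `btree_path_ip_allocated()`) down to `six_lock_ip_waiter()`, presumably so lockdep and tracepoints can attribute lock waits to the real call site rather than to the locking helpers; and failed upgrades now report which level and btree node could not be locked through the new `struct get_locks_fail` out-parameter of `__bch2_btree_path_upgrade()` and `bch2_btree_path_upgrade_noupgrade_sibs()`, which the `trans_restart_upgrade` tracepoint picks up.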