X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_locking.h;h=f2e2c5881b7e4bd2551018708d7bcadee1d53083;hb=e5b2870d05753c1dedd79261ba5e321ce953f5ab;hp=b341cc894c61c06dc0c689945e6b0f943a5f2b12;hpb=1f78fed4693a5361f56508daac59bebd5b556379;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_locking.h b/libbcachefs/btree_locking.h
index b341cc8..f2e2c58 100644
--- a/libbcachefs/btree_locking.h
+++ b/libbcachefs/btree_locking.h
@@ -10,9 +10,8 @@
  * updating the iterator state
  */
 
-#include <linux/six.h>
-
 #include "btree_iter.h"
+#include "six.h"
 
 void bch2_btree_lock_init(struct btree_bkey_cached_common *,
 			  enum six_lock_init_flags);
@@ -22,6 +21,8 @@ void bch2_assert_btree_nodes_not_locked(void);
 static inline void bch2_assert_btree_nodes_not_locked(void) {}
 #endif
 
+void bch2_trans_unlock_noassert(struct btree_trans *);
+
 static inline bool is_btree_node(struct btree_path *path, unsigned l)
 {
 	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
 }
@@ -90,9 +91,9 @@ static inline void mark_btree_node_unlocked(struct btree_path *path,
 static inline void mark_btree_node_locked(struct btree_trans *trans,
 					  struct btree_path *path,
 					  unsigned level,
-					  enum six_lock_type type)
+					  enum btree_node_locked_type type)
 {
-	mark_btree_node_locked_noreset(path, level, type);
+	mark_btree_node_locked_noreset(path, level, (enum btree_node_locked_type) type);
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 	path->l[level].lock_taken_time = local_clock();
 #endif
@@ -121,12 +122,9 @@ static void btree_trans_lock_hold_time_update(struct btree_trans *trans,
 					      struct btree_path *path, unsigned level)
 {
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
-	struct btree_transaction_stats *s = btree_trans_stats(trans);
-
-	if (s)
-		__bch2_time_stats_update(&s->lock_hold_times,
-					 path->l[level].lock_taken_time,
-					 local_clock());
+	__time_stats_update(&btree_trans_stats(trans)->lock_hold_times,
+			    path->l[level].lock_taken_time,
+			    local_clock());
 #endif
 }
 
@@ -174,14 +172,15 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 				     struct btree *b)
 {
 	struct btree_path *linked;
+	unsigned i;
 
 	EBUG_ON(path->l[b->c.level].b != b);
 	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
 	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
 
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 
-	trans_for_each_path_with_node(trans, b, linked)
+	trans_for_each_path_with_node(trans, b, linked, i)
 		linked->l[b->c.level].lock_seq++;
 
 	six_unlock_write(&b->c.lock);
@@ -241,11 +240,12 @@ static inline bool btree_node_lock_increment(struct btree_trans *trans,
 					     enum btree_node_locked_type want)
 {
 	struct btree_path *path;
+	unsigned i;
 
-	trans_for_each_path(trans, path)
+	trans_for_each_path(trans, path, i)
 		if (&path->l[level].b->c == b &&
 		    btree_node_locked_type(path, level) >= want) {
-			six_lock_increment(&b->lock, want);
+			six_lock_increment(&b->lock, (enum six_lock_type) want);
 			return true;
 		}
 
@@ -262,10 +262,9 @@ static inline int btree_node_lock(struct btree_trans *trans,
 	int ret = 0;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
-	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
 
 	if (likely(six_trylock_type(&b->lock, type)) ||
-	    btree_node_lock_increment(trans, b, level, type) ||
+	    btree_node_lock_increment(trans, b, level, (enum btree_node_locked_type) type) ||
 	    !(ret = btree_node_lock_nopath(trans, b, type, btree_path_ip_allocated(path)))) {
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
 		path->l[b->level].lock_taken_time = local_clock();
@@ -292,7 +291,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
 	 * write lock: thus, we need to tell the cycle detector we have a write
 	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
+	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
 
 	return likely(six_trylock_write(&b->lock))
 		? 0
@@ -313,8 +312,7 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *,
 
 /* relock: */
 
-bool bch2_btree_path_relock_norestart(struct btree_trans *,
-				      struct btree_path *, unsigned long);
+bool bch2_btree_path_relock_norestart(struct btree_trans *, struct btree_path *);
 int __bch2_btree_path_relock(struct btree_trans *,
 			     struct btree_path *, unsigned long);
 
@@ -355,25 +353,29 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
 /* upgrade */
 
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
-			       struct btree_path *, unsigned);
+			       struct btree_path *, unsigned,
+			       struct get_locks_fail *);
+
 bool __bch2_btree_path_upgrade(struct btree_trans *,
-			       struct btree_path *, unsigned);
+			       struct btree_path *, unsigned,
+			       struct get_locks_fail *);
 
 static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
 					  struct btree_path *path,
 					  unsigned new_locks_want)
 {
+	struct get_locks_fail f;
 	unsigned old_locks_want = path->locks_want;
 
 	new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
 
 	if (path->locks_want < new_locks_want
-	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+	    ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
 	    : path->uptodate == BTREE_ITER_UPTODATE)
 		return 0;
 
 	trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
-			old_locks_want, new_locks_want);
+			old_locks_want, new_locks_want, &f);
 	return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 }
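
Note on the type changes above: mark_btree_node_locked(), btree_node_lock_increment(), and btree_node_lock() now pass enum btree_node_locked_type where enum six_lock_type used to appear, with explicit casts at the boundaries. Those casts are only well-defined because the locked-type enum mirrors the six-lock enum for its non-negative values. A minimal standalone sketch of the assumed layout (the enum bodies below are illustrative, not taken from this patch):

#include <assert.h>

/* Illustrative stand-ins for the two enums; the key assumption is that
 * the three locked states share values with the three six lock types,
 * with BTREE_NODE_UNLOCKED as the only extra (negative) member. */
enum six_lock_type {
	SIX_LOCK_read,
	SIX_LOCK_intent,
	SIX_LOCK_write,
};

enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
};

int main(void)
{
	/* Casting between the enums is value-preserving for all three
	 * locked states, which is what the casts in the diff rely on. */
	assert((enum six_lock_type) BTREE_NODE_WRITE_LOCKED == SIX_LOCK_write);
	assert((enum btree_node_locked_type) SIX_LOCK_read == BTREE_NODE_READ_LOCKED);
	return 0;
}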
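
Note on the -292,7 hunk: the retained comment is the subtle part of __btree_node_lock_write(). Six locks are unfair and readers queue behind a pending writer, so the path must be marked write-locked in the transaction's own bookkeeping before six_trylock_write() is attempted; otherwise the deadlock cycle detector could miss the edge while we block. A compilable sketch of that ordering, with stub types and helpers standing in for the real six lock and slow path (none of these stubs are bcachefs API):

#include <stdbool.h>

struct stub_six_lock { int writer_queued; };
struct stub_path_lvl { int locked_type; };	/* what the cycle detector reads */

enum { STUB_WRITE_LOCKED = 2 };

static bool stub_trylock_write(struct stub_six_lock *l)
{
	return !l->writer_queued;
}

static int stub_lock_write_slowpath(struct stub_six_lock *l)
{
	/* blocking path; may be aborted by the deadlock detector */
	l->writer_queued = 0;
	return 0;
}

static int stub_node_lock_write(struct stub_path_lvl *pl, struct stub_six_lock *l)
{
	/*
	 * Publish the write-lock claim first: a thread walking the lock
	 * graph must see this path as a writer even while we are still
	 * acquiring, or a waiter blocked behind our pending write could
	 * form a cycle the detector never sees.
	 */
	pl->locked_type = STUB_WRITE_LOCKED;

	/* Fast path: uncontended trylock; else block in the slow path. */
	return stub_trylock_write(l) ? 0 : stub_lock_write_slowpath(l);
}

int main(void)
{
	struct stub_six_lock l = { .writer_queued = 1 };
	struct stub_path_lvl pl = { 0 };
	return stub_node_lock_write(&pl, &l);
}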