X-Git-Url: https://git.sesse.net/?a=blobdiff_plain;f=libbcachefs%2Fbtree_locking.c;h=1eca320e7574ef251c2f62a5463fb93da7530f7c;hb=f27b135285d032d9570a1accb00412e111b38878;hp=7d9e01c1d307a723a3b64b5d652022df9289cc6a;hpb=4d185cfa51789d27489543443452972da87c2f97;p=bcachefs-tools-debian

diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c
index 7d9e01c..1eca320 100644
--- a/libbcachefs/btree_locking.c
+++ b/libbcachefs/btree_locking.c
@@ -10,15 +10,16 @@ void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
 			  enum six_lock_init_flags flags)
 {
 	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	lockdep_set_no_check_recursion(&b->lock.dep_map);
-#endif
+	lockdep_set_novalidate_class(&b->lock);
 }
 
 #ifdef CONFIG_LOCKDEP
 void bch2_assert_btree_nodes_not_locked(void)
 {
+#if 0
+	//Re-enable when lock_class_is_held() is merged:
 	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
+#endif
 }
 #endif
 
@@ -111,10 +112,8 @@ static noinline void lock_graph_pop_all(struct lock_graph *g)
 		lock_graph_up(g);
 }
 
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 {
-	closure_get(&trans->ref);
-
 	g->g[g->nr++] = (struct trans_waiting_for_lock) {
 		.trans = trans,
 		.node_want = trans->locking,
@@ -122,6 +121,12 @@ static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 	};
 }
 
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+	closure_get(&trans->ref);
+	__lock_graph_down(g, trans);
+}
+
 static bool lock_graph_remove_non_waiters(struct lock_graph *g)
 {
 	struct trans_waiting_for_lock *i;
@@ -137,10 +142,28 @@ static bool lock_graph_remove_non_waiters(struct lock_graph *g)
 	return false;
 }
 
+static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans,
+				 unsigned long ip)
+{
+	struct bch_fs *c = trans->c;
+
+	count_event(c, trans_restart_would_deadlock);
+
+	if (trace_trans_restart_would_deadlock_enabled()) {
+		struct printbuf buf = PRINTBUF;
+
+		buf.atomic++;
+		print_cycle(&buf, g);
+
+		trace_trans_restart_would_deadlock(trans, ip, buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
 static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
 {
 	if (i == g->g) {
-		trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
+		trace_would_deadlock(g, i->trans, _RET_IP_);
 		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
 	} else {
 		i->trans->lock_must_abort = true;
@@ -222,10 +245,14 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 	struct trans_waiting_for_lock *i;
 
 	for (i = g->g; i < g->g + g->nr; i++)
-		if (i->trans == trans)
+		if (i->trans == trans) {
+			closure_put(&trans->ref);
 			return break_cycle(g, cycle);
+		}
 
 	if (g->nr == ARRAY_SIZE(g->g)) {
+		closure_put(&trans->ref);
+
 		if (orig_trans->lock_may_not_fail)
 			return 0;
 
@@ -239,7 +266,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
 	}
 
-	lock_graph_down(g, trans);
+	__lock_graph_down(g, trans);
 	return 0;
 }
 
@@ -257,15 +284,16 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
 	unsigned path_idx;
 	int ret;
 
+	g.nr = 0;
+
 	if (trans->lock_must_abort) {
 		if (cycle)
 			return -1;
 
-		trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
+		trace_would_deadlock(&g, trans, _RET_IP_);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 	}
 
-	g.nr = 0;
 	lock_graph_down(&g, trans);
next:
 	if (!g.nr)
@@ -334,9 +362,10 @@ next:
 			    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
 				continue;
 
-			ret = lock_graph_descend(&g, trans, cycle);
+			closure_get(&trans->ref);
 			raw_spin_unlock(&b->lock.wait_lock);
 
+			ret = lock_graph_descend(&g, trans, cycle);
 			if (ret)
 				return ret;
 			goto next;
@@ -378,7 +407,7 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
 		six_lock_readers_add(&b->lock, readers);
 
 	if (ret)
-		mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);
+		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
 
 	return ret;
 }
@@ -421,7 +450,8 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 
 static inline bool btree_path_get_locks(struct btree_trans *trans,
 					struct btree_path *path,
-					bool upgrade)
+					bool upgrade,
+					struct get_locks_fail *f)
 {
 	unsigned l = path->level;
 	int fail_idx = -1;
@@ -432,8 +462,14 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 
 		if (!(upgrade
 		      ? bch2_btree_node_upgrade(trans, path, l)
-		      : bch2_btree_node_relock(trans, path, l)))
-			fail_idx = l;
+		      : bch2_btree_node_relock(trans, path, l))) {
+			fail_idx = l;
+
+			if (f) {
+				f->l = l;
+				f->b = path->l[l].b;
+			}
+		}
 
 		l++;
 	} while (l < path->locks_want);
@@ -542,7 +578,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
 	return false;
success:
-	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 	return true;
 }
 
@@ -574,7 +610,9 @@ __flatten
 bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
 			struct btree_path *path, unsigned long trace_ip)
 {
-	return btree_path_get_locks(trans, path, false);
+	struct get_locks_fail f;
+
+	return btree_path_get_locks(trans, path, false, &f);
 }
 
 int __bch2_btree_path_relock(struct btree_trans *trans,
@@ -588,31 +626,26 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
 	return 0;
 }
 
-__flatten
-bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
-			struct btree_path *path, unsigned long trace_ip)
-{
-	return btree_path_get_locks(trans, path, true);
-}
-
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
 			       struct btree_path *path,
-			       unsigned new_locks_want)
+			       unsigned new_locks_want,
+			       struct get_locks_fail *f)
 {
 	EBUG_ON(path->locks_want >= new_locks_want);
 
 	path->locks_want = new_locks_want;
 
-	return btree_path_get_locks(trans, path, true);
+	return btree_path_get_locks(trans, path, true, f);
 }
 
 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
 			       struct btree_path *path,
-			       unsigned new_locks_want)
+			       unsigned new_locks_want,
+			       struct get_locks_fail *f)
 {
 	struct btree_path *linked;
 
-	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
+	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
 		return true;
 
 	/*
@@ -641,7 +674,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
 		    linked->btree_id == path->btree_id &&
 		    linked->locks_want < new_locks_want) {
 			linked->locks_want = new_locks_want;
-			btree_path_get_locks(trans, linked, true);
+			btree_path_get_locks(trans, linked, true, NULL);
 		}
 
 	return false;
@@ -651,7 +684,10 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 				 struct btree_path *path,
 				 unsigned new_locks_want)
 {
-	unsigned l;
+	unsigned l, old_locks_want = path->locks_want;
+
+	if (trans->restarted)
+		return;
 
 	EBUG_ON(path->locks_want < new_locks_want);
 
@@ -664,13 +700,16 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 		} else {
 			if (btree_node_intent_locked(path, l)) {
 				six_lock_downgrade(&path->l[l].b->c.lock);
-				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
+				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
 			}
 			break;
 		}
 	}
 
 	bch2_btree_path_verify_locks(path);
+
+	path->downgrade_seq++;
+	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
 }
 
 /* Btree transaction locking: */
@@ -679,6 +718,9 @@ void bch2_trans_downgrade(struct btree_trans *trans)
 {
 	struct btree_path *path;
 
+	if (trans->restarted)
+		return;
+
 	trans_for_each_path(trans, path)
 		bch2_btree_path_downgrade(trans, path);
 }
@@ -728,13 +770,12 @@ void bch2_trans_unlock(struct btree_trans *trans)
 
 	trans_for_each_path(trans, path)
 		__bch2_btree_path_unlock(trans, path);
+}
 
-	/*
-	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
-	 * btree nodes, it implements its own walking:
-	 */
-	if (!trans->is_initial_gc)
-		bch2_assert_btree_nodes_not_locked();
+void bch2_trans_unlock_long(struct btree_trans *trans)
+{
+	bch2_trans_unlock(trans);
+	bch2_trans_srcu_unlock(trans);
 }
 
 bool bch2_trans_locked(struct btree_trans *trans)