#include "btree_locking.h"
#include "btree_types.h"
-struct lock_class_key bch2_btree_node_lock_key;
+static struct lock_class_key bch2_btree_node_lock_key;
-/* Btree node locking: */
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+ enum six_lock_init_flags flags)
+{
+ __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_set_no_check_recursion(&b->lock.dep_map);
+#endif
+}
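For context, a minimal sketch of how the new initializer is typically called (the call sites and the pcpu_readers variable are illustrative assumptions, not part of this patch):

	/* a freshly allocated btree node: */
	bch2_btree_lock_init(&b->c, 0);

	/* a key cache entry, optionally with per-cpu reader counts: */
	bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);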
-static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+#ifdef CONFIG_LOCKDEP
+void bch2_assert_btree_nodes_not_locked(void)
{
- if (lock->readers)
- this_cpu_add(*lock->readers, nr);
- else if (nr > 0)
- atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
- else
- atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
+ BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
+#endif
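Because the definition above is only built under CONFIG_LOCKDEP, the header presumably pairs it with a no-op stub for other configurations; a sketch of the declaration pair assumed to live in btree_locking.h:

	#ifdef CONFIG_LOCKDEP
	void bch2_assert_btree_nodes_not_locked(void);
	#else
	static inline void bch2_assert_btree_nodes_not_locked(void) {}
	#endif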
+
+/* Btree node locking: */
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
struct btree_path *skip,
closure_put(&g->g[--g->nr].trans->ref);
}
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+static noinline void lock_graph_pop_all(struct lock_graph *g)
{
- closure_get(&trans->ref);
+ while (g->nr)
+ lock_graph_up(g);
+}
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
g->g[g->nr++] = (struct trans_waiting_for_lock) {
.trans = trans,
.node_want = trans->locking,
};
}
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+ closure_get(&trans->ref);
+ __lock_graph_down(g, trans);
+}
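A note on this split (my reading of the surrounding code, not text from the patch): lock_graph_down() takes its own reference on the transaction via closure_get(), while __lock_graph_down() is for callers that already hold that reference and are handing it off to the graph, as in the reordered sequence later in this patch:

	closure_get(&trans->ref);
	raw_spin_unlock(&b->lock.wait_lock);
	ret = lock_graph_descend(&g, trans, cycle);	/* may call __lock_graph_down() */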
+
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
struct trans_waiting_for_lock *i;
}
if (unlikely(!best)) {
- struct bch_fs *c = g->g->trans->c;
struct printbuf buf = PRINTBUF;
- bch_err(c, "cycle of nofail locks");
+ prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
for (i = g->g; i < g->g + g->nr; i++) {
struct btree_trans *trans = i->trans;
prt_printf(&buf, "backtrace:");
prt_newline(&buf);
printbuf_indent_add(&buf, 2);
- bch2_prt_backtrace(&buf, trans->locking_wait.task);
+ bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
printbuf_indent_sub(&buf, 2);
prt_newline(&buf);
}
struct trans_waiting_for_lock *i;
for (i = g->g; i < g->g + g->nr; i++)
- if (i->trans == trans)
+ if (i->trans == trans) {
+ closure_put(&trans->ref);
return break_cycle(g, cycle);
+ }
if (g->nr == ARRAY_SIZE(g->g)) {
+ closure_put(&trans->ref);
+
if (orig_trans->lock_may_not_fail)
return 0;
while (g->nr)
lock_graph_up(g);
+
+ if (cycle)
+ return 0;
+
trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
}
- lock_graph_down(g, trans);
+ __lock_graph_down(g, trans);
return 0;
}
struct trans_waiting_for_lock *top;
struct btree_bkey_cached_common *b;
struct btree_path *path;
+ unsigned path_idx;
int ret;
if (trans->lock_must_abort) {
+ if (cycle)
+ return -1;
+
trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
}
top = &g.g[g.nr - 1];
- trans_for_each_path_from(top->trans, path, top->path_idx) {
+ trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
if (!path->nodes_locked)
continue;
- if (top->path_idx != path->idx) {
- top->path_idx = path->idx;
+ if (path_idx != top->path_idx) {
+ top->path_idx = path_idx;
top->level = 0;
top->lock_start_time = 0;
}
b = &READ_ONCE(path->l[top->level].b)->c;
if (IS_ERR_OR_NULL(b)) {
- BUG_ON(!lock_graph_remove_non_waiters(&g));
+ /*
+ * If we get here, it means we raced with the
+ * other thread updating its btree_path
+ * structures - which means it can't be blocked
+ * waiting on a lock:
+ */
+ if (!lock_graph_remove_non_waiters(&g)) {
+ /*
+ * If lock_graph_remove_non_waiters()
+ * didn't do anything, it must be
+ * because we're being called by debugfs
+ * checking for lock cycles, which
+ * invokes us on btree_transactions that
+ * aren't actually waiting on anything.
+ * Just bail out:
+ */
+ lock_graph_pop_all(&g);
+ }
+
goto next;
}
!lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
continue;
- ret = lock_graph_descend(&g, trans, cycle);
+ closure_get(&trans->ref);
raw_spin_unlock(&b->lock.wait_lock);
+ ret = lock_graph_descend(&g, trans, cycle);
if (ret)
return ret;
goto next;
* locked:
*/
six_lock_readers_add(&b->lock, -readers);
- ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, lock_may_not_fail);
+ ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
+ lock_may_not_fail, _RET_IP_);
six_lock_readers_add(&b->lock, readers);
if (ret)
- mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);
+ mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
return ret;
}
+void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
+ struct btree_path *path,
+ struct btree_bkey_cached_common *b)
+{
+ struct btree_path *linked;
+ unsigned i;
+ int ret;
+
+ /*
+ * XXX BIG FAT NOTICE
+ *
+ * Drop all read locks before taking a write lock:
+ *
+ * This is a hack, because bch2_btree_node_lock_write_nofail() is a
+ * hack - but by dropping read locks first, this should never fail, and
+ * we only use this in code paths where whatever read locks we've
+ * already taken are no longer needed:
+ */
+
+ trans_for_each_path(trans, linked) {
+ if (!linked->nodes_locked)
+ continue;
+
+ for (i = 0; i < BTREE_MAX_DEPTH; i++)
+ if (btree_node_read_locked(linked, i)) {
+ btree_node_unlock(trans, linked, i);
+ btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
+ }
+ }
+
+ ret = __btree_node_lock_write(trans, path, b, true);
+ BUG_ON(ret);
+}
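Note that the true passed to __btree_node_lock_write() here is its lock_may_not_fail argument (the parameter visible in the hunk above), so this takes the must-not-fail path and the BUG_ON() asserts that expectation; per the comment, dropping the transaction's read locks first is what is meant to make that expectation hold in practice.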
+
/* relock */
static inline bool btree_path_get_locks(struct btree_trans *trans,
struct btree_path *path,
- bool upgrade)
+ bool upgrade,
+ struct get_locks_fail *f)
{
unsigned l = path->level;
int fail_idx = -1;
if (!(upgrade
? bch2_btree_node_upgrade(trans, path, l)
- : bch2_btree_node_relock(trans, path, l)))
- fail_idx = l;
+ : bch2_btree_node_relock(trans, path, l))) {
+ fail_idx = l;
+
+ if (f) {
+ f->l = l;
+ f->b = path->l[l].b;
+ }
+ }
l++;
} while (l < path->locks_want);
return true;
}
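The new out-parameter records where a relock or upgrade failed. From the assignments above (f->l = l; f->b = path->l[l].b), the struct declared in btree_locking.h is presumably along these lines (a sketch, not copied from the header):

	struct get_locks_fail {
		unsigned	l;	/* btree level at which locking failed */
		struct btree	*b;	/* node at that level */
	};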
fail:
- if (trace)
+ if (trace && !trans->notrace_relock_fail)
trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
return false;
}
trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
return false;
success:
- mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+ mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
return true;
}
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
- return btree_path_get_locks(trans, path, false);
+ struct get_locks_fail f;
+
+ return btree_path_get_locks(trans, path, false, &f);
}
-__flatten
-bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
+int __bch2_btree_path_relock(struct btree_trans *trans,
struct btree_path *path, unsigned long trace_ip)
{
- return btree_path_get_locks(trans, path, true);
+ if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
+ trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
+ }
+
+ return 0;
}
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
struct btree_path *path,
- unsigned new_locks_want)
+ unsigned new_locks_want,
+ struct get_locks_fail *f)
{
EBUG_ON(path->locks_want >= new_locks_want);
path->locks_want = new_locks_want;
- return btree_path_get_locks(trans, path, true);
+ return btree_path_get_locks(trans, path, true, f);
}
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
- unsigned new_locks_want)
+ unsigned new_locks_want,
+ struct get_locks_fail *f)
{
struct btree_path *linked;
- if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
+ if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
return true;
/*
linked->btree_id == path->btree_id &&
linked->locks_want < new_locks_want) {
linked->locks_want = new_locks_want;
- btree_path_get_locks(trans, linked, true);
+ btree_path_get_locks(trans, linked, true, NULL);
}
return false;
struct btree_path *path,
unsigned new_locks_want)
{
- unsigned l;
+ unsigned l, old_locks_want = path->locks_want;
+
+ if (trans->restarted)
+ return;
EBUG_ON(path->locks_want < new_locks_want);
} else {
if (btree_node_intent_locked(path, l)) {
six_lock_downgrade(&path->l[l].b->c.lock);
- mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
+ mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
}
break;
}
}
bch2_btree_path_verify_locks(path);
+
+ path->downgrade_seq++;
+ trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}
/* Btree transaction locking: */
{
struct btree_path *path;
+ if (trans->restarted)
+ return;
+
trans_for_each_path(trans, path)
bch2_btree_path_downgrade(trans, path);
}
return 0;
}
+int bch2_trans_relock_notrace(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ if (unlikely(trans->restarted))
+ return -((int) trans->restarted);
+
+ trans_for_each_path(trans, path)
+ if (path->should_be_locked &&
+ !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
+ return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+ }
+ return 0;
+}
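This is presumably identical in effect to bch2_trans_relock() apart from skipping the tracepoint/counter on failure (hence "notrace"), for callers where a relock failure is expected and shouldn't show up in the restart counters.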
+
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+ struct btree_path *path;
+
+ trans_for_each_path(trans, path)
+ __bch2_btree_path_unlock(trans, path);
+}
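The "_noassert" variant presumably unlocks every path just like bch2_trans_unlock() below, but skips the bch2_assert_btree_nodes_not_locked() check that the plain version performs at the end.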
+
void bch2_trans_unlock(struct btree_trans *trans)
{
struct btree_path *path;
* bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
 * btree nodes; it implements its own walking:
*/
- EBUG_ON(!trans->is_initial_gc &&
- lock_class_is_held(&bch2_btree_node_lock_key));
+ if (!trans->is_initial_gc)
+ bch2_assert_btree_nodes_not_locked();
+}
+
+void bch2_trans_unlock_long(struct btree_trans *trans)
+{
+ bch2_trans_unlock(trans);
+ bch2_trans_srcu_unlock(trans);
}
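The "_long" variant is presumably for callers that will stay unlocked for an extended period: in addition to dropping btree locks it also releases the transaction's SRCU read lock via bch2_trans_srcu_unlock(); the intent, presumably, is that long sleeps don't stall whatever is waiting on that SRCU grace period.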
bool bch2_trans_locked(struct btree_trans *trans)
return false;
}
+int __bch2_trans_mutex_lock(struct btree_trans *trans,
+ struct mutex *lock)
+{
+ int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
+
+ if (ret)
+ mutex_unlock(lock);
+ return ret;
+}
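As used here, drop_locks_do() drops the transaction's btree locks, evaluates the expression ((mutex_lock(lock), 0) forces its contribution to 0) and then relocks, so a nonzero ret can only come from the relock triggering a transaction restart, in which case the mutex is released again before the error is returned. A minimal usage sketch (the trylock fast-path wrapper bch2_trans_mutex_lock() and the c->some_lock mutex are assumptions for illustration):

	int ret = bch2_trans_mutex_lock(trans, &c->some_lock);
	if (ret)
		return ret;	/* transaction restart: caller must retry */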
+
/* Debug */
#ifdef CONFIG_BCACHEFS_DEBUG