// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
	lockdep_set_novalidate_class(&b->lock);
}

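/*
 * All btree nodes share a single lock class and the class is marked
 * novalidate: the lock ordering here is more dynamic than lockdep's static
 * validation can model, so deadlock avoidance is instead handled by the
 * cycle detector in this file.
 */
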
#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
	/* Re-enable when lock_class_is_held() is merged: */
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif

/* Btree node locking: */

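/*
 * bch2_btree_node_lock_counts() returns, per six lock type, the number of
 * locks that @trans's paths (excluding @skip) hold on node @b at @level.
 */
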
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;
	unsigned i;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path, i)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common *node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	unsigned			path_idx;
	unsigned			level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

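/*
 * The lock graph is a stack of transactions forming a chain of "waits for"
 * edges: each entry is blocked taking node_want with lock_want, and the
 * entry that follows it holds a conflicting lock on that node. If
 * descending an edge reaches a transaction already in the graph, the chain
 * closes into a cycle - an actual deadlock - and break_cycle() picks a
 * transaction to abort.
 */
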
static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++)
		bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		struct task_struct *task = i->trans->locking_wait.task;

		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", task ? task->pid : 0);
	}
	prt_newline(out);
}

static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);
	__lock_graph_down(g, trans);
}

static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}

static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans,
				 unsigned long ip)
{
	struct bch_fs *c = trans->c;

	count_event(c, trans_restart_would_deadlock);

	if (trace_trans_restart_would_deadlock_enabled()) {
		struct printbuf buf = PRINTBUF;

		buf.atomic++;
		print_cycle(&buf, g);

		trace_trans_restart_would_deadlock(trans, ip, buf.buf);
		printbuf_exit(&buf);
	}
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_would_deadlock(g, i->trans, _RET_IP_);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

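/*
 * Pick the victim for breaking a cycle: a higher return value means a more
 * preferable transaction to abort. Transactions that may not fail return 0
 * and are never chosen - a cycle consisting only of those is a bug (see
 * break_cycle() below).
 */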
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret = 0;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			prt_newline(&buf);
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans) {
			closure_put(&trans->ref);
			return break_cycle(g, cycle);
		}

	if (g->nr == ARRAY_SIZE(g->g)) {
		closure_put(&trans->ref);

		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	__lock_graph_down(g, trans);
	return 0;
}

static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	/* read (0) conflicts with neither read nor intent; all else conflicts */
	return t1 + t2 > 1;
}

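/*
 * bch2_check_for_deadlock() does a depth first search of the wait-for
 * graph, starting from @trans: for the transaction on top of the stack,
 * walk every node lock its paths hold and every waiter queued on those
 * locks; a waiter whose lock_want conflicts with the lock we hold is an
 * edge to descend. Reaching a transaction already in the graph means we've
 * found a cycle.
 *
 * With @cycle == NULL (the six lock callback path) a detected cycle is
 * broken by restarting a victim transaction; debugfs passes a printbuf to
 * only report cycles.
 */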
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	btree_path_idx_t path_idx;
	int ret = 0;

	g.nr = 0;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_would_deadlock(&g, trans, _RET_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	lock_graph_down(&g, trans);

	/* trans->paths is rcu protected vs. freeing */
	rcu_read_lock();
	if (cycle)
		cycle->atomic++;
next:
	if (!g.nr)
		goto out;

	top = &g.g[g.nr - 1];

	struct btree_path *paths = rcu_dereference(top->trans->paths);
	if (!paths)
		goto up;

	unsigned long *paths_allocated = trans_paths_allocated(paths);

	trans_for_each_path_idx_from(paths_allocated, *trans_paths_nr(paths),
				     path_idx, top->path_idx) {
		struct btree_path *path = paths + path_idx;
		if (!path->nodes_locked)
			continue;

		if (path_idx != top->path_idx) {
			top->path_idx		= path_idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				closure_get(&trans->ref);
				raw_spin_unlock(&b->lock.wait_lock);

				ret = lock_graph_descend(&g, trans, cycle);
				if (ret)
					goto out;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}
up:
	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
out:
	if (cycle)
		--cycle->atomic;
	rcu_read_unlock();
	return ret;
}

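/*
 * Callback passed to the six lock code, run before a waiter blocks: if
 * going to sleep on this lock would close a cycle, a victim transaction is
 * restarted instead of deadlocking.
 */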
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	struct btree_path *linked;
	unsigned i, iter;
	int ret;

	/*
	 * Drop all read locks before taking a write lock:
	 *
	 * This is a hack, because bch2_btree_node_lock_write_nofail() is a
	 * hack - but by dropping read locks first, this should never fail, and
	 * we only use this in code paths where whatever read locks we've
	 * already taken are no longer needed:
	 */
	trans_for_each_path(trans, linked, iter) {
		if (!linked->nodes_locked)
			continue;

		for (i = 0; i < BTREE_MAX_DEPTH; i++)
			if (btree_node_read_locked(linked, i)) {
				btree_node_unlock(trans, linked, i);
				btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
			}
	}

	ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

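/*
 * btree_path_get_locks() relocks (or, with @upgrade, upgrades to intent)
 * every level from path->level up to path->locks_want. On failure the path
 * is fully unlocked, and the levels at and below the failure are poisoned
 * with an error pointer so that traversal walks back up past the node we
 * failed to relock.
 */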
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade,
					struct get_locks_fail *f)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			fail_idx	= l;

			if (f) {
				f->l	= l;
				f->b	= path->l[l].b;
			}
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

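/*
 * Relocking hinges on the six lock sequence number: six_relock_type()
 * succeeds only if the seq recorded in path->l[level].lock_seq is still
 * current, i.e. nobody write locked the node since we dropped our lock.
 */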
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

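/*
 * Upgrading read -> intent is a trylock-style path - it never blocks,
 * presumably because waiting on an intent lock while still holding our
 * read lock could itself deadlock. On failure, callers fall back to a
 * transaction restart.
 */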
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
				      struct btree_path *path, unsigned long trace_ip)
{
	struct get_locks_fail f;

	return btree_path_get_locks(trans, path, false, &f);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			     struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned new_locks_want,
					    struct get_locks_fail *f)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true, f);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want,
			       struct get_locks_fail *f)
{
	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all) {
		struct btree_path *linked;
		unsigned i;

		trans_for_each_path(trans, linked, i)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true, NULL);
			}
	}

	return false;
}

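/*
 * Downgrade a path to the locks it actually needs: levels locked above
 * what locks_want requires are unlocked outright, and at the level the
 * path is at an intent lock is demoted to a read lock.
 */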
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l, old_locks_want = path->locks_want;

	if (trans->restarted)
		return;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);

	trace_path_downgrade(trans, _RET_IP_, path, old_locks_want);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (trans->restarted)
		return;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_downgrade(trans, path);
}

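/*
 * Relock a transaction after it was unlocked (e.g. around a blocking
 * operation): every path marked should_be_locked must be relocked at the
 * same lock sequence numbers, otherwise the transaction restarts - what it
 * read before unlocking can no longer be trusted.
 */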
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path, i)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}

	return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path, i)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_))
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);

	return 0;
}

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		__bch2_btree_path_unlock(trans, path);
}

/* For when the transaction will stay unlocked a while: also drop srcu */
void bch2_trans_unlock_long(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);
	bch2_trans_srcu_unlock(trans);
}

bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		if (path->nodes_locked)
			return true;
	return false;
}

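/*
 * For taking a mutex that might block while holding btree locks:
 * drop_locks_do() unlocks the transaction, evaluates the expression
 * ((mutex_lock(lock), 0) - take the mutex, yield 0), then relocks. If the
 * relock fails the caller gets the restart error, and since the mutex was
 * already taken it must be dropped before returning.
 *
 * Typical use, via the bch2_trans_mutex_lock() wrapper (sketch - the
 * sb_lock member is illustrative):
 *
 *	ret = bch2_trans_mutex_lock(trans, &c->sb_lock);
 *	if (ret)
 *		return ret;
 */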
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret = drop_locks_do(trans, (mutex_lock(lock), 0));

	if (ret)
		mutex_unlock(lock);
	return ret;
}

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;
	unsigned i;

	trans_for_each_path(trans, path, i)
		bch2_btree_path_verify_locks(path);
}

#endif