// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

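/*
 * All btree node six locks share this single lockdep class key; that is what
 * lets bch2_assert_btree_nodes_not_locked() check "no btree node locks held
 * anywhere" with a single lock_class_is_held() call.
 */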
void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
#endif

/* Btree node locking: */

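/*
 * Count how many locks @trans holds on node @b at @level, per six lock type,
 * across all of its btree_paths except @skip.
 */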
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

void bch2_btree_node_unlock_write(struct btree_trans *trans,
			struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

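/*
 * Deadlock cycle detection: when a transaction blocks on a btree node lock, we
 * walk the graph of "transaction A is waiting on a lock held by transaction B"
 * edges below. Finding a cycle means a deadlock, and one member of the cycle
 * has to be aborted.
 */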
/* @trans wants to lock @node_want with type @lock_want */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common *node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++)
		bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
	}
	prt_newline(out);
}

static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);

	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}

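/*
 * Abort one member of the cycle: the first graph entry is the transaction
 * running this check, so it can be restarted directly; any other entry is
 * flagged and woken so it aborts itself.
 */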
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

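/*
 * Victim selection: a higher return value means a more preferable transaction
 * to abort. Transactions that may not fail return 0 and are never chosen.
 */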
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret = 0;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort	= i;
			best	= pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
			printbuf_indent_sub(&buf, 2);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans)
			return break_cycle(g, cycle);

	if (g->nr == ARRAY_SIZE(g->g)) {
		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	lock_graph_down(g, trans);
	return 0;
}

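/*
 * Two lock types conflict unless both are read, or one is read and the other
 * intent: intent conflicts with intent and write, and write conflicts with
 * everything.
 */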
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

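/*
 * Walk the waits-for graph starting from @trans: for each node lock held by
 * the transaction at the top of the graph, scan that lock's wait list for
 * blocked transactions wanting a conflicting lock type and descend into them.
 * Revisiting a transaction already in the graph means we found a cycle, which
 * break_cycle() resolves by aborting one member. When @cycle is non-NULL we
 * are only reporting (debugfs): the cycle is printed and nothing is aborted.
 */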
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	struct btree_path *path;
	int ret;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	g.nr = 0;
	lock_graph_down(&g, trans);
next:
	if (!g.nr)
		return 0;

	top = &g.g[g.nr - 1];

	trans_for_each_path_from(top->trans, path, top->path_idx) {
		if (!path->nodes_locked)
			continue;

		if (top->path_idx != path->idx) {
			top->path_idx		= path->idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				ret = lock_graph_descend(&g, trans, cycle);
				raw_spin_unlock(&b->lock.wait_lock);

				if (ret)
					return ret;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}

	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
}

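/*
 * Adapter with the callback signature six locks expect for their deadlock
 * check: @p is the btree_trans attempting to take @lock.
 */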
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

	return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b)
{
	struct btree_path *linked;
	unsigned i;
	int ret;

	/*
	 * Drop all read locks before taking a write lock:
	 *
	 * This is a hack, because bch2_btree_node_lock_write_nofail() is a
	 * hack - but by dropping read locks first, this should never fail, and
	 * we only use this in code paths where whatever read locks we've
	 * already taken are no longer needed:
	 */
	trans_for_each_path(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		for (i = 0; i < BTREE_MAX_DEPTH; i++)
			if (btree_node_read_locked(linked, i)) {
				btree_node_unlock(trans, linked, i);
				btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
			}
	}

	ret = __btree_node_lock_write(trans, path, b, true);
	BUG_ON(ret);
}

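/*
 * Take (relock or upgrade) locks on @path from path->level up to
 * path->locks_want. On failure, mark the level that failed so that
 * bch2_btree_path_traverse() has to walk back up to it, and report whether the
 * path ended up fully locked.
 */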
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l)))
			fail_idx = l;

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);

	return false;
}

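/*
 * Upgrade a read lock to an intent lock at @level. Read locks this transaction
 * holds on the same node via other paths are temporarily subtracted from the
 * reader count so the tryupgrade can succeed.
 */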
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, false);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	struct btree_path *linked;

	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all)
		trans_for_each_path(trans, linked)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true);
			}

	return false;
}

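/*
 * Drop locks no longer wanted after reducing locks_want: levels above
 * path->level are unlocked entirely, while at path->level an intent lock is
 * downgraded to a read lock.
 */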
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(trans, path);
}

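/*
 * Relock every path marked should_be_locked after the transaction has been
 * unlocked. A typical caller pattern (a sketch only, not code from this file)
 * around an operation that may sleep looks roughly like:
 *
 *	bch2_trans_unlock(trans);
 *	ret = do_something_that_blocks();	// hypothetical helper
 *	ret = ret ?: bch2_trans_relock(trans);
 *	if (ret)
 *		goto err;	// likely a transaction restart error
 */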
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}

	return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_))
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);

	return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);

	/*
	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
	 * btree nodes, it implements its own walking:
	 */
	if (!trans->is_initial_gc)
		bch2_assert_btree_nodes_not_locked();
}

bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}

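/*
 * Helper for taking a mutex without holding btree node locks: the transaction
 * is unlocked first, the mutex taken, then the transaction relocked; if the
 * relock fails the mutex is dropped again and the restart error returned.
 */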
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret;

	bch2_trans_unlock(trans);
	mutex_lock(lock);
	ret = bch2_trans_relock(trans);
	if (ret)
		mutex_unlock(lock);
	return ret;
}

#ifdef CONFIG_BCACHEFS_DEBUG

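/*
 * Debug-only consistency checks: every level of a path must hold exactly the
 * lock type btree_lock_want() says it should (write-locked nodes excepted).
 */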
void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}

#endif