// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

struct lock_class_key bch2_btree_node_lock_key;

/* Btree node locking: */
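
/*
 * Adjust the reader count on a six lock without changing who holds it:
 * used to temporarily drop, and later restore, read locks we ourselves
 * hold on a node so that taking the write lock doesn't block on ourselves.
 */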
static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
	if (lock->readers)
		this_cpu_add(*lock->readers, nr);
	else if (nr > 0)
		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
	else
		atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}
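
/*
 * Count, per lock type, how many references @trans holds on @b at @level
 * via paths other than @skip - needed to know how many read locks to drop
 * and restore around a write lock attempt.
 */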
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}
/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
				  struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */
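
/*
 * Deadlock cycle detector: when a lock can't be granted immediately we
 * walk the graph of transactions waiting on locks that other transactions
 * hold; encountering a transaction that's already in the graph means
 * we've found a cycle, and one of its members must abort.
 */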
/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common *node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};
static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++)
		bch2_btree_trans_to_text(out, i->trans);
}
static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
	}
	prt_newline(out);
}
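
/*
 * Abort one member of a lock cycle: the first graph entry is the
 * transaction doing the deadlock check, which we can restart directly;
 * any other transaction is flagged to abort and woken up so it notices.
 */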
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}
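
/*
 * Pick a transaction in the cycle to abort: prefer one that isn't in a
 * nofail locking section and isn't waiting on a write lock. A cycle
 * consisting entirely of nofail lock attempts is a bug.
 */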
static noinline int break_cycle(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	/*
	 * We'd like to prioritize aborting transactions that have done less
	 * work - but it appears breaking cycles by telling other transactions
	 * to abort may still be buggy:
	 */

	for (i = g->g; i < g->g + g->nr; i++) {
		if (i->trans->lock_may_not_fail ||
		    i->trans->locking_wait.lock_want == SIX_LOCK_write)
			continue;

		return abort_lock(g, i);
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		if (i->trans->lock_may_not_fail ||
		    !i->trans->in_traverse_all)
			continue;

		return abort_lock(g, i);
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		if (i->trans->lock_may_not_fail)
			continue;

		return abort_lock(g, i);
	}

	{
		struct bch_fs *c = g->g->trans->c;
		struct printbuf buf = PRINTBUF;

		bch_err(c, "cycle of nofail locks");

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			prt_newline(&buf);
			printbuf_indent_add(&buf, 2);
			bch2_prt_backtrace(&buf, trans->locking_wait.task);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}
}
static void lock_graph_pop(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}
static void lock_graph_pop_above(struct lock_graph *g, struct trans_waiting_for_lock *above,
				 struct printbuf *cycle)
{
	if (g->nr > 1 && cycle)
		print_chain(cycle, g);

	while (g->g + g->nr > above)
		lock_graph_pop(g);
}
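
/*
 * Add @trans to the lock graph as the transaction the current top entry
 * is waiting on. If @trans is already in the graph we have a cycle:
 * either print it (when @cycle is non NULL, i.e. we're only checking) or
 * pick a victim with break_cycle().
 */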
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;
	int ret = 0;

	for (i = g->g; i < g->g + g->nr; i++) {
		if (i->trans->locking != i->node_want) {
			lock_graph_pop_above(g, i - 1, cycle);
			return 0;
		}

		if (i->trans == trans) {
			if (cycle) {
				/* Only checking: */
				print_cycle(cycle, g);
				ret = -1;
			} else {
				ret = break_cycle(g);
			}

			if (ret)
				goto deadlock;
			/*
			 * If we didn't abort (instead telling another
			 * transaction to abort), keep checking:
			 */
		}
	}

	if (g->nr == ARRAY_SIZE(g->g)) {
		if (orig_trans->lock_may_not_fail)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		ret = btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
		goto deadlock;
	}

	closure_get(&trans->ref);

	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};

	return 0;
deadlock:
	lock_graph_pop_above(g, g->g, cycle);
	return ret;
}
static noinline void lock_graph_remove_non_waiters(struct lock_graph *g,
						   struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			lock_graph_pop_above(g, i - 1, cycle);
			return;
		}

	BUG();
}
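
/*
 * SIX_LOCK_read is 0, SIX_LOCK_intent is 1, SIX_LOCK_write is 2; the only
 * sums <= 1 are read+read and read+intent, exactly the combinations that
 * don't conflict.
 */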
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}
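
/*
 * Check whether blocking @trans on the lock it's waiting for can
 * deadlock: an iterative depth first search of the waits-for graph. The
 * top graph entry records the search position within that transaction's
 * held locks - path index, btree level, and position in the six lock's
 * wait list - so the scan can resume after entries are pushed or popped.
 */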
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	struct btree_path *path;
	int ret;

	if (trans->lock_must_abort) {
		trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	g.nr = 0;
	ret = lock_graph_descend(&g, trans, cycle);
	BUG_ON(ret);
next:
	if (!g.nr)
		return 0;

	top = &g.g[g.nr - 1];

	trans_for_each_path_from(top->trans, path, top->path_idx) {
		if (!path->nodes_locked)
			continue;

		if (top->path_idx != path->idx) {
			top->path_idx		= path->idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (unlikely(IS_ERR_OR_NULL(b))) {
				lock_graph_remove_non_waiters(&g, cycle);
				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				ret = lock_graph_descend(&g, trans, cycle);
				raw_spin_unlock(&b->lock.wait_lock);

				if (ret)
					return ret < 0 ? ret : 0;
				goto next;
			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}

	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_pop(&g);
	goto next;
}
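
/*
 * Callback passed to the six lock code when we're about to block on a
 * lock, so the cycle detector runs before we sleep:
 */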
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}
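
/*
 * Taking the write lock on a node we already have read locked via other
 * paths would block on ourselves: temporarily drop those read locks, take
 * the write lock, then restore them.
 */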
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, lock_may_not_fail);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

	return ret;
}

/* relock */
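
/*
 * Walk @path from its current level up to locks_want, relocking (or, if
 * @upgrade, upgrading to intent locks) each node. On failure, drop all of
 * the path's locks and poison the levels that failed, so that traversal
 * knows it has to walk back up past them.
 */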
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l)))
			fail_idx = l;

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */
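
/*
 * Upgrade a read lock to an intent lock on one node: via tryupgrade if we
 * still hold the read lock, via relock if we dropped it, or by bumping
 * the intent lock count if another path already has this node intent
 * locked.
 */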
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
	return true;
}
/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}
__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, false);
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, true);
}
bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true);
}
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	struct btree_path *linked;

	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all)
		trans_for_each_path(trans, linked)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true);
			}

	return false;
}
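
/*
 * Drop or downgrade locks above @new_locks_want: interior nodes above the
 * path's own level are unlocked entirely; an intent lock at the path's
 * level is downgraded to a read lock.
 */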
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);
}
/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(trans, path);
}
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	return 0;
}
void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);

	/*
	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
	 * btree nodes, it implements its own walking:
	 */
	EBUG_ON(!trans->is_initial_gc &&
		lock_class_is_held(&bch2_btree_node_lock_key));
}
bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}
/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}

#endif