// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
{
        __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
        BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
#endif

/* Btree node locking: */

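/*
 * Adjust a six lock's reader count directly: when the lock uses per-cpu
 * reader counts we bump the per-cpu counter, otherwise we add to (or subtract
 * from) the read_lock field packed into the atomic lock state.
 *
 * This is used to temporarily hide read locks we already hold on a node via
 * other paths, so that taking a write lock on it doesn't block on ourselves.
 */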
static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
        if (lock->readers)
                this_cpu_add(*lock->readers, nr);
        else if (nr > 0)
                atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
        else
                atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}

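/*
 * Count how many times @trans already holds @b locked, per lock type, across
 * all of its btree_paths except @skip - so callers can account for (or
 * temporarily drop) their own locks when acquiring new ones on the same node.
 */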
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
                                                  struct btree_bkey_cached_common *b,
                                                  unsigned level)
{
        struct btree_path *path;
        struct six_lock_count ret;

        memset(&ret, 0, sizeof(ret));

        if (IS_ERR_OR_NULL(b))
                return ret;

        trans_for_each_path(trans, path)
                if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);

                        if (t != BTREE_NODE_UNLOCKED)
                                ret.n[t]++;
                }

        return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
                        struct btree_path *path, struct btree *b)
{
        bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
        struct btree_trans              *trans;
        struct btree_bkey_cached_common *node_want;
        enum six_lock_type              lock_want;
        /* for iterating over held locks: */
        u8                              path_idx;
        u8                              level;
        u64                             lock_start_time;
};

struct lock_graph {
        struct trans_waiting_for_lock   g[8];
        unsigned                        nr;
};

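/*
 * Deadlock cycle detection:
 *
 * The lock graph is a path through the waits-for graph: g[0] is the
 * transaction that started the check, and each subsequent entry is a
 * transaction holding the lock the previous entry is waiting on. If we ever
 * descend to a transaction already in the graph we've found a cycle, and one
 * of its members must be aborted (see break_cycle()).
 *
 * The graph is bounded at 8 entries; hitting that limit is treated like a
 * deadlock and restarts the original transaction.
 */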
static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        prt_printf(out, "Found lock cycle (%u entries):", g->nr);
        prt_newline(out);

        for (i = g->g; i < g->g + g->nr; i++)
                bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g; i != g->g + g->nr; i++) {
                if (i != g->g)
                        prt_str(out, "<- ");
                prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
        }
        prt_newline(out);
}

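/*
 * Entries in the lock graph hold a ref (via closure_get()) on their
 * btree_trans, so a transaction can't be freed out from under us while it's
 * part of a cycle check; lock_graph_down()/lock_graph_up() push and pop
 * entries while keeping that refcount balanced.
 */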
static void lock_graph_up(struct lock_graph *g)
{
        closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
        while (g->nr)
                lock_graph_up(g);
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
        closure_get(&trans->ref);

        g->g[g->nr++] = (struct trans_waiting_for_lock) {
                .trans          = trans,
                .node_want      = trans->locking,
                .lock_want      = trans->locking_wait.lock_want,
        };
}

static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g + 1; i < g->g + g->nr; i++)
                if (i->trans->locking != i->node_want ||
                    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
                        while (g->g + g->nr > i)
                                lock_graph_up(g);
                        return true;
                }

        return false;
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
        if (i == g->g) {
                trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
                return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
                wake_up_process(i->trans->locking_wait.task);
                return 0;
        }
}

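/*
 * Pick which transaction in a cycle to abort: 0 means "may not be aborted",
 * higher values make a better victim. Waiters on a write lock are the worst
 * candidates (aside from nofail locks); transactions already in
 * traverse_all are the most preferred.
 */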
static int btree_trans_abort_preference(struct btree_trans *trans)
{
        if (trans->lock_may_not_fail)
                return 0;
        if (trans->locking_wait.lock_want == SIX_LOCK_write)
                return 1;
        if (!trans->in_traverse_all)
                return 2;
        return 3;
}

static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
        struct trans_waiting_for_lock *i, *abort = NULL;
        unsigned best = 0, pref;
        int ret;

        if (lock_graph_remove_non_waiters(g))
                return 0;

        /* Only checking, for debugfs: */
        if (cycle) {
                print_cycle(cycle, g);
                ret = -1;
                goto out;
        }

        for (i = g->g; i < g->g + g->nr; i++) {
                pref = btree_trans_abort_preference(i->trans);
                if (pref > best) {
                        abort = i;
                        best = pref;
                }
        }

        if (unlikely(!best)) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

                for (i = g->g; i < g->g + g->nr; i++) {
                        struct btree_trans *trans = i->trans;

                        bch2_btree_trans_to_text(&buf, trans);

                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
                        bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }

                bch2_print_string_as_lines(KERN_ERR, buf.buf);
                printbuf_exit(&buf);
                BUG();
        }

        ret = abort_lock(g, abort);
out:
        if (ret)
                while (g->nr)
                        lock_graph_up(g);
        return ret;
}

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
                              struct printbuf *cycle)
{
        struct btree_trans *orig_trans = g->g->trans;
        struct trans_waiting_for_lock *i;

        for (i = g->g; i < g->g + g->nr; i++)
                if (i->trans == trans)
                        return break_cycle(g, cycle);

        if (g->nr == ARRAY_SIZE(g->g)) {
                if (orig_trans->lock_may_not_fail)
                        return 0;

                while (g->nr)
                        lock_graph_up(g);

                if (cycle)
                        return 0;

                trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
                return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
        }

        lock_graph_down(g, trans);
        return 0;
}

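/*
 * SIX_LOCK_read = 0, SIX_LOCK_intent = 1, SIX_LOCK_write = 2, so the sum is
 * greater than 1 exactly when one of the types is a write lock or both are
 * intent locks; read/read and read/intent don't conflict.
 */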
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
        return t1 + t2 > 1;
}

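/*
 * Walk the waits-for graph, starting from @trans, checking for cycles:
 * for the transaction on top of the lock graph, iterate over every node lock
 * it holds and over every other transaction blocked on one of those locks
 * with a conflicting lock type, descending into each in turn. Revisiting a
 * transaction already in the graph means we've found a cycle, which
 * break_cycle() resolves by restarting one of its members.
 *
 * If @cycle is non-NULL we're only reporting (for debugfs): the cycle is
 * printed into it and -1 is returned instead of restarting anything.
 */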
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
        struct lock_graph g;
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
        int ret;

        if (trans->lock_must_abort) {
                if (cycle)
                        return -1;

                trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }

        g.nr = 0;
        lock_graph_down(&g, trans);
next:
        if (!g.nr)
                return 0;

        top = &g.g[g.nr - 1];

        trans_for_each_path_from(top->trans, path, top->path_idx) {
                if (!path->nodes_locked)
                        continue;

                if (top->path_idx != path->idx) {
                        top->path_idx           = path->idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }

                for (;
                     top->level < BTREE_MAX_DEPTH;
                     top->level++, top->lock_start_time = 0) {
                        int lock_held = btree_node_locked_type(path, top->level);

                        if (lock_held == BTREE_NODE_UNLOCKED)
                                continue;

                        b = &READ_ONCE(path->l[top->level].b)->c;

                        if (IS_ERR_OR_NULL(b)) {
                                /*
                                 * If we get here, it means we raced with the
                                 * other thread updating its btree_path
                                 * structures - which means it can't be blocked
                                 * waiting on a lock:
                                 */
                                if (!lock_graph_remove_non_waiters(&g)) {
                                        /*
                                         * If lock_graph_remove_non_waiters()
                                         * didn't do anything, it must be
                                         * because we're being called by debugfs
                                         * checking for lock cycles, which
                                         * invokes us on btree_transactions that
                                         * aren't actually waiting on anything.
                                         * Just bail out:
                                         */
                                        lock_graph_pop_all(&g);
                                }

                                goto next;
                        }

                        if (list_empty_careful(&b->lock.wait_list))
                                continue;

                        raw_spin_lock(&b->lock.wait_lock);
                        list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
                                BUG_ON(b != trans->locking);

                                if (top->lock_start_time &&
                                    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
                                        continue;

                                top->lock_start_time = trans->locking_wait.start_time;

                                /* Don't check for self deadlock: */
                                if (trans == top->trans ||
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;

                                ret = lock_graph_descend(&g, trans, cycle);
                                raw_spin_unlock(&b->lock.wait_lock);

                                if (ret)
                                        return ret;
                                goto next;

                        }
                        raw_spin_unlock(&b->lock.wait_lock);
                }
        }

        if (g.nr > 1 && cycle)
                print_chain(cycle, &g);
        lock_graph_up(&g);
        goto next;
}

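/*
 * Deadlock-check callback handed to the six lock code when taking btree node
 * locks; returning a nonzero error (a transaction restart) aborts the lock
 * attempt instead of sleeping.
 */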
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
        struct btree_trans *trans = p;

        return bch2_check_for_deadlock(trans, NULL);
}

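/*
 * Take a write lock on a node we already hold an intent lock on, first
 * subtracting out any read locks this transaction holds on the same node via
 * other paths (so they don't block the write lock), then adding them back
 * afterwards.
 */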
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_bkey_cached_common *b,
                                 bool lock_may_not_fail)
{
        int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
        int ret;

        /*
         * Must drop our read locks before calling six_lock_write() -
         * six_unlock() won't do wakeups until the reader count
         * goes to 0, and it's safe because we have the node intent
         * locked:
         */
        six_lock_readers_add(&b->lock, -readers);
        ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
                                       lock_may_not_fail, _RET_IP_);
        six_lock_readers_add(&b->lock, readers);

        if (ret)
                mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

        return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
                                       struct btree_path *path,
                                       struct btree_bkey_cached_common *b)
{
        struct btree_path *linked;
        unsigned i;
        int ret;

        /*
         * XXX BIG FAT NOTICE
         *
         * Drop all read locks before taking a write lock:
         *
         * This is a hack, because bch2_btree_node_lock_write_nofail() is a
         * hack - but by dropping read locks first, this should never fail, and
         * we only use this in code paths where whatever read locks we've
         * already taken are no longer needed:
         */

        trans_for_each_path(trans, linked) {
                if (!linked->nodes_locked)
                        continue;

                for (i = 0; i < BTREE_MAX_DEPTH; i++)
                        if (btree_node_read_locked(linked, i)) {
                                btree_node_unlock(trans, linked, i);
                                btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
                        }
        }

        ret = __btree_node_lock_write(trans, path, b, true);
        BUG_ON(ret);
}

/* relock */

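/*
 * Relock (or upgrade to intent, if @upgrade) every level of @path from
 * path->level up to path->locks_want. Returns true if the path now holds all
 * the locks it wants and is up to date; on failure the path is fully
 * unlocked and marked as needing traversal.
 */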
static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
                                        bool upgrade)
{
        unsigned l = path->level;
        int fail_idx = -1;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
                      : bch2_btree_node_relock(trans, path, l)))
                        fail_idx = l;

                l++;
        } while (l < path->locks_want);

        /*
         * When we fail to get a lock, we have to ensure that any child nodes
         * can't be relocked so bch2_btree_path_traverse has to walk back up to
         * the node that we failed to relock:
         */
        if (fail_idx >= 0) {
                __bch2_btree_path_unlock(trans, path);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

                do {
                        path->l[fail_idx].b = upgrade
                                ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
                                : ERR_PTR(-BCH_ERR_no_btree_node_relock);
                        --fail_idx;
                } while (fail_idx >= 0);
        }

        if (path->uptodate == BTREE_ITER_NEED_RELOCK)
                path->uptodate = BTREE_ITER_UPTODATE;

        bch2_trans_verify_locks(trans);

        return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
                              struct btree_path *path, unsigned level,
                              bool trace)
{
        struct btree *b = btree_path_node(path, level);
        int want = __btree_lock_want(path, level);

        if (race_fault())
                goto fail;

        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
             btree_node_lock_increment(trans, &b->c, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
fail:
        if (trace && !trans->notrace_relock_fail)
                trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
}

/* upgrade */

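/*
 * Upgrade a read lock to an intent lock. We try, in order:
 *  - six_lock_tryupgrade(), after temporarily hiding any read locks we hold
 *    on the node via other paths,
 *  - relocking as intent against the saved lock sequence number, if we don't
 *    currently hold the node locked,
 *  - piggybacking on an intent lock already held via another path, by
 *    bumping its count.
 */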
bool bch2_btree_node_upgrade(struct btree_trans *trans,
                             struct btree_path *path, unsigned level)
{
        struct btree *b = path->l[level].b;
        struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

        if (!is_btree_node(path, level))
                return false;

        switch (btree_lock_want(path, level)) {
        case BTREE_NODE_UNLOCKED:
                BUG_ON(btree_node_locked(path, level));
                return true;
        case BTREE_NODE_READ_LOCKED:
                BUG_ON(btree_node_intent_locked(path, level));
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
        case BTREE_NODE_WRITE_LOCKED:
                BUG();
        }

        if (btree_node_intent_locked(path, level))
                return true;

        if (race_fault())
                return false;

        if (btree_node_locked(path, level)) {
                bool ret;

                six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
                ret = six_lock_tryupgrade(&b->c.lock);
                six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

                if (ret)
                        goto success;
        } else {
                if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
                        goto success;
        }

        /*
         * Do we already have an intent lock via another path? If so, just bump
         * lock count:
         */
        if (btree_node_lock_seq_matches(path, b, level) &&
            btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(trans, path, level);
                goto success;
        }

        trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
success:
        mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
        return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
                                  struct btree_path *path)
{
        unsigned l;

        for (l = path->level;
             l < path->locks_want && btree_path_node(path, l);
             l++) {
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(trans, path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                        trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
                }
        }

        return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, false);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
                trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
        }

        return 0;
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        EBUG_ON(path->locks_want >= new_locks_want);

        path->locks_want = new_locks_want;

        return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        struct btree_path *linked;

        if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
                return true;

        /*
         * XXX: this is ugly - we'd prefer to not be mucking with other
         * iterators in the btree_trans here.
         *
         * On failure to upgrade the iterator, setting iter->locks_want and
         * calling get_locks() is sufficient to make bch2_btree_path_traverse()
         * get the locks we want on transaction restart.
         *
         * But if this iterator was a clone, on transaction restart what we did
         * to this iterator isn't going to be preserved.
         *
         * Possibly we could add an iterator field for the parent iterator when
         * an iterator is a copy - for now, we'll just upgrade any other
         * iterators with the same btree id.
         *
         * The code below used to be needed to ensure ancestor nodes get locked
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
        if (!path->cached && !trans->in_traverse_all)
                trans_for_each_path(trans, linked)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true);
                        }

        return false;
}

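/*
 * Drop locks above @new_locks_want: levels above path->level are unlocked
 * entirely, while an intent lock at path->level itself is downgraded to a
 * read lock.
 */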
void __bch2_btree_path_downgrade(struct btree_trans *trans,
                                 struct btree_path *path,
                                 unsigned new_locks_want)
{
        unsigned l;

        EBUG_ON(path->locks_want < new_locks_want);

        path->locks_want = new_locks_want;

        while (path->nodes_locked &&
               (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
                if (l > path->level) {
                        btree_node_unlock(trans, path, l);
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
                                mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
                        }
                        break;
                }
        }

        bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(trans, path);
}

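/*
 * Relock every path the transaction has marked should_be_locked; if any of
 * them can't be relocked (typically because the node was modified or freed
 * while our locks were dropped) the transaction is restarted.
 */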
int bch2_trans_relock(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                __bch2_btree_path_unlock(trans, path);

        /*
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
        if (!trans->is_initial_gc)
                bch2_assert_btree_nodes_not_locked();
}

bool bch2_trans_locked(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->nodes_locked)
                        return true;
        return false;
}

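/*
 * Take a mutex we might block on while holding btree locks: drop all btree
 * node locks first, take the mutex, then relock; if the relock fails the
 * mutex is released again and the transaction restart error is returned.
 */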
int __bch2_trans_mutex_lock(struct btree_trans *trans,
                            struct mutex *lock)
{
        int ret;

        bch2_trans_unlock(trans);
        mutex_lock(lock);
        ret = bch2_trans_relock(trans);
        if (ret)
                mutex_unlock(lock);
        return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
        unsigned l;

        if (!path->nodes_locked) {
                BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
                       btree_path_node(path, path->level));
                return;
        }

        for (l = 0; l < BTREE_MAX_DEPTH; l++) {
                int want = btree_lock_want(path, l);
                int have = btree_node_locked_type(path, l);

                BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

                BUG_ON(is_btree_node(path, l) &&
                       (want == BTREE_NODE_UNLOCKED ||
                        have != BTREE_NODE_WRITE_LOCKED) &&
                       want != have);
        }
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify_locks(path);
}

#endif