// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
#endif

/* Btree node locking: */

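/*
 * Adjust a six lock's read count directly (percpu or atomic, depending on the
 * lock): used below to temporarily hide read locks this transaction already
 * holds on a node while taking a write lock on it.
 */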
static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
	if (lock->readers)
		this_cpu_add(*lock->readers, nr);
	else if (nr > 0)
		atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
	else
		atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}

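/*
 * Count how many times, and with which lock types, @trans has @b locked via
 * btree_paths other than @skip:
 */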
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
						  struct btree_path *skip,
						  struct btree_bkey_cached_common *b,
						  unsigned level)
{
	struct btree_path *path;
	struct six_lock_count ret;

	memset(&ret, 0, sizeof(ret));

	if (IS_ERR_OR_NULL(b))
		return ret;

	trans_for_each_path(trans, path)
		if (path != skip && &path->l[level].b->c == b) {
			int t = btree_node_locked_type(path, level);

			if (t != BTREE_NODE_UNLOCKED)
				ret.n[t]++;
		}

	return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
			struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * A transaction waiting on a lock: @trans wants to lock @node_want with type
 * @lock_want
 */
struct trans_waiting_for_lock {
	struct btree_trans		*trans;
	struct btree_bkey_cached_common *node_want;
	enum six_lock_type		lock_want;

	/* for iterating over held locks: */
	u8				path_idx;
	u8				level;
	u64				lock_start_time;
};

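/*
 * A lock_graph is a stack of transactions forming a waits-for chain: each
 * entry is blocked on a lock held by the transaction pushed after it. The
 * graph is bounded; overflowing it is handled in lock_graph_descend().
 */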
struct lock_graph {
	struct trans_waiting_for_lock	g[8];
	unsigned			nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
	prt_newline(out);

	for (i = g->g; i < g->g + g->nr; i++)
		bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g; i != g->g + g->nr; i++) {
		if (i != g->g)
			prt_str(out, "<- ");
		prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
	}
	prt_newline(out);
}

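/*
 * Transactions are pushed onto and popped off the graph with a closure ref
 * held, so a transaction we're inspecting can't be freed out from under us:
 */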
static void lock_graph_up(struct lock_graph *g)
{
	closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
	while (g->nr)
		lock_graph_up(g);
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
	closure_get(&trans->ref);

	g->g[g->nr++] = (struct trans_waiting_for_lock) {
		.trans		= trans,
		.node_want	= trans->locking,
		.lock_want	= trans->locking_wait.lock_want,
	};
}

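/*
 * Pop graph entries that are no longer blocked on the lock we recorded for
 * them (i.e. we raced with them making progress); returns true if anything
 * was removed:
 */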
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
	struct trans_waiting_for_lock *i;

	for (i = g->g + 1; i < g->g + g->nr; i++)
		if (i->trans->locking != i->node_want ||
		    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
			while (g->g + g->nr > i)
				lock_graph_up(g);
			return true;
		}

	return false;
}

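/*
 * Abort @i to break the cycle: if it's the transaction that started this
 * deadlock check (the first graph entry), restart it directly; otherwise flag
 * it and wake its task so it aborts itself:
 */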
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
	if (i == g->g) {
		trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
	} else {
		i->trans->lock_must_abort = true;
		wake_up_process(i->trans->locking_wait.task);
		return 0;
	}
}

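/*
 * Rank how good a victim @trans would be, higher being more preferred: 0
 * means it may not fail and cannot be aborted, transactions waiting on a
 * write lock are the next least preferred, and transactions in traverse_all
 * are the preferred victims:
 */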
static int btree_trans_abort_preference(struct btree_trans *trans)
{
	if (trans->lock_may_not_fail)
		return 0;
	if (trans->locking_wait.lock_want == SIX_LOCK_write)
		return 1;
	if (!trans->in_traverse_all)
		return 2;
	return 3;
}

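/*
 * We found a cycle: abort the entry with the highest abort preference. If
 * every entry is nofail, we have a genuine deadlock between nofail locks and
 * that's a bug, so print everything and BUG(). When called from debugfs
 * (@cycle != NULL) we only report the cycle, we never abort anything:
 */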
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
	struct trans_waiting_for_lock *i, *abort = NULL;
	unsigned best = 0, pref;
	int ret;

	if (lock_graph_remove_non_waiters(g))
		return 0;

	/* Only checking, for debugfs: */
	if (cycle) {
		print_cycle(cycle, g);
		ret = -1;
		goto out;
	}

	for (i = g->g; i < g->g + g->nr; i++) {
		pref = btree_trans_abort_preference(i->trans);
		if (pref > best) {
			abort = i;
			best = pref;
		}
	}

	if (unlikely(!best)) {
		struct printbuf buf = PRINTBUF;

		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

		for (i = g->g; i < g->g + g->nr; i++) {
			struct btree_trans *trans = i->trans;

			bch2_btree_trans_to_text(&buf, trans);

			prt_printf(&buf, "backtrace:");
			prt_newline(&buf);
			printbuf_indent_add(&buf, 2);
			bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
			printbuf_indent_sub(&buf, 2);
			prt_newline(&buf);
		}

		bch2_print_string_as_lines(KERN_ERR, buf.buf);
		printbuf_exit(&buf);
		BUG();
	}

	ret = abort_lock(g, abort);
out:
	if (ret)
		while (g->nr)
			lock_graph_up(g);
	return ret;
}

static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
			      struct printbuf *cycle)
{
	struct btree_trans *orig_trans = g->g->trans;
	struct trans_waiting_for_lock *i;

	for (i = g->g; i < g->g + g->nr; i++)
		if (i->trans == trans)
			return break_cycle(g, cycle);

	if (g->nr == ARRAY_SIZE(g->g)) {
		if (orig_trans->lock_may_not_fail)
			return 0;

		while (g->nr)
			lock_graph_up(g);

		if (cycle)
			return 0;

		trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
	}

	lock_graph_down(g, trans);
	return 0;
}

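/*
 * Relies on SIX_LOCK_read == 0, SIX_LOCK_intent == 1, SIX_LOCK_write == 2:
 * the sum exceeds 1 exactly when the two lock types conflict (read/read and
 * read/intent being the only compatible combinations):
 */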
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
	return t1 + t2 > 1;
}

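/*
 * Check whether @trans is about to deadlock, via an iterative depth first
 * search of the waits-for graph: for each node the transaction at the top of
 * the graph has locked, walk that lock's wait list; any waiter whose wanted
 * lock type conflicts with what we hold is pushed onto the graph, and finding
 * a transaction that's already on the graph means we've found a cycle, which
 * break_cycle() then resolves by picking a victim.
 *
 * When called from debugfs, @cycle is non-NULL and we only report the cycle
 * instead of breaking it.
 */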
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
	struct lock_graph g;
	struct trans_waiting_for_lock *top;
	struct btree_bkey_cached_common *b;
	struct btree_path *path;
	int ret;

	if (trans->lock_must_abort) {
		if (cycle)
			return -1;

		trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
	}

	g.nr = 0;
	lock_graph_down(&g, trans);
next:
	if (!g.nr)
		return 0;

	top = &g.g[g.nr - 1];

	trans_for_each_path_from(top->trans, path, top->path_idx) {
		if (!path->nodes_locked)
			continue;

		if (top->path_idx != path->idx) {
			top->path_idx		= path->idx;
			top->level		= 0;
			top->lock_start_time	= 0;
		}

		for (;
		     top->level < BTREE_MAX_DEPTH;
		     top->level++, top->lock_start_time = 0) {
			int lock_held = btree_node_locked_type(path, top->level);

			if (lock_held == BTREE_NODE_UNLOCKED)
				continue;

			b = &READ_ONCE(path->l[top->level].b)->c;

			if (IS_ERR_OR_NULL(b)) {
				/*
				 * If we get here, it means we raced with the
				 * other thread updating its btree_path
				 * structures - which means it can't be blocked
				 * waiting on a lock:
				 */
				if (!lock_graph_remove_non_waiters(&g)) {
					/*
					 * If lock_graph_remove_non_waiters()
					 * didn't do anything, it must be
					 * because we're being called by debugfs
					 * checking for lock cycles, which
					 * invokes us on btree_transactions that
					 * aren't actually waiting on anything.
					 * Just bail out:
					 */
					lock_graph_pop_all(&g);
				}

				goto next;
			}

			if (list_empty_careful(&b->lock.wait_list))
				continue;

			raw_spin_lock(&b->lock.wait_lock);
			list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
				BUG_ON(b != trans->locking);

				if (top->lock_start_time &&
				    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
					continue;

				top->lock_start_time = trans->locking_wait.start_time;

				/* Don't check for self deadlock: */
				if (trans == top->trans ||
				    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
					continue;

				ret = lock_graph_descend(&g, trans, cycle);
				raw_spin_unlock(&b->lock.wait_lock);

				if (ret)
					return ret;
				goto next;

			}
			raw_spin_unlock(&b->lock.wait_lock);
		}
	}

	if (g.nr > 1 && cycle)
		print_chain(cycle, &g);
	lock_graph_up(&g);
	goto next;
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
	struct btree_trans *trans = p;

	return bch2_check_for_deadlock(trans, NULL);
}

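/*
 * Take the write lock on @b: note that this transaction may itself hold read
 * locks on @b via other paths; those are counted and temporarily hidden from
 * the lock around the call, as explained in the comment below.
 */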
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
				 struct btree_bkey_cached_common *b,
				 bool lock_may_not_fail)
{
	int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
	int ret;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	six_lock_readers_add(&b->lock, -readers);
	ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
				       lock_may_not_fail, _RET_IP_);
	six_lock_readers_add(&b->lock, readers);

	if (ret)
		mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

	return ret;
}

/* relock */

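/*
 * Relock (or, with @upgrade, upgrade to intent locks) every level of @path
 * from path->level up to path->locks_want; returns true if the path ended up
 * with all the locks it wants. See the comment below for what happens when a
 * level can't be relocked:
 */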
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l)))
			fail_idx = l;

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(trans, path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = upgrade
				? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
				: ERR_PTR(-BCH_ERR_no_btree_node_relock);
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

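/*
 * Retake the lock on a single node, either by relocking with the sequence
 * number saved from when we last held it, or by bumping the count on a lock
 * this transaction already holds on the node via another path:
 */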
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level,
			      bool trace)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (race_fault())
		goto fail;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, &b->c, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	}
fail:
	if (trace && !trans->notrace_relock_fail)
		trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
	return false;
}

/* upgrade */

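/*
 * Upgrade the lock at @level from read to intent: either by upgrading the
 * lock we hold (temporarily hiding our own read counts so tryupgrade can
 * succeed), by relocking with the saved sequence number, or by borrowing an
 * intent lock this transaction already holds via another path:
 */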
bool bch2_btree_node_upgrade(struct btree_trans *trans,
			     struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;
	struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

	if (!is_btree_node(path, level))
		return false;

	switch (btree_lock_want(path, level)) {
	case BTREE_NODE_UNLOCKED:
		BUG_ON(btree_node_locked(path, level));
		return true;
	case BTREE_NODE_READ_LOCKED:
		BUG_ON(btree_node_intent_locked(path, level));
		return bch2_btree_node_relock(trans, path, level);
	case BTREE_NODE_INTENT_LOCKED:
		break;
	case BTREE_NODE_WRITE_LOCKED:
		BUG();
	}

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)) {
		bool ret;

		six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
		ret = six_lock_tryupgrade(&b->c.lock);
		six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

		if (ret)
			goto success;
	} else {
		if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
			goto success;
	}

	/*
	 * Do we already have an intent lock via another path? If so, just bump
	 * lock count:
	 */
	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(trans, path, level);
		goto success;
	}

	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
	return false;
success:
	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
	return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
				  struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			__bch2_btree_path_unlock(trans, path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
		}
	}

	return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, false);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
		trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
		return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
	}

	return 0;
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	struct btree_path *linked;

	if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	if (!path->cached && !trans->in_traverse_all)
		trans_for_each_path(trans, linked)
			if (linked != path &&
			    linked->cached == path->cached &&
			    linked->btree_id == path->btree_id &&
			    linked->locks_want < new_locks_want) {
				linked->locks_want = new_locks_want;
				btree_path_get_locks(trans, linked, true);
			}

	return false;
}

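/*
 * Drop locks at levels at or above @new_locks_want; if that reaches
 * path->level itself, that lock is downgraded from intent to read rather than
 * dropped:
 */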
void __bch2_btree_path_downgrade(struct btree_trans *trans,
				 struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(trans, path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(trans, path);
}

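/*
 * Relock every path marked should_be_locked; if any of them can't be
 * relocked, the transaction has to restart:
 */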
int bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
			trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return -((int) trans->restarted);

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
			return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
		}
	return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);

	/*
	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
	 * btree nodes, it implements its own walking:
	 */
	if (!trans->is_initial_gc)
		bch2_assert_btree_nodes_not_locked();
}

bool bch2_trans_locked(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}

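/*
 * Lock an arbitrary mutex from btree transaction context: btree locks are
 * dropped first so we can't deadlock against other transactions while
 * blocking on the mutex, then retaken afterwards - which can fail with a
 * transaction restart error that the caller must handle. An illustrative
 * caller pattern (a sketch only; "foo_lock" is a made-up example, not a real
 * field):
 *
 *	ret = __bch2_trans_mutex_lock(trans, &c->foo_lock);
 *	if (ret)
 *		return ret;
 *	...
 *	mutex_unlock(&c->foo_lock);
 */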
int __bch2_trans_mutex_lock(struct btree_trans *trans,
			    struct mutex *lock)
{
	int ret;

	bch2_trans_unlock(trans);
	mutex_lock(lock);
	ret = bch2_trans_relock(trans);
	if (ret)
		mutex_unlock(lock);
	return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
		       btree_path_node(path, path->level));
		return;
	}

	for (l = 0; l < BTREE_MAX_DEPTH; l++) {
		int want = btree_lock_want(path, l);
		int have = btree_node_locked_type(path, l);

		BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

		BUG_ON(is_btree_node(path, l) &&
		       (want == BTREE_NODE_UNLOCKED ||
			have != BTREE_NODE_WRITE_LOCKED) &&
		       want != have);
	}
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}

#endif