// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

static struct lock_class_key bch2_btree_node_lock_key;

void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
                          enum six_lock_init_flags flags)
{
        __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
        BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
}
#endif

/* Btree node locking: */

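/*
 * Count how many locks of each type (read/intent/write) @trans already holds
 * on node @b at @level, via btree_paths other than @skip.  Used below when we
 * need to account for (or temporarily hide) our own references to a node's
 * lock.
 */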
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
                                                  struct btree_bkey_cached_common *b,
                                                  unsigned level)
{
        struct btree_path *path;
        struct six_lock_count ret;

        memset(&ret, 0, sizeof(ret));

        if (IS_ERR_OR_NULL(b))
                return ret;

        trans_for_each_path(trans, path)
                if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);

                        if (t != BTREE_NODE_UNLOCKED)
                                ret.n[t]++;
                }

        return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
                        struct btree_path *path, struct btree *b)
{
        bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @node_want with type @lock_want
 */
struct trans_waiting_for_lock {
        struct btree_trans              *trans;
        struct btree_bkey_cached_common *node_want;
        enum six_lock_type              lock_want;

        /* for iterating over held locks: */
        u8                              path_idx;
        u8                              level;
        u64                             lock_start_time;
};

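/*
 * The lock graph is the stack for an iterative depth-first search over the
 * "waits for" relationships between transactions: g[0] is the transaction
 * that initiated the check, and each entry holds a lock that the following
 * entry is waiting on.  The array is fixed size; hitting the limit is handled
 * in lock_graph_descend().
 */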
struct lock_graph {
        struct trans_waiting_for_lock   g[8];
        unsigned                        nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        prt_printf(out, "Found lock cycle (%u entries):", g->nr);
        prt_newline(out);

        for (i = g->g; i < g->g + g->nr; i++)
                bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g; i != g->g + g->nr; i++) {
                if (i != g->g)
                        prt_str(out, "<- ");
                prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
        }
        prt_newline(out);
}

static void lock_graph_up(struct lock_graph *g)
{
        closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
        while (g->nr)
                lock_graph_up(g);
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
        closure_get(&trans->ref);

        g->g[g->nr++] = (struct trans_waiting_for_lock) {
                .trans          = trans,
                .node_want      = trans->locking,
                .lock_want      = trans->locking_wait.lock_want,
        };
}

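/*
 * Prune the graph of transactions that are no longer actually blocked - i.e.
 * we raced and they've since changed what they're waiting on.  The first stale
 * entry and everything pushed after it are popped; returns true if anything
 * was removed.
 */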
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g + 1; i < g->g + g->nr; i++)
                if (i->trans->locking != i->node_want ||
                    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
                        while (g->g + g->nr > i)
                                lock_graph_up(g);
                        return true;
                }

        return false;
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
        if (i == g->g) {
                trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
                return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
                wake_up_process(i->trans->locking_wait.task);
                return 0;
        }
}

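/*
 * Rank how suitable @trans is as a victim for breaking a cycle: 0 means it may
 * not be aborted at all (lock_may_not_fail); among the rest, higher is a
 * better victim - transactions waiting on a write lock rank lowest, and
 * transactions in traverse_all rank highest.
 */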
static int btree_trans_abort_preference(struct btree_trans *trans)
{
        if (trans->lock_may_not_fail)
                return 0;
        if (trans->locking_wait.lock_want == SIX_LOCK_write)
                return 1;
        if (!trans->in_traverse_all)
                return 2;
        return 3;
}

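/*
 * We found a cycle: re-check that everyone in it is still genuinely waiting,
 * then abort the entry with the highest abort preference - or, if we're only
 * being asked to report for debugfs, just print the cycle.  A cycle consisting
 * entirely of nofail (lock_may_not_fail) transactions is a bug.
 */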
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
        struct trans_waiting_for_lock *i, *abort = NULL;
        unsigned best = 0, pref;
        int ret;

        if (lock_graph_remove_non_waiters(g))
                return 0;

        /* Only checking, for debugfs: */
        if (cycle) {
                print_cycle(cycle, g);
                ret = -1;
                goto out;
        }

        for (i = g->g; i < g->g + g->nr; i++) {
                pref = btree_trans_abort_preference(i->trans);
                if (pref > best) {
                        abort = i;
                        best = pref;
                }
        }

        if (unlikely(!best)) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

                for (i = g->g; i < g->g + g->nr; i++) {
                        struct btree_trans *trans = i->trans;

                        bch2_btree_trans_to_text(&buf, trans);

                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
                        bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }

                bch2_print_string_as_lines(KERN_ERR, buf.buf);
                printbuf_exit(&buf);
                BUG();
        }

        ret = abort_lock(g, abort);
out:
        if (ret)
                while (g->nr)
                        lock_graph_up(g);
        return ret;
}

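/*
 * Add an edge from the transaction on top of the stack to @trans, which is
 * waiting on a lock the top entry holds.  If @trans is already in the graph
 * we've found a cycle; if the graph is full we pop everything and restart the
 * original transaction rather than risk missing a deadlock (unless it may not
 * fail, or we're only reporting for debugfs).
 */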
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
                              struct printbuf *cycle)
{
        struct btree_trans *orig_trans = g->g->trans;
        struct trans_waiting_for_lock *i;

        for (i = g->g; i < g->g + g->nr; i++)
                if (i->trans == trans)
                        return break_cycle(g, cycle);

        if (g->nr == ARRAY_SIZE(g->g)) {
                if (orig_trans->lock_may_not_fail)
                        return 0;

                while (g->nr)
                        lock_graph_up(g);

                if (cycle)
                        return 0;

                trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
                return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
        }

        lock_graph_down(g, trans);
        return 0;
}

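/*
 * This relies on the enum ordering SIX_LOCK_read < SIX_LOCK_intent <
 * SIX_LOCK_write (0, 1, 2): the sum exceeds 1 exactly when the two lock types
 * conflict - read/read and read/intent are compatible, while intent/intent and
 * anything involving a write lock are not.
 */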
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
        return t1 + t2 > 1;
}

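/*
 * Check whether @trans, which is about to block, would deadlock: iterative DFS
 * over the graph described above.  For the transaction on top of the stack we
 * scan every btree node lock it holds, walk that lock's wait list, and follow
 * edges to waiters whose wanted lock type conflicts with what's held; finding
 * a transaction that's already in the graph means we've found a cycle.
 * path_idx/level/lock_start_time in each entry record where to resume scanning
 * when we pop back to it.  If @cycle is non-NULL we're being called from
 * debugfs to report cycles, not to break them.
 */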
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
        struct lock_graph g;
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
        int ret;

        if (trans->lock_must_abort) {
                if (cycle)
                        return -1;

                trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }

        g.nr = 0;
        lock_graph_down(&g, trans);
next:
        if (!g.nr)
                return 0;

        top = &g.g[g.nr - 1];

        trans_for_each_path_from(top->trans, path, top->path_idx) {
                if (!path->nodes_locked)
                        continue;

                if (top->path_idx != path->idx) {
                        top->path_idx           = path->idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }

                for (;
                     top->level < BTREE_MAX_DEPTH;
                     top->level++, top->lock_start_time = 0) {
                        int lock_held = btree_node_locked_type(path, top->level);

                        if (lock_held == BTREE_NODE_UNLOCKED)
                                continue;

                        b = &READ_ONCE(path->l[top->level].b)->c;

                        if (IS_ERR_OR_NULL(b)) {
                                /*
                                 * If we get here, it means we raced with the
                                 * other thread updating its btree_path
                                 * structures - which means it can't be blocked
                                 * waiting on a lock:
                                 */
                                if (!lock_graph_remove_non_waiters(&g)) {
                                        /*
                                         * If lock_graph_remove_non_waiters()
                                         * didn't do anything, it must be
                                         * because we're being called by debugfs
                                         * checking for lock cycles, which
                                         * invokes us on btree_transactions that
                                         * aren't actually waiting on anything.
                                         * Just bail out:
                                         */
                                        lock_graph_pop_all(&g);
                                }

                                goto next;
                        }

                        if (list_empty_careful(&b->lock.wait_list))
                                continue;

                        raw_spin_lock(&b->lock.wait_lock);
                        list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
                                BUG_ON(b != trans->locking);

                                if (top->lock_start_time &&
                                    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
                                        continue;

                                top->lock_start_time = trans->locking_wait.start_time;

                                /* Don't check for self deadlock: */
                                if (trans == top->trans ||
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;

                                ret = lock_graph_descend(&g, trans, cycle);
                                raw_spin_unlock(&b->lock.wait_lock);

                                if (ret)
                                        return ret;
                                goto next;
                        }
                        raw_spin_unlock(&b->lock.wait_lock);
                }
        }

        if (g.nr > 1 && cycle)
                print_chain(cycle, &g);
        lock_graph_up(&g);
        goto next;
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
        struct btree_trans *trans = p;

        return bch2_check_for_deadlock(trans, NULL);
}

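/*
 * Take a write lock on a node we already have intent locked.  Read locks this
 * transaction holds on the node via other paths would prevent the write lock
 * from being granted, so they're subtracted from the reader count for the
 * duration of the attempt and added back afterwards.
 */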
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_bkey_cached_common *b,
                                 bool lock_may_not_fail)
{
        int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
        int ret;

        /*
         * Must drop our read locks before calling six_lock_write() -
         * six_unlock() won't do wakeups until the reader count
         * goes to 0, and it's safe because we have the node intent
         * locked:
         */
        six_lock_readers_add(&b->lock, -readers);
        ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
                                       lock_may_not_fail, _RET_IP_);
        six_lock_readers_add(&b->lock, readers);

        if (ret)
                mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

        return ret;
}

void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
                                       struct btree_path *path,
                                       struct btree_bkey_cached_common *b)
{
        struct btree_path *linked;
        unsigned i;
        int ret;

        /*
         * XXX BIG FAT NOTICE
         *
         * Drop all read locks before taking a write lock:
         *
         * This is a hack, because bch2_btree_node_lock_write_nofail() is a
         * hack - but by dropping read locks first, this should never fail, and
         * we only use this in code paths where whatever read locks we've
         * already taken are no longer needed:
         */

        trans_for_each_path(trans, linked) {
                if (!linked->nodes_locked)
                        continue;

                for (i = 0; i < BTREE_MAX_DEPTH; i++)
                        if (btree_node_read_locked(linked, i)) {
                                btree_node_unlock(trans, linked, i);
                                btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
                        }
        }

        ret = __btree_node_lock_write(trans, path, b, true);
        BUG_ON(ret);
}

/* relock */

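/*
 * Relock (or, with @upgrade, upgrade to intent) every level from path->level
 * up to path->locks_want.  On failure the path is fully unlocked and the
 * failing level and everything below it are poisoned with an error pointer, so
 * that bch2_btree_path_traverse() has to walk back up past the node we
 * couldn't relock.
 */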
static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
                                        bool upgrade)
{
        unsigned l = path->level;
        int fail_idx = -1;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
                      : bch2_btree_node_relock(trans, path, l)))
                        fail_idx = l;

                l++;
        } while (l < path->locks_want);

        /*
         * When we fail to get a lock, we have to ensure that any child nodes
         * can't be relocked so bch2_btree_path_traverse has to walk back up to
         * the node that we failed to relock:
         */
        if (fail_idx >= 0) {
                __bch2_btree_path_unlock(trans, path);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

                do {
                        path->l[fail_idx].b = upgrade
                                ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
                                : ERR_PTR(-BCH_ERR_no_btree_node_relock);
                        --fail_idx;
                } while (fail_idx >= 0);
        }

        if (path->uptodate == BTREE_ITER_NEED_RELOCK)
                path->uptodate = BTREE_ITER_UPTODATE;

        bch2_trans_verify_locks(trans);

        return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
                              struct btree_path *path, unsigned level,
                              bool trace)
{
        struct btree *b = btree_path_node(path, level);
        int want = __btree_lock_want(path, level);

        if (race_fault())
                goto fail;

        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
             btree_node_lock_increment(trans, &b->c, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
fail:
        if (trace && !trans->notrace_relock_fail)
                trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
}

/* upgrade */

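/*
 * Upgrade the lock at @level from read to intent.  We try, in order: a direct
 * tryupgrade (temporarily hiding read locks we hold via other paths so they
 * don't block it), relocking the node as intent if we'd dropped our lock, and
 * finally piggybacking on an intent lock already held on the same node via
 * another path.
 */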
bool bch2_btree_node_upgrade(struct btree_trans *trans,
                             struct btree_path *path, unsigned level)
{
        struct btree *b = path->l[level].b;
        struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

        if (!is_btree_node(path, level))
                return false;

        switch (btree_lock_want(path, level)) {
        case BTREE_NODE_UNLOCKED:
                BUG_ON(btree_node_locked(path, level));
                return true;
        case BTREE_NODE_READ_LOCKED:
                BUG_ON(btree_node_intent_locked(path, level));
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
        case BTREE_NODE_WRITE_LOCKED:
                BUG();
        }

        if (btree_node_intent_locked(path, level))
                return true;

        if (race_fault())
                return false;

        if (btree_node_locked(path, level)) {
                bool ret;

                six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
                ret = six_lock_tryupgrade(&b->c.lock);
                six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

                if (ret)
                        goto success;
        } else {
                if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
                        goto success;
        }

        /*
         * Do we already have an intent lock via another path? If so, just bump
         * lock count:
         */
        if (btree_node_lock_seq_matches(path, b, level) &&
            btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(trans, path, level);
                goto success;
        }

        trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
success:
        mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
        return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
                                  struct btree_path *path)
{
        unsigned l;

        for (l = path->level;
             l < path->locks_want && btree_path_node(path, l);
             l++) {
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(trans, path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                        trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
                }
        }

        return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, false);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
                trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
        }

        return 0;
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        EBUG_ON(path->locks_want >= new_locks_want);

        path->locks_want = new_locks_want;

        return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        struct btree_path *linked;

        if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
                return true;

        /*
         * XXX: this is ugly - we'd prefer to not be mucking with other
         * iterators in the btree_trans here.
         *
         * On failure to upgrade the iterator, setting iter->locks_want and
         * calling get_locks() is sufficient to make bch2_btree_path_traverse()
         * get the locks we want on transaction restart.
         *
         * But if this iterator was a clone, on transaction restart what we did
         * to this iterator isn't going to be preserved.
         *
         * Possibly we could add an iterator field for the parent iterator when
         * an iterator is a copy - for now, we'll just upgrade any other
         * iterators with the same btree id.
         *
         * The code below used to be needed to ensure ancestor nodes get locked
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
        if (!path->cached && !trans->in_traverse_all)
                trans_for_each_path(trans, linked)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true);
                        }

        return false;
}

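/*
 * Drop locks that are no longer wanted after reducing locks_want: locked
 * levels at or above the new locks_want are unlocked, except the path's own
 * level, where an intent lock is downgraded to a read lock instead.
 */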
void __bch2_btree_path_downgrade(struct btree_trans *trans,
                                 struct btree_path *path,
                                 unsigned new_locks_want)
{
        unsigned l;

        EBUG_ON(path->locks_want < new_locks_want);

        path->locks_want = new_locks_want;

        while (path->nodes_locked &&
               (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
                if (l > path->level) {
                        btree_node_unlock(trans, path, l);
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
                                mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
                        }
                        break;
                }
        }

        bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(trans, path);
}

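/*
 * Relock every path that was marked should_be_locked before the transaction
 * was unlocked; if any of them can't be relocked, the transaction has to
 * restart.  Typical pattern (sketch):
 *
 *      bch2_trans_unlock(trans);
 *      ... do something that may block ...
 *      ret = bch2_trans_relock(trans);
 *      if (ret)
 *              return ret;     // transaction restart
 */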
int bch2_trans_relock(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                __bch2_btree_path_unlock(trans, path);

        /*
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
        if (!trans->is_initial_gc)
                bch2_assert_btree_nodes_not_locked();
}

bool bch2_trans_locked(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->nodes_locked)
                        return true;
        return false;
}

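/*
 * Take an arbitrary mutex with all btree node locks dropped, so we never block
 * on the mutex while holding node locks: unlock the transaction, take the
 * mutex, then relock; if the relock fails, drop the mutex again and return the
 * restart error.
 *
 * Minimal usage sketch (c->hypothetical_lock is a made-up field, purely for
 * illustration):
 *
 *      ret = __bch2_trans_mutex_lock(trans, &c->hypothetical_lock);
 *      if (ret)
 *              return ret;
 *      ...
 *      mutex_unlock(&c->hypothetical_lock);
 */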
int __bch2_trans_mutex_lock(struct btree_trans *trans,
                            struct mutex *lock)
{
        int ret;

        bch2_trans_unlock(trans);
        mutex_lock(lock);
        ret = bch2_trans_relock(trans);
        if (ret)
                mutex_unlock(lock);
        return ret;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
        unsigned l;

        if (!path->nodes_locked) {
                BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
                       btree_path_node(path, path->level));
                return;
        }

        for (l = 0; l < BTREE_MAX_DEPTH; l++) {
                int want = btree_lock_want(path, l);
                int have = btree_node_locked_type(path, l);

                BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

                BUG_ON(is_btree_node(path, l) &&
                       (want == BTREE_NODE_UNLOCKED ||
                        have != BTREE_NODE_WRITE_LOCKED) &&
                       want != have);
        }
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify_locks(path);
}

#endif