// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

struct lock_class_key bch2_btree_node_lock_key;

/* Btree node locking: */

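/*
 * Adjust a six lock's read count by @nr: when the lock is using per-cpu
 * reader counts, bump the percpu counter; otherwise add or subtract directly
 * on the read_lock field packed into the atomic state word.
 */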
static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
        if (lock->readers)
                this_cpu_add(*lock->readers, nr);
        else if (nr > 0)
                atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
        else
                atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}

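/*
 * Count how many times @trans already holds node @b locked, per lock type,
 * by walking every btree_path in the transaction except @skip. Used when we
 * need to account for our own read/intent locks on a node, e.g. before
 * taking a write lock.
 */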
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
                                                  struct btree_bkey_cached_common *b,
                                                  unsigned level)
{
        struct btree_path *path;
        struct six_lock_count ret;

        memset(&ret, 0, sizeof(ret));

        if (IS_ERR_OR_NULL(b))
                return ret;

        trans_for_each_path(trans, path)
                if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);

                        if (t != BTREE_NODE_UNLOCKED)
                                ret.n[t]++;
                }

        return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
                        struct btree_path *path, struct btree *b)
{
        bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans is waiting to lock @node_want with type @lock_want
 */
struct trans_waiting_for_lock {
        struct btree_trans              *trans;
        struct btree_bkey_cached_common *node_want;
        enum six_lock_type              lock_want;

        /* for iterating over held locks: */
        u8                              path_idx;
        u8                              level;
        u64                             lock_start_time;
};

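/*
 * Stack for the deadlock cycle detector: g[0] is the transaction doing the
 * check (itself about to block), and each subsequent entry is a transaction
 * blocked waiting on a node lock held by the entry before it. Finding a
 * transaction that is already on the stack means we have found a cycle -
 * i.e. a would-be deadlock.
 */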
struct lock_graph {
        struct trans_waiting_for_lock   g[8];
        unsigned                        nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        prt_printf(out, "Found lock cycle (%u entries):", g->nr);
        prt_newline(out);

        for (i = g->g; i < g->g + g->nr; i++)
                bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g; i != g->g + g->nr; i++) {
                if (i != g->g)
                        prt_str(out, "<- ");
                prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
        }
        prt_newline(out);
}

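/*
 * Push/pop entries on the lock graph. Each transaction on the stack holds a
 * closure ref (taken in lock_graph_down(), dropped in lock_graph_up()) so it
 * can't go away while we're walking its paths.
 */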
static void lock_graph_up(struct lock_graph *g)
{
        closure_put(&g->g[--g->nr].trans->ref);
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
        closure_get(&trans->ref);

        g->g[g->nr++] = (struct trans_waiting_for_lock) {
                .trans          = trans,
                .node_want      = trans->locking,
                .lock_want      = trans->locking_wait.lock_want,
        };
}

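/*
 * Check that every transaction past the first is still waiting on the node we
 * recorded, with the same wait start time we saw when we added the edge. If
 * not, the edge is stale (the waiter got its lock or restarted): pop back to
 * that entry and return true, so the caller carries on without aborting
 * anything.
 */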
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g + 1; i < g->g + g->nr; i++)
                if (i->trans->locking != i->node_want ||
                    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
                        while (g->g + g->nr > i)
                                lock_graph_up(g);
                        return true;
                }

        return false;
}

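/*
 * Abort one transaction in the cycle. If it's the transaction running this
 * check (g->g[0]), restart it directly; otherwise set lock_must_abort and
 * wake the victim, which will see the flag at the top of
 * bch2_check_for_deadlock() the next time its lock wait loop polls it and
 * restart itself.
 */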
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
        if (i == g->g) {
                trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
                return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
                wake_up_process(i->trans->locking_wait.task);
                return 0;
        }
}

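/*
 * Rank how good a victim this transaction would be, higher is better:
 * 0 - its current lock attempt may not fail; never abort it
 * 1 - waiting on a write lock
 * 2 - an ordinary transaction
 * 3 - in traverse_all, the preferred victim
 */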
static int btree_trans_abort_preference(struct btree_trans *trans)
{
        if (trans->lock_may_not_fail)
                return 0;
        if (trans->locking_wait.lock_want == SIX_LOCK_write)
                return 1;
        if (!trans->in_traverse_all)
                return 2;
        return 3;
}

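/*
 * We found a cycle: first re-validate it (the edges may have gone stale),
 * then abort the entry with the highest abort preference. If @cycle is
 * non-NULL we're only reporting (debugfs), so just print the cycle. A cycle
 * consisting entirely of no-fail locks is a bug: dump every transaction and
 * its backtrace, then BUG().
 */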
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
        struct trans_waiting_for_lock *i, *abort = NULL;
        unsigned best = 0, pref;
        int ret;

        if (lock_graph_remove_non_waiters(g))
                return 0;

        /* Only checking, for debugfs: */
        if (cycle) {
                print_cycle(cycle, g);
                ret = -1;
                goto out;
        }

        for (i = g->g; i < g->g + g->nr; i++) {
                pref = btree_trans_abort_preference(i->trans);
                if (pref > best) {
                        abort = i;
                        best = pref;
                }
        }

        if (unlikely(!best)) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

                for (i = g->g; i < g->g + g->nr; i++) {
                        struct btree_trans *trans = i->trans;

                        bch2_btree_trans_to_text(&buf, trans);

                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
                        bch2_prt_backtrace(&buf, trans->locking_wait.task);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }

                bch2_print_string_as_lines(KERN_ERR, buf.buf);
                printbuf_exit(&buf);
                BUG();
        }

        ret = abort_lock(g, abort);
out:
        if (ret)
                while (g->nr)
                        lock_graph_up(g);
        return ret;
}

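/*
 * Add @trans to the lock graph. If it's already on the stack we've found a
 * cycle and go break it; if the stack is full we give up and restart the
 * original transaction (unless its lock may not fail, in which case we just
 * stop descending); otherwise push it and keep walking.
 */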
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
                              struct printbuf *cycle)
{
        struct btree_trans *orig_trans = g->g->trans;
        struct trans_waiting_for_lock *i;

        for (i = g->g; i < g->g + g->nr; i++)
                if (i->trans == trans)
                        return break_cycle(g, cycle);

        if (g->nr == ARRAY_SIZE(g->g)) {
                if (orig_trans->lock_may_not_fail)
                        return 0;

                while (g->nr)
                        lock_graph_up(g);
                trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
                return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
        }

        lock_graph_down(g, trans);
        return 0;
}

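/*
 * Two six lock types conflict unless both are read, or one is read and the
 * other intent. With SIX_LOCK_read == 0, SIX_LOCK_intent == 1 and
 * SIX_LOCK_write == 2, that reduces to the sum exceeding 1.
 */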
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
        return t1 + t2 > 1;
}

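/*
 * Deadlock avoidance: run (with @cycle == NULL, via
 * bch2_six_check_for_deadlock() below) before @trans blocks on a node lock.
 * Starting from @trans, do an iterative depth-first walk of the wait-for
 * graph: for every node lock held by the transaction on top of the stack,
 * scan that lock's wait list and descend into any waiter whose wanted lock
 * type conflicts with what's held. path_idx, level and lock_start_time in
 * each entry act as cursors so the scan resumes where it left off each time
 * we come back to an entry. Returns a transaction restart error if @trans
 * should abort rather than block, 0 if it's safe to sleep. With @cycle
 * non-NULL (debugfs) we only report cycles.
 */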
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
        struct lock_graph g;
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
        int ret;

        if (trans->lock_must_abort) {
                trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }

        g.nr = 0;
        lock_graph_down(&g, trans);
next:
        if (!g.nr)
                return 0;

        top = &g.g[g.nr - 1];

        trans_for_each_path_from(top->trans, path, top->path_idx) {
                if (!path->nodes_locked)
                        continue;

                if (top->path_idx != path->idx) {
                        top->path_idx           = path->idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }

                for (;
                     top->level < BTREE_MAX_DEPTH;
                     top->level++, top->lock_start_time = 0) {
                        int lock_held = btree_node_locked_type(path, top->level);

                        if (lock_held == BTREE_NODE_UNLOCKED)
                                continue;

                        b = &READ_ONCE(path->l[top->level].b)->c;

                        if (IS_ERR_OR_NULL(b)) {
                                BUG_ON(!lock_graph_remove_non_waiters(&g));
                                goto next;
                        }

                        if (list_empty_careful(&b->lock.wait_list))
                                continue;

                        raw_spin_lock(&b->lock.wait_lock);
                        list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
                                BUG_ON(b != trans->locking);

                                if (top->lock_start_time &&
                                    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
                                        continue;

                                top->lock_start_time = trans->locking_wait.start_time;

                                /* Don't check for self deadlock: */
                                if (trans == top->trans ||
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;

                                ret = lock_graph_descend(&g, trans, cycle);
                                raw_spin_unlock(&b->lock.wait_lock);

                                if (ret)
                                        return ret;
                                goto next;
                        }
                        raw_spin_unlock(&b->lock.wait_lock);
                }
        }

        if (g.nr > 1 && cycle)
                print_chain(cycle, &g);
        lock_graph_up(&g);
        goto next;
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
        struct btree_trans *trans = p;

        return bch2_check_for_deadlock(trans, NULL);
}

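/*
 * Take a write lock on a node we already have intent locked. Any read locks
 * this transaction holds on the node via other paths are temporarily
 * subtracted from the lock's reader count, so they don't count against
 * six_lock_write()'s wait for readers to drain, then added back afterwards.
 */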
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_bkey_cached_common *b,
                                 bool lock_may_not_fail)
{
        int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
        int ret;

        /*
         * Must drop our read locks before calling six_lock_write() -
         * six_unlock() won't do wakeups until the reader count
         * goes to 0, and it's safe because we have the node intent
         * locked:
         */
        six_lock_readers_add(&b->lock, -readers);
        ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, lock_may_not_fail);
        six_lock_readers_add(&b->lock, readers);

        if (ret)
                mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

        return ret;
}

/* relock */

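/*
 * Try to take (or upgrade to) the locks @path wants, from path->level up to
 * path->locks_want. On failure at any level, drop all of the path's locks,
 * mark the path as needing traverse, and poison the level pointers at and
 * below the highest level that failed with an error pointer, so that child
 * nodes can't be relocked until we've re-traversed from the failed node.
 * Returns true if the path now has all the locks it wants.
 */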
static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
                                        bool upgrade)
{
        unsigned l = path->level;
        int fail_idx = -1;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
                      : bch2_btree_node_relock(trans, path, l))) {
                        (upgrade
                         ? trace_node_upgrade_fail
                         : trace_node_relock_fail)(0, _RET_IP_,
                                        path->btree_id, &path->pos,
                                        l, path->l[l].lock_seq,
                                        path->l[l].b,
                                        is_btree_node(path, l)
                                        ? path->l[l].b->c.lock.state.seq
                                        : 0);
                        fail_idx = l;
                }

                l++;
        } while (l < path->locks_want);

        /*
         * When we fail to get a lock, we have to ensure that any child nodes
         * can't be relocked so bch2_btree_path_traverse has to walk back up to
         * the node that we failed to relock:
         */
        if (fail_idx >= 0) {
                __bch2_btree_path_unlock(trans, path);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

                do {
                        path->l[fail_idx].b = upgrade
                                ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
                                : ERR_PTR(-BCH_ERR_no_btree_node_relock);
                        --fail_idx;
                } while (fail_idx >= 0);
        }

        if (path->uptodate == BTREE_ITER_NEED_RELOCK)
                path->uptodate = BTREE_ITER_UPTODATE;

        bch2_trans_verify_locks(trans);

        return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

bool __bch2_btree_node_relock(struct btree_trans *trans,
                              struct btree_path *path, unsigned level,
                              bool trace)
{
        struct btree *b = btree_path_node(path, level);
        int want = __btree_lock_want(path, level);

        if (race_fault())
                goto fail;

        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
             btree_node_lock_increment(trans, &b->c, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
fail:
        if (trace)
                trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
}

/* upgrade */

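/*
 * Upgrade the lock at @level to an intent lock, trying in order: tryupgrade
 * on the read lock we hold (temporarily hiding read locks held via our other
 * paths, as in __bch2_btree_node_lock_write()), relocking as intent by
 * sequence number, or bumping an intent lock we already hold on the node via
 * another path.
 */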
bool bch2_btree_node_upgrade(struct btree_trans *trans,
                             struct btree_path *path, unsigned level)
{
        struct btree *b = path->l[level].b;
        struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

        if (!is_btree_node(path, level))
                return false;

        switch (btree_lock_want(path, level)) {
        case BTREE_NODE_UNLOCKED:
                BUG_ON(btree_node_locked(path, level));
                return true;
        case BTREE_NODE_READ_LOCKED:
                BUG_ON(btree_node_intent_locked(path, level));
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
        case BTREE_NODE_WRITE_LOCKED:
                BUG();
        }

        if (btree_node_intent_locked(path, level))
                return true;

        if (race_fault())
                return false;

        if (btree_node_locked(path, level)) {
                bool ret;

                six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
                ret = six_lock_tryupgrade(&b->c.lock);
                six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

                if (ret)
                        goto success;
        } else {
                if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
                        goto success;
        }

        /*
         * Do we already have an intent lock via another path? If so, just bump
         * lock count:
         */
        if (btree_node_lock_seq_matches(path, b, level) &&
            btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(trans, path, level);
                goto success;
        }

        trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
success:
        mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
        return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
                                  struct btree_path *path)
{
        unsigned l;

        for (l = path->level;
             l < path->locks_want && btree_path_node(path, l);
             l++) {
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(trans, path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                        trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
                }
        }

        return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, false);
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        EBUG_ON(path->locks_want >= new_locks_want);

        path->locks_want = new_locks_want;

        return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        struct btree_path *linked;

        if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
                return true;

        /*
         * XXX: this is ugly - we'd prefer to not be mucking with other
         * iterators in the btree_trans here.
         *
         * On failure to upgrade the iterator, setting iter->locks_want and
         * calling get_locks() is sufficient to make bch2_btree_path_traverse()
         * get the locks we want on transaction restart.
         *
         * But if this iterator was a clone, on transaction restart what we did
         * to this iterator isn't going to be preserved.
         *
         * Possibly we could add an iterator field for the parent iterator when
         * an iterator is a copy - for now, we'll just upgrade any other
         * iterators with the same btree id.
         *
         * The code below used to be needed to ensure ancestor nodes get locked
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
        if (!path->cached && !trans->in_traverse_all)
                trans_for_each_path(trans, linked)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true);
                        }

        return false;
}

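/*
 * Lower path->locks_want and drop locks we no longer need: every locked level
 * at or above the new locks_want is unlocked, except path->level itself,
 * which (if intent locked) is downgraded to a read lock.
 */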
void __bch2_btree_path_downgrade(struct btree_trans *trans,
                                 struct btree_path *path,
                                 unsigned new_locks_want)
{
        unsigned l;

        EBUG_ON(path->locks_want < new_locks_want);

        path->locks_want = new_locks_want;

        while (path->nodes_locked &&
               (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
                if (l > path->level) {
                        btree_node_unlock(trans, path, l);
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
                                mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
                        }
                        break;
                }
        }

        bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(trans, path);
}

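/*
 * Relock every path that's marked should_be_locked, restarting the
 * transaction if any of them can't be relocked. Typically paired with
 * bch2_trans_unlock() around an operation that may block, roughly (a sketch,
 * not code from this file):
 *
 *      bch2_trans_unlock(trans);
 *      do_something_that_may_block();
 *      ret = bch2_trans_relock(trans);
 *      if (ret)
 *              return ret;     // transaction was restarted
 */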
int bch2_trans_relock(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                __bch2_btree_path_unlock(trans, path);

        /*
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
        EBUG_ON(!trans->is_initial_gc &&
                lock_class_is_held(&bch2_btree_node_lock_key));
}

bool bch2_trans_locked(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->nodes_locked)
                        return true;
        return false;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
        unsigned l;

        if (!path->nodes_locked) {
                BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
                       btree_path_node(path, path->level));
                return;
        }

        for (l = 0; l < BTREE_MAX_DEPTH; l++) {
                int want = btree_lock_want(path, l);
                int have = btree_node_locked_type(path, l);

                BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

                BUG_ON(is_btree_node(path, l) &&
                       (want == BTREE_NODE_UNLOCKED ||
                        have != BTREE_NODE_WRITE_LOCKED) &&
                       want != have);
        }
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify_locks(path);
}

#endif