// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

struct lock_class_key bch2_btree_node_lock_key;

/* Btree node locking: */

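/*
 * Adjust the reader count on a six lock directly: use the per-cpu reader
 * counter if the lock has one, otherwise add to (or subtract from) the
 * read_lock field of the atomic lock state.
 */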
static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
        if (lock->readers)
                this_cpu_add(*lock->readers, nr);
        else if (nr > 0)
                atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
        else
                atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}

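/*
 * Count how many read/intent/write locks @trans already holds on node @b via
 * its btree_paths, ignoring @skip; used e.g. to drop our own read locks
 * before taking a write lock on the node.
 */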
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
                                                  struct btree_bkey_cached_common *b,
                                                  unsigned level)
{
        struct btree_path *path;
        struct six_lock_count ret;

        memset(&ret, 0, sizeof(ret));

        if (IS_ERR_OR_NULL(b))
                return ret;

        trans_for_each_path(trans, path)
                if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);

                        if (t != BTREE_NODE_UNLOCKED)
                                ret.n[t]++;
                }

        return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
                        struct btree_path *path, struct btree *b)
{
        bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans wants to lock @b with type @type
 */
struct trans_waiting_for_lock {
        struct btree_trans              *trans;
        struct btree_bkey_cached_common *node_want;
        enum six_lock_type              lock_want;

        /* for iterating over held locks: */
        u8                              path_idx;
        u8                              level;
        u64                             lock_start_time;
};

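/*
 * A lock_graph is the stack for an iterative depth-first walk of the
 * waits-for graph: each entry is a transaction we reached by following
 * "waiting on" edges from the transaction that started the check.
 */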
struct lock_graph {
        struct trans_waiting_for_lock   g[8];
        unsigned                        nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        prt_printf(out, "Found lock cycle (%u entries):", g->nr);
        prt_newline(out);

        for (i = g->g; i < g->g + g->nr; i++)
                bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g; i != g->g + g->nr; i++) {
                if (i != g->g)
                        prt_str(out, "<- ");
                prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
        }
        prt_newline(out);
}

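/*
 * Pushing a transaction onto the lock graph takes a ref on its closure so it
 * can't go away while we're looking at it; popping an entry drops that ref.
 */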
static void lock_graph_up(struct lock_graph *g)
{
        closure_put(&g->g[--g->nr].trans->ref);
}

static noinline void lock_graph_pop_all(struct lock_graph *g)
{
        while (g->nr)
                lock_graph_up(g);
}

static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
{
        closure_get(&trans->ref);

        g->g[g->nr++] = (struct trans_waiting_for_lock) {
                .trans          = trans,
                .node_want      = trans->locking,
                .lock_want      = trans->locking_wait.lock_want,
        };
}

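/*
 * Drop graph entries for transactions that aren't actually blocked anymore on
 * the lock we saw them waiting for - we raced with them making progress, so
 * any cycle through them is stale. Returns true if anything was removed.
 */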
static bool lock_graph_remove_non_waiters(struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g + 1; i < g->g + g->nr; i++)
                if (i->trans->locking != i->node_want ||
                    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
                        while (g->g + g->nr > i)
                                lock_graph_up(g);
                        return true;
                }

        return false;
}

static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
        if (i == g->g) {
                trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
                return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
                wake_up_process(i->trans->locking_wait.task);
                return 0;
        }
}

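/*
 * Rank how suitable a transaction is as the victim when a cycle has to be
 * broken: 0 means it must not be aborted, higher values make better victims.
 */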
static int btree_trans_abort_preference(struct btree_trans *trans)
{
        if (trans->lock_may_not_fail)
                return 0;
        if (trans->locking_wait.lock_want == SIX_LOCK_write)
                return 1;
        if (!trans->in_traverse_all)
                return 2;
        return 3;
}

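/*
 * We found a cycle: pick the transaction with the highest abort preference
 * and abort it to break the deadlock. When called from debugfs (@cycle
 * non-NULL) we only report the cycle; if every transaction in the cycle is
 * holding nofail locks, that's a bug - print everything and BUG().
 */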
static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
{
        struct trans_waiting_for_lock *i, *abort = NULL;
        unsigned best = 0, pref;
        int ret;

        if (lock_graph_remove_non_waiters(g))
                return 0;

        /* Only checking, for debugfs: */
        if (cycle) {
                print_cycle(cycle, g);
                ret = -1;
                goto out;
        }

        for (i = g->g; i < g->g + g->nr; i++) {
                pref = btree_trans_abort_preference(i->trans);
                if (pref > best) {
                        abort = i;
                        best = pref;
                }
        }

        if (unlikely(!best)) {
                struct printbuf buf = PRINTBUF;

                prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));

                for (i = g->g; i < g->g + g->nr; i++) {
                        struct btree_trans *trans = i->trans;

                        bch2_btree_trans_to_text(&buf, trans);

                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
                        bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }

                bch2_print_string_as_lines(KERN_ERR, buf.buf);
                printbuf_exit(&buf);
                BUG();
        }

        ret = abort_lock(g, abort);
out:
        if (ret)
                while (g->nr)
                        lock_graph_up(g);
        return ret;
}

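/*
 * Follow an edge in the waits-for graph to @trans: if it's already in the
 * graph we've found a cycle and have to break it; if the graph is full we
 * stop descending and, unless this is a nofail lock or a debugfs check,
 * restart the original transaction; otherwise push it and keep walking.
 */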
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
                              struct printbuf *cycle)
{
        struct btree_trans *orig_trans = g->g->trans;
        struct trans_waiting_for_lock *i;

        for (i = g->g; i < g->g + g->nr; i++)
                if (i->trans == trans)
                        return break_cycle(g, cycle);

        if (g->nr == ARRAY_SIZE(g->g)) {
                if (orig_trans->lock_may_not_fail)
                        return 0;

                while (g->nr)
                        lock_graph_up(g);

                if (cycle)
                        return 0;

                trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
                return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
        }

        lock_graph_down(g, trans);
        return 0;
}

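/*
 * With SIX_LOCK_read = 0, SIX_LOCK_intent = 1 and SIX_LOCK_write = 2, two
 * lock types conflict unless they are read+read or read+intent, i.e. exactly
 * when the sum exceeds 1.
 */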
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
        return t1 + t2 > 1;
}

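/*
 * Check whether @trans blocking on the lock it currently wants would close a
 * cycle in the waits-for graph. If a cycle is found a victim is chosen and
 * aborted: either @trans itself, in which case we return a
 * transaction_restart error, or another transaction, which is flagged and
 * woken so that it aborts itself. When called from debugfs with a @cycle
 * printbuf we only report, we never abort anything.
 */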
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
        struct lock_graph g;
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
        int ret;

        if (trans->lock_must_abort) {
                if (cycle)
                        return -1;

                trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }

        g.nr = 0;
        lock_graph_down(&g, trans);
next:
        if (!g.nr)
                return 0;

        top = &g.g[g.nr - 1];

        trans_for_each_path_from(top->trans, path, top->path_idx) {
                if (!path->nodes_locked)
                        continue;

                if (top->path_idx != path->idx) {
                        top->path_idx           = path->idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }

                for (;
                     top->level < BTREE_MAX_DEPTH;
                     top->level++, top->lock_start_time = 0) {
                        int lock_held = btree_node_locked_type(path, top->level);

                        if (lock_held == BTREE_NODE_UNLOCKED)
                                continue;

                        b = &READ_ONCE(path->l[top->level].b)->c;

                        if (IS_ERR_OR_NULL(b)) {
                                /*
                                 * If we get here, it means we raced with the
                                 * other thread updating its btree_path
                                 * structures - which means it can't be blocked
                                 * waiting on a lock:
                                 */
                                if (!lock_graph_remove_non_waiters(&g)) {
                                        /*
                                         * If lock_graph_remove_non_waiters()
                                         * didn't do anything, it must be
                                         * because we're being called by debugfs
                                         * checking for lock cycles, which
                                         * invokes us on btree_transactions that
                                         * aren't actually waiting on anything.
                                         * Just bail out:
                                         */
                                        lock_graph_pop_all(&g);
                                }

                                goto next;
                        }

                        if (list_empty_careful(&b->lock.wait_list))
                                continue;

                        raw_spin_lock(&b->lock.wait_lock);
                        list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
                                BUG_ON(b != trans->locking);

                                if (top->lock_start_time &&
                                    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
                                        continue;

                                top->lock_start_time = trans->locking_wait.start_time;

                                /* Don't check for self deadlock: */
                                if (trans == top->trans ||
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;

                                ret = lock_graph_descend(&g, trans, cycle);
                                raw_spin_unlock(&b->lock.wait_lock);

                                if (ret)
                                        return ret;
                                goto next;
                        }
                        raw_spin_unlock(&b->lock.wait_lock);
                }
        }

        if (g.nr > 1 && cycle)
                print_chain(cycle, &g);
        lock_graph_up(&g);
        goto next;
}

int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
        struct btree_trans *trans = p;

        return bch2_check_for_deadlock(trans, NULL);
}

int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_bkey_cached_common *b,
                                 bool lock_may_not_fail)
{
        int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
        int ret;

        /*
         * Must drop our read locks before calling six_lock_write() -
         * six_unlock() won't do wakeups until the reader count
         * goes to 0, and it's safe because we have the node intent
         * locked:
         */
        six_lock_readers_add(&b->lock, -readers);
        ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
                                       lock_may_not_fail, _RET_IP_);
        six_lock_readers_add(&b->lock, readers);

        if (ret)
                mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

        return ret;
}

/* relock */

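/*
 * Relock (or, with @upgrade, upgrade to intent) the locks at every level of
 * @path from path->level up to path->locks_want. On failure the path is
 * unlocked entirely and the levels at and below the failed one are flagged
 * so they can't be relocked until the path is traversed again.
 */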
static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
                                        bool upgrade)
{
        unsigned l = path->level;
        int fail_idx = -1;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
                      : bch2_btree_node_relock(trans, path, l)))
                        fail_idx = l;

                l++;
        } while (l < path->locks_want);

        /*
         * When we fail to get a lock, we have to ensure that any child nodes
         * can't be relocked so bch2_btree_path_traverse has to walk back up to
         * the node that we failed to relock:
         */
        if (fail_idx >= 0) {
                __bch2_btree_path_unlock(trans, path);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

                do {
                        path->l[fail_idx].b = upgrade
                                ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
                                : ERR_PTR(-BCH_ERR_no_btree_node_relock);
                        --fail_idx;
                } while (fail_idx >= 0);
        }

        if (path->uptodate == BTREE_ITER_NEED_RELOCK)
                path->uptodate = BTREE_ITER_UPTODATE;

        bch2_trans_verify_locks(trans);

        return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

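/*
 * Re-take a lock we previously held on a btree node: either relock with the
 * lock sequence number we remembered, or - if another path in this
 * transaction still has the node locked - just bump the lock count.
 */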
bool __bch2_btree_node_relock(struct btree_trans *trans,
                              struct btree_path *path, unsigned level,
                              bool trace)
{
        struct btree *b = btree_path_node(path, level);
        int want = __btree_lock_want(path, level);

        if (race_fault())
                goto fail;

        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
             btree_node_lock_increment(trans, &b->c, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
fail:
        if (trace && !trans->notrace_relock_fail)
                trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
}

/* upgrade */

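/*
 * Upgrade the lock at @level from read to intent: via six_lock_tryupgrade()
 * if we still hold the read lock, by relocking as intent if we don't, or by
 * bumping the intent count if another path already has the node intent
 * locked.
 */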
bool bch2_btree_node_upgrade(struct btree_trans *trans,
                             struct btree_path *path, unsigned level)
{
        struct btree *b = path->l[level].b;
        struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

        if (!is_btree_node(path, level))
                return false;

        switch (btree_lock_want(path, level)) {
        case BTREE_NODE_UNLOCKED:
                BUG_ON(btree_node_locked(path, level));
                return true;
        case BTREE_NODE_READ_LOCKED:
                BUG_ON(btree_node_intent_locked(path, level));
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
        case BTREE_NODE_WRITE_LOCKED:
                BUG();
        }

        if (btree_node_intent_locked(path, level))
                return true;

        if (race_fault())
                return false;

        if (btree_node_locked(path, level)) {
                bool ret;

                six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
                ret = six_lock_tryupgrade(&b->c.lock);
                six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

                if (ret)
                        goto success;
        } else {
                if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
                        goto success;
        }

        /*
         * Do we already have an intent lock via another path? If so, just bump
         * lock count:
         */
        if (btree_node_lock_seq_matches(path, b, level) &&
            btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(trans, path, level);
                goto success;
        }

        trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
success:
        mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
        return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
                                  struct btree_path *path)
{
        unsigned l;

        for (l = path->level;
             l < path->locks_want && btree_path_node(path, l);
             l++) {
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(trans, path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                        trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
                }
        }

        return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, false);
}

int __bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
                trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
        }

        return 0;
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        EBUG_ON(path->locks_want >= new_locks_want);

        path->locks_want = new_locks_want;

        return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        struct btree_path *linked;

        if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
                return true;

        /*
         * XXX: this is ugly - we'd prefer to not be mucking with other
         * iterators in the btree_trans here.
         *
         * On failure to upgrade the iterator, setting iter->locks_want and
         * calling get_locks() is sufficient to make bch2_btree_path_traverse()
         * get the locks we want on transaction restart.
         *
         * But if this iterator was a clone, on transaction restart what we did
         * to this iterator isn't going to be preserved.
         *
         * Possibly we could add an iterator field for the parent iterator when
         * an iterator is a copy - for now, we'll just upgrade any other
         * iterators with the same btree id.
         *
         * The code below used to be needed to ensure ancestor nodes get locked
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
        if (!path->cached && !trans->in_traverse_all)
                trans_for_each_path(trans, linked)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true);
                        }

        return false;
}

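/*
 * After lowering locks_want, drop or downgrade the locks that are no longer
 * wanted: levels at or above new_locks_want that are above the path's own
 * level are unlocked entirely, and an intent lock at the path's level is
 * downgraded to a read lock.
 */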
void __bch2_btree_path_downgrade(struct btree_trans *trans,
                                 struct btree_path *path,
                                 unsigned new_locks_want)
{
        unsigned l;

        EBUG_ON(path->locks_want < new_locks_want);

        path->locks_want = new_locks_want;

        while (path->nodes_locked &&
               (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
                if (l > path->level) {
                        btree_node_unlock(trans, path, l);
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
                                mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
                        }
                        break;
                }
        }

        bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(trans, path);
}

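/*
 * Relock every path in the transaction that is marked should_be_locked; if
 * any of them can no longer be relocked, restart the transaction.
 */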
int bch2_trans_relock(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                __bch2_btree_path_unlock(trans, path);

        /*
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
        EBUG_ON(!trans->is_initial_gc &&
                lock_class_is_held(&bch2_btree_node_lock_key));
}

bool bch2_trans_locked(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->nodes_locked)
                        return true;
        return false;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

void bch2_btree_path_verify_locks(struct btree_path *path)
{
        unsigned l;

        if (!path->nodes_locked) {
                BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
                       btree_path_node(path, path->level));
                return;
        }

        for (l = 0; l < BTREE_MAX_DEPTH; l++) {
                int want = btree_lock_want(path, l);
                int have = btree_node_locked_type(path, l);

                BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

                BUG_ON(is_btree_node(path, l) &&
                       (want == BTREE_NODE_UNLOCKED ||
                        have != BTREE_NODE_WRITE_LOCKED) &&
                       want != have);
        }
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify_locks(path);
}

#endif