/* libbcachefs/btree_locking.c */
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_locking.h"
#include "btree_types.h"

struct lock_class_key bch2_btree_node_lock_key;

/* Btree node locking: */

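/*
 * Adjust the reader count on @lock by @nr: six locks track readers either in
 * a per-cpu counter or packed into the atomic lock state, so both
 * representations are handled here. Used below to temporarily drop and
 * restore the read locks this transaction itself holds on a node while
 * taking a write lock or upgrading.
 */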
static inline void six_lock_readers_add(struct six_lock *lock, int nr)
{
        if (lock->readers)
                this_cpu_add(*lock->readers, nr);
        else if (nr > 0)
                atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
        else
                atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
}

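/*
 * Count how many times @trans itself holds @b locked, per lock type, via
 * btree paths other than @skip: callers use this to distinguish their own
 * read/intent holds on a node from everyone else's.
 */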
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
                                                  struct btree_bkey_cached_common *b,
                                                  unsigned level)
{
        struct btree_path *path;
        struct six_lock_count ret;

        memset(&ret, 0, sizeof(ret));

        if (IS_ERR_OR_NULL(b))
                return ret;

        trans_for_each_path(trans, path)
                if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);

                        if (t != BTREE_NODE_UNLOCKED)
                                ret.n[t]++;
                }

        return ret;
}

/* unlock */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
                        struct btree_path *path, struct btree *b)
{
        bch2_btree_node_unlock_write_inlined(trans, path, b);
}

/* lock */

/*
 * @trans is waiting to take a lock of type @lock_want on @node_want:
 */
struct trans_waiting_for_lock {
        struct btree_trans              *trans;
        struct btree_bkey_cached_common *node_want;
        enum six_lock_type              lock_want;

        /* for iterating over held locks: */
        u8                              path_idx;
        u8                              level;
        u64                             lock_start_time;
};

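/*
 * A lock graph is a chain through the wait-for graph: entry 0 is the
 * transaction that started the deadlock check, and each later entry is a
 * transaction blocked waiting on a lock held by the entry before it. If a
 * transaction we descend into is already in the chain we've found a cycle,
 * i.e. a deadlock. The chain is deliberately small; overflowing it is
 * handled in lock_graph_descend().
 */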
struct lock_graph {
        struct trans_waiting_for_lock   g[8];
        unsigned                        nr;
};

static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        prt_printf(out, "Found lock cycle (%u entries):", g->nr);
        prt_newline(out);

        for (i = g->g; i < g->g + g->nr; i++)
                bch2_btree_trans_to_text(out, i->trans);
}

static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g; i != g->g + g->nr; i++) {
                if (i != g->g)
                        prt_str(out, "<- ");
                prt_printf(out, "%u ", i->trans->locking_wait.task->pid);
        }
        prt_newline(out);
}

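/*
 * Abort one transaction in the cycle: if it's the one running the deadlock
 * check (the head of the graph), restart it directly; otherwise flag it to
 * abort and wake it so it notices.
 */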
static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
{
        if (i == g->g) {
                trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
                return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
                wake_up_process(i->trans->locking_wait.task);
                return 0;
        }
}

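/*
 * Pick a victim to break the cycle: the #if 0 block below is a disabled
 * heuristic for choosing which transaction to abort; for now we abort the
 * first entry whose lock attempts are allowed to fail. A cycle consisting
 * entirely of nofail lock attempts cannot be broken, so that case dumps
 * every transaction involved (with backtraces) and BUG()s.
 */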
static noinline int break_cycle(struct lock_graph *g)
{
        struct trans_waiting_for_lock *i;

        /*
         * We'd like to prioritize aborting transactions that have done less
         * work - but it appears breaking cycles by telling other transactions
         * to abort may still be buggy:
         */
#if 0
        for (i = g->g; i < g->g + g->nr; i++) {
                if (i->trans->lock_may_not_fail ||
                    i->trans->locking_wait.lock_want == SIX_LOCK_write)
                        continue;

                return abort_lock(g, i);
        }

        for (i = g->g; i < g->g + g->nr; i++) {
                if (i->trans->lock_may_not_fail ||
                    !i->trans->in_traverse_all)
                        continue;

                return abort_lock(g, i);
        }
#endif
        for (i = g->g; i < g->g + g->nr; i++) {
                if (i->trans->lock_may_not_fail)
                        continue;

                return abort_lock(g, i);
        }

        {
                struct bch_fs *c = g->g->trans->c;
                struct printbuf buf = PRINTBUF;

                bch_err(c, "cycle of nofail locks");

                for (i = g->g; i < g->g + g->nr; i++) {
                        struct btree_trans *trans = i->trans;

                        bch2_btree_trans_to_text(&buf, trans);

                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
                        bch2_prt_backtrace(&buf, trans->locking_wait.task);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }

                bch2_print_string_as_lines(KERN_ERR, buf.buf);
                printbuf_exit(&buf);
                BUG();
        }
}

static void lock_graph_pop(struct lock_graph *g)
{
        closure_put(&g->g[--g->nr].trans->ref);
}

static void lock_graph_pop_above(struct lock_graph *g, struct trans_waiting_for_lock *above,
                                 struct printbuf *cycle)
{
        if (g->nr > 1 && cycle)
                print_chain(cycle, g);

        while (g->g + g->nr > above)
                lock_graph_pop(g);
}

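/*
 * Add @trans to the lock graph as the newest entry. While doing so, recheck
 * that every existing entry is still blocked on the node we recorded for it -
 * if not, we raced and pop back to the last entry that's still valid. If
 * @trans is already somewhere in the graph we've found a cycle: either print
 * it (when @cycle is non-NULL, i.e. we're only reporting) or break it. Graph
 * depth is capped at ARRAY_SIZE(g->g); hitting the cap restarts the original
 * transaction (unless its locks may not fail, in which case the check is
 * simply abandoned).
 */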
static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
                              struct printbuf *cycle)
{
        struct btree_trans *orig_trans = g->g->trans;
        struct trans_waiting_for_lock *i;
        int ret = 0;

        for (i = g->g; i < g->g + g->nr; i++) {
                if (i->trans->locking != i->node_want) {
                        lock_graph_pop_above(g, i - 1, cycle);
                        return 0;
                }

                if (i->trans == trans) {
                        if (cycle) {
                                /* Only checking: */
                                print_cycle(cycle, g);
                                ret = -1;
                        } else {
                                ret = break_cycle(g);
                        }

                        if (ret)
                                goto deadlock;
                        /*
                         * If we didn't abort (instead telling another
                         * transaction to abort), keep checking:
                         */
                }
        }

        if (g->nr == ARRAY_SIZE(g->g)) {
                if (orig_trans->lock_may_not_fail)
                        return 0;

                trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
                ret = btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
                goto deadlock;
        }

        closure_get(&trans->ref);

        g->g[g->nr++] = (struct trans_waiting_for_lock) {
                .trans          = trans,
                .node_want      = trans->locking,
                .lock_want      = trans->locking_wait.lock_want,
        };

        return 0;
deadlock:
        lock_graph_pop_above(g, g->g, cycle);
        return ret;
}

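/*
 * We raced: some transaction in the graph is no longer waiting on the lock we
 * recorded it waiting on. Pop the graph back to the last entry that still
 * looks valid; it's a bug to get here if every entry still checks out.
 */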
static noinline void lock_graph_remove_non_waiters(struct lock_graph *g,
                                                   struct printbuf *cycle)
{
        struct trans_waiting_for_lock *i;

        for (i = g->g + 1; i < g->g + g->nr; i++)
                if (i->trans->locking != i->node_want ||
                    i->trans->locking_wait.start_time != i[-1].lock_start_time) {
                        lock_graph_pop_above(g, i - 1, cycle);
                        return;
                }
        BUG();
}

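/*
 * Relies on the enum values SIX_LOCK_read = 0, SIX_LOCK_intent = 1,
 * SIX_LOCK_write = 2: read doesn't conflict with read or intent, intent
 * conflicts with intent, and write conflicts with everything - exactly the
 * combinations where t1 + t2 > 1.
 */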
static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
{
        return t1 + t2 > 1;
}

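/*
 * Walk the wait-for graph starting from @trans, which is about to block: for
 * each lock held by the transaction at the top of the graph, descend into the
 * transactions queued on that lock's wait list whose wanted lock type
 * conflicts with what's held. With @cycle non-NULL we only report cycles into
 * the printbuf; otherwise a detected cycle is broken by restarting one of the
 * transactions involved, and a transaction restart error is returned if it's
 * @trans itself that has to back off.
 */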
int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
{
        struct lock_graph g;
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
        int ret;

        if (trans->lock_must_abort) {
                trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }

        g.nr = 0;
        ret = lock_graph_descend(&g, trans, cycle);
        BUG_ON(ret);
next:
        if (!g.nr)
                return 0;

        top = &g.g[g.nr - 1];

        trans_for_each_path_from(top->trans, path, top->path_idx) {
                if (!path->nodes_locked)
                        continue;

                if (top->path_idx != path->idx) {
                        top->path_idx           = path->idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }

                for (;
                     top->level < BTREE_MAX_DEPTH;
                     top->level++, top->lock_start_time = 0) {
                        int lock_held = btree_node_locked_type(path, top->level);

                        if (lock_held == BTREE_NODE_UNLOCKED)
                                continue;

                        b = &READ_ONCE(path->l[top->level].b)->c;

                        if (unlikely(IS_ERR_OR_NULL(b))) {
                                lock_graph_remove_non_waiters(&g, cycle);
                                goto next;
                        }

                        if (list_empty_careful(&b->lock.wait_list))
                                continue;

                        raw_spin_lock(&b->lock.wait_lock);
                        list_for_each_entry(trans, &b->lock.wait_list, locking_wait.list) {
                                BUG_ON(b != trans->locking);

                                if (top->lock_start_time &&
                                    time_after_eq64(top->lock_start_time, trans->locking_wait.start_time))
                                        continue;

                                top->lock_start_time = trans->locking_wait.start_time;

                                /* Don't check for self deadlock: */
                                if (trans == top->trans ||
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;

                                ret = lock_graph_descend(&g, trans, cycle);
                                raw_spin_unlock(&b->lock.wait_lock);

                                if (ret)
                                        return ret < 0 ? ret : 0;
                                goto next;

                        }
                        raw_spin_unlock(&b->lock.wait_lock);
                }
        }

        if (g.nr > 1 && cycle)
                print_chain(cycle, &g);
        lock_graph_pop(&g);
        goto next;
}

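/*
 * Deadlock check in the callback form six locks expect from their
 * should_sleep_fn hook: @p is the btree_trans that is about to block on
 * @lock.
 */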
int bch2_six_check_for_deadlock(struct six_lock *lock, void *p)
{
        struct btree_trans *trans = p;

        return bch2_check_for_deadlock(trans, NULL);
}

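/*
 * Take the write lock on a node this transaction already has intent locked:
 * any read locks we hold on the node via other paths are dropped for the
 * duration (see the comment below). If the lock attempt fails, the path is
 * re-marked as holding the intent lock we still have.
 */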
int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *path,
                                 struct btree_bkey_cached_common *b,
                                 bool lock_may_not_fail)
{
        int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
        int ret;

        /*
         * Must drop our read locks before calling six_lock_write() -
         * six_unlock() won't do wakeups until the reader count
         * goes to 0, and it's safe because we have the node intent
         * locked:
         */
        six_lock_readers_add(&b->lock, -readers);
        ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, lock_may_not_fail);
        six_lock_readers_add(&b->lock, readers);

        if (ret)
                mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);

        return ret;
}

/* relock */

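/*
 * Relock (or, if @upgrade, upgrade to intent) every level from path->level up
 * to path->locks_want. On failure the path is unlocked entirely and the
 * failed levels are poisoned with an error pointer - see the comment below.
 * Returns true if the path ended up fully locked and uptodate.
 */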
static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
                                        bool upgrade)
{
        unsigned l = path->level;
        int fail_idx = -1;

        do {
                if (!btree_path_node(path, l))
                        break;

                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
                      : bch2_btree_node_relock(trans, path, l)))
                        fail_idx = l;

                l++;
        } while (l < path->locks_want);

        /*
         * When we fail to get a lock, we have to ensure that any child nodes
         * can't be relocked so bch2_btree_path_traverse has to walk back up to
         * the node that we failed to relock:
         */
        if (fail_idx >= 0) {
                __bch2_btree_path_unlock(trans, path);
                btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

                do {
                        path->l[fail_idx].b = upgrade
                                ? ERR_PTR(-BCH_ERR_no_btree_node_upgrade)
                                : ERR_PTR(-BCH_ERR_no_btree_node_relock);
                        --fail_idx;
                } while (fail_idx >= 0);
        }

        if (path->uptodate == BTREE_ITER_NEED_RELOCK)
                path->uptodate = BTREE_ITER_UPTODATE;

        bch2_trans_verify_locks(trans);

        return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

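/*
 * Try to retake a lock this path dropped: either relock at the sequence
 * number we recorded, proving the node wasn't modified in the meantime, or -
 * if the node is still locked via another path at a matching seq - just bump
 * that lock's count. race_fault() is a fault injection hook for testing.
 */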
bool __bch2_btree_node_relock(struct btree_trans *trans,
                              struct btree_path *path, unsigned level,
                              bool trace)
{
        struct btree *b = btree_path_node(path, level);
        int want = __btree_lock_want(path, level);

        if (race_fault())
                goto fail;

        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
             btree_node_lock_increment(trans, &b->c, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
fail:
        if (trace)
                trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
}

/* upgrade */

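/*
 * Upgrade the lock at @level from read to intent, trying in turn: nothing to
 * do if it's already intent locked; six_lock_tryupgrade() after discounting
 * the read locks this transaction holds on the node via other paths; relock
 * as intent at the recorded seq if we weren't holding the lock at all; or
 * piggyback on an intent lock already held via another path.
 */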
bool bch2_btree_node_upgrade(struct btree_trans *trans,
                             struct btree_path *path, unsigned level)
{
        struct btree *b = path->l[level].b;
        struct six_lock_count count = bch2_btree_node_lock_counts(trans, path, &b->c, level);

        if (!is_btree_node(path, level))
                return false;

        switch (btree_lock_want(path, level)) {
        case BTREE_NODE_UNLOCKED:
                BUG_ON(btree_node_locked(path, level));
                return true;
        case BTREE_NODE_READ_LOCKED:
                BUG_ON(btree_node_intent_locked(path, level));
                return bch2_btree_node_relock(trans, path, level);
        case BTREE_NODE_INTENT_LOCKED:
                break;
        case BTREE_NODE_WRITE_LOCKED:
                BUG();
        }

        if (btree_node_intent_locked(path, level))
                return true;

        if (race_fault())
                return false;

        if (btree_node_locked(path, level)) {
                bool ret;

                six_lock_readers_add(&b->c.lock, -count.n[SIX_LOCK_read]);
                ret = six_lock_tryupgrade(&b->c.lock);
                six_lock_readers_add(&b->c.lock, count.n[SIX_LOCK_read]);

                if (ret)
                        goto success;
        } else {
                if (six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
                        goto success;
        }

        /*
         * Do we already have an intent lock via another path? If so, just bump
         * lock count:
         */
        if (btree_node_lock_seq_matches(path, b, level) &&
            btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(trans, path, level);
                goto success;
        }

        trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
success:
        mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
        return true;
}

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
int bch2_btree_path_relock_intent(struct btree_trans *trans,
                                  struct btree_path *path)
{
        unsigned l;

        for (l = path->level;
             l < path->locks_want && btree_path_node(path, l);
             l++) {
                if (!bch2_btree_node_relock(trans, path, l)) {
                        __bch2_btree_path_unlock(trans, path);
                        btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
                        trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
                }
        }

        return 0;
}

__flatten
bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, false);
}

__flatten
bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
{
        return btree_path_get_locks(trans, path, true);
}

bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        EBUG_ON(path->locks_want >= new_locks_want);

        path->locks_want = new_locks_want;

        return btree_path_get_locks(trans, path, true);
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
{
        struct btree_path *linked;

        if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
                return true;

        /*
         * XXX: this is ugly - we'd prefer to not be mucking with other
         * iterators in the btree_trans here.
         *
         * On failure to upgrade the iterator, setting iter->locks_want and
         * calling get_locks() is sufficient to make bch2_btree_path_traverse()
         * get the locks we want on transaction restart.
         *
         * But if this iterator was a clone, on transaction restart what we did
         * to this iterator isn't going to be preserved.
         *
         * Possibly we could add an iterator field for the parent iterator when
         * an iterator is a copy - for now, we'll just upgrade any other
         * iterators with the same btree id.
         *
         * The code below used to be needed to ensure ancestor nodes get locked
         * before interior nodes - now that's handled by
         * bch2_btree_path_traverse_all().
         */
        if (!path->cached && !trans->in_traverse_all)
                trans_for_each_path(trans, linked)
                        if (linked != path &&
                            linked->cached == path->cached &&
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
                                btree_path_get_locks(trans, linked, true);
                        }

        return false;
}

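/*
 * Reduce locks_want and release locks that are no longer wanted: locked
 * levels at or above the new locks_want are unlocked, except that at
 * path->level itself an intent lock is downgraded to a read lock rather than
 * dropped.
 */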
void __bch2_btree_path_downgrade(struct btree_trans *trans,
                                 struct btree_path *path,
                                 unsigned new_locks_want)
{
        unsigned l;

        EBUG_ON(path->locks_want < new_locks_want);

        path->locks_want = new_locks_want;

        while (path->nodes_locked &&
               (l = btree_path_highest_level_locked(path)) >= path->locks_want) {
                if (l > path->level) {
                        btree_node_unlock(trans, path, l);
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
                                mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
                        }
                        break;
                }
        }

        bch2_btree_path_verify_locks(path);
}

/* Btree transaction locking: */

void bch2_trans_downgrade(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(trans, path);
}

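/*
 * Retake the locks that paths were depending on (should_be_locked) after the
 * transaction dropped them, e.g. around a blocking operation; if any of them
 * can't be retaken the transaction has to restart.
 */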
int bch2_trans_relock(struct btree_trans *trans)
{
        struct btree_path *path;

        if (unlikely(trans->restarted))
                return -((int) trans->restarted);

        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
                    !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
                        trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
                        return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
                }
        return 0;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                __bch2_btree_path_unlock(trans, path);

        /*
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
        EBUG_ON(!trans->is_initial_gc &&
                lock_class_is_held(&bch2_btree_node_lock_key));
}

bool bch2_trans_locked(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                if (path->nodes_locked)
                        return true;
        return false;
}

/* Debug */

#ifdef CONFIG_BCACHEFS_DEBUG

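/*
 * Assert that each level's lock marking is consistent: nothing is marked
 * locked at levels without a valid btree node, and otherwise the lock type
 * held matches what btree_lock_want() says the path should have (holding a
 * write lock where a weaker lock is wanted is allowed).
 */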
void bch2_btree_path_verify_locks(struct btree_path *path)
{
        unsigned l;

        if (!path->nodes_locked) {
                BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
                       btree_path_node(path, path->level));
                return;
        }

        for (l = 0; l < BTREE_MAX_DEPTH; l++) {
                int want = btree_lock_want(path, l);
                int have = btree_node_locked_type(path, l);

                BUG_ON(!is_btree_node(path, l) && have != BTREE_NODE_UNLOCKED);

                BUG_ON(is_btree_node(path, l) &&
                       (want == BTREE_NODE_UNLOCKED ||
                        have != BTREE_NODE_WRITE_LOCKED) &&
                       want != have);
        }
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
        struct btree_path *path;

        trans_for_each_path(trans, path)
                bch2_btree_path_verify_locks(path);
}

#endif