Update bcachefs sources to f70a3402188e bcachefs: Fix ca->oldest_gen allocation
diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c
index f4340086c357879caf63841831036b1e9516eb56..0b0f9d607798842ca9c9877a95992eaf1ca9c619 100644
--- a/libbcachefs/btree_locking.c
+++ b/libbcachefs/btree_locking.c
@@ -4,19 +4,25 @@
 #include "btree_locking.h"
 #include "btree_types.h"
 
-struct lock_class_key bch2_btree_node_lock_key;
+static struct lock_class_key bch2_btree_node_lock_key;
 
-/* Btree node locking: */
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+                         enum six_lock_init_flags flags)
+{
+       __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       lockdep_set_no_check_recursion(&b->lock.dep_map);
+#endif
+}
 
-static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+#ifdef CONFIG_LOCKDEP
+void bch2_assert_btree_nodes_not_locked(void)
 {
-       if (lock->readers)
-               this_cpu_add(*lock->readers, nr);
-       else if (nr > 0)
-               atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
-       else
-               atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
+       BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
 }
+#endif
+
+/* Btree node locking: */
 
 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
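
Both additions in the hunk above lean on lockdep's lock-class machinery: every btree node lock is now initialized through bch2_btree_lock_init() with the same (now static) bch2_btree_node_lock_key, so "does this task hold any btree node lock?" becomes a single class-level query via lock_class_is_held(), and lockdep_set_no_check_recursion() suppresses lockdep's same-class recursion splat, since holding several btree node locks at once is normal here. A minimal userspace analogy of the class-wide assert, with hypothetical names and a per-thread counter standing in for lockdep:

#include <assert.h>
#include <pthread.h>

/* Userspace stand-in for lock_class_is_held(): count how many "btree
 * node" locks the current thread holds, so the class-wide invariant
 * can be asserted. All names here are hypothetical. */
static __thread int btree_node_locks_held;

struct node_lock {
	pthread_mutex_t m;
};

static void node_lock(struct node_lock *l)
{
	pthread_mutex_lock(&l->m);
	btree_node_locks_held++;
}

static void node_unlock(struct node_lock *l)
{
	btree_node_locks_held--;
	pthread_mutex_unlock(&l->m);
}

/* Analogue of bch2_assert_btree_nodes_not_locked(): */
static void assert_btree_nodes_not_locked(void)
{
	assert(!btree_node_locks_held);
}
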
@@ -94,66 +100,123 @@ static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
        prt_newline(out);
 }
 
-static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
+static void lock_graph_up(struct lock_graph *g)
 {
-       int ret;
+       closure_put(&g->g[--g->nr].trans->ref);
+}
 
+static noinline void lock_graph_pop_all(struct lock_graph *g)
+{
+       while (g->nr)
+               lock_graph_up(g);
+}
+
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+       g->g[g->nr++] = (struct trans_waiting_for_lock) {
+               .trans          = trans,
+               .node_want      = trans->locking,
+               .lock_want      = trans->locking_wait.lock_want,
+       };
+}
+
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+       closure_get(&trans->ref);
+       __lock_graph_down(g, trans);
+}
+
+static bool lock_graph_remove_non_waiters(struct lock_graph *g)
+{
+       struct trans_waiting_for_lock *i;
+
+       for (i = g->g + 1; i < g->g + g->nr; i++)
+               if (i->trans->locking != i->node_want ||
+                   i->trans->locking_wait.start_time != i[-1].lock_start_time) {
+                       while (g->g + g->nr > i)
+                               lock_graph_up(g);
+                       return true;
+               }
+
+       return false;
+}
+
+static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
+{
        if (i == g->g) {
                trace_and_count(i->trans->c, trans_restart_would_deadlock, i->trans, _RET_IP_);
-               ret = btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
+               return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
        } else {
                i->trans->lock_must_abort = true;
-               ret = 0;
+               wake_up_process(i->trans->locking_wait.task);
+               return 0;
        }
+}
 
-       for (i = g->g + 1; i < g->g + g->nr; i++)
-               wake_up_process(i->trans->locking_wait.task);
-       return ret;
+static int btree_trans_abort_preference(struct btree_trans *trans)
+{
+       if (trans->lock_may_not_fail)
+               return 0;
+       if (trans->locking_wait.lock_want == SIX_LOCK_write)
+               return 1;
+       if (!trans->in_traverse_all)
+               return 2;
+       return 3;
 }
 
-static noinline int break_cycle(struct lock_graph *g)
+static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
 {
-       struct trans_waiting_for_lock *i;
+       struct trans_waiting_for_lock *i, *abort = NULL;
+       unsigned best = 0, pref;
+       int ret;
 
-       for (i = g->g; i < g->g + g->nr; i++) {
-               if (i->trans->lock_may_not_fail ||
-                   i->trans->locking_wait.lock_want == SIX_LOCK_write)
-                       continue;
+       if (lock_graph_remove_non_waiters(g))
+               return 0;
 
-               return abort_lock(g, i);
+       /* Only checking, for debugfs: */
+       if (cycle) {
+               print_cycle(cycle, g);
+               ret = -1;
+               goto out;
        }
 
        for (i = g->g; i < g->g + g->nr; i++) {
-               if (i->trans->lock_may_not_fail ||
-                   !i->trans->in_traverse_all)
-                       continue;
-
-               return abort_lock(g, i);
+               pref = btree_trans_abort_preference(i->trans);
+               if (pref > best) {
+                       abort = i;
+                       best = pref;
+               }
        }
 
-       for (i = g->g; i < g->g + g->nr; i++) {
-               if (i->trans->lock_may_not_fail)
-                       continue;
+       if (unlikely(!best)) {
+               struct printbuf buf = PRINTBUF;
 
-               return abort_lock(g, i);
-       }
+               prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
 
-       BUG();
-}
+               for (i = g->g; i < g->g + g->nr; i++) {
+                       struct btree_trans *trans = i->trans;
 
-static void lock_graph_pop(struct lock_graph *g)
-{
-       closure_put(&g->g[--g->nr].trans->ref);
-}
+                       bch2_btree_trans_to_text(&buf, trans);
 
-static void lock_graph_pop_above(struct lock_graph *g, struct trans_waiting_for_lock *above,
-                                struct printbuf *cycle)
-{
-       if (g->nr > 1 && cycle)
-               print_chain(cycle, g);
+                       prt_printf(&buf, "backtrace:");
+                       prt_newline(&buf);
+                       printbuf_indent_add(&buf, 2);
+                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
+                       printbuf_indent_sub(&buf, 2);
+                       prt_newline(&buf);
+               }
+
+               bch2_print_string_as_lines(KERN_ERR, buf.buf);
+               printbuf_exit(&buf);
+               BUG();
+       }
 
-       while (g->g + g->nr > above)
-               lock_graph_pop(g);
+       ret = abort_lock(g, abort);
+out:
+       if (ret)
+               while (g->nr)
+                       lock_graph_up(g);
+       return ret;
 }
 
 static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
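
The rewritten break_cycle() separates policy from mechanism: btree_trans_abort_preference() scores each transaction in the cycle (0 means it may not be aborted, 3 is the cheapest victim), and the selection loop is a plain argmax. A cycle in which every entry scores 0 is a cycle of nofail locks, a bug, which triggers the full backtrace dump and BUG(). The selection pattern in isolation, with a simplified stand-in for the real trans_waiting_for_lock:

/* Argmax victim selection as in break_cycle(); struct waiter is an
 * illustrative stand-in, not the real trans_waiting_for_lock. */
struct waiter {
	int	lock_may_not_fail;
	int	wants_write_lock;
	int	in_traverse_all;
};

static int abort_preference(const struct waiter *w)
{
	if (w->lock_may_not_fail)
		return 0;		/* never a victim */
	if (w->wants_write_lock)
		return 1;		/* write waiters: worst abortable choice */
	if (!w->in_traverse_all)
		return 2;
	return 3;			/* preferred victim */
}

static struct waiter *pick_victim(struct waiter *g, unsigned nr)
{
	struct waiter *victim = NULL;
	unsigned best = 0;

	for (unsigned i = 0; i < nr; i++) {
		unsigned pref = abort_preference(&g[i]);

		if (pref > best) {
			victim = &g[i];
			best = pref;
		}
	}
	return victim;	/* NULL: every entry was nofail, i.e. a bug */
}
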
@@ -161,67 +224,31 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 {
        struct btree_trans *orig_trans = g->g->trans;
        struct trans_waiting_for_lock *i;
-       int ret = 0;
-
-       for (i = g->g; i < g->g + g->nr; i++) {
-               if (i->trans->locking != i->node_want) {
-                       lock_graph_pop_above(g, i - 1, cycle);
-                       return 0;
-               }
 
+       for (i = g->g; i < g->g + g->nr; i++)
                if (i->trans == trans) {
-                       if (cycle) {
-                               /* Only checking: */
-                               print_cycle(cycle, g);
-                               ret = -1;
-                       } else {
-                               ret = break_cycle(g);
-                       }
-
-                       if (ret)
-                               goto deadlock;
-                       /*
-                        * If we didn't abort (instead telling another
-                        * transaction to abort), keep checking:
-                        */
+                       closure_put(&trans->ref);
+                       return break_cycle(g, cycle);
                }
-       }
 
        if (g->nr == ARRAY_SIZE(g->g)) {
+               closure_put(&trans->ref);
+
                if (orig_trans->lock_may_not_fail)
                        return 0;
 
-               trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
-               ret = btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
-               goto deadlock;
-       }
+               while (g->nr)
+                       lock_graph_up(g);
 
-       closure_get(&trans->ref);
+               if (cycle)
+                       return 0;
 
-       g->g[g->nr++] = (struct trans_waiting_for_lock) {
-               .trans          = trans,
-               .node_want      = trans->locking,
-               .lock_want      = trans->locking_wait.lock_want,
-       };
+               trace_and_count(trans->c, trans_restart_would_deadlock_recursion_limit, trans, _RET_IP_);
+               return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
+       }
 
+       __lock_graph_down(g, trans);
        return 0;
-deadlock:
-       lock_graph_pop_above(g, g->g, cycle);
-       return ret;
-}
-
-static noinline void lock_graph_remove_non_waiters(struct lock_graph *g,
-                                                  struct printbuf *cycle)
-{
-       struct trans_waiting_for_lock *i;
-
-       for (i = g->g + 1; i < g->g + g->nr; i++)
-               if (i->trans->locking != i->node_want ||
-                   i->trans->locking_wait.start_time != i[-1].lock_start_time) {
-                       lock_graph_pop_above(g, i - 1, cycle);
-                       return;
-               }
-       BUG();
 }
 
 static bool lock_type_conflicts(enum six_lock_type t1, enum six_lock_type t2)
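
The restructured lock_graph_descend() makes the DFS shape explicit: the caller now passes in a closure ref on trans, so every exit that does not push it onto the graph must closure_put() it; a deadlock cycle exists exactly when trans is already somewhere on the stack; and filling ARRAY_SIZE(g->g) is treated as "too deep to prove anything", restarting the original transaction rather than risking a missed cycle. The core membership test, as a self-contained sketch over integer ids:

#include <stdbool.h>

#define SKETCH_MAX_DEPTH 8

/* DFS over a waits-for graph with an explicit stack: a cycle is found
 * when the node we are about to descend into is already on the stack.
 * Simplified to int ids; illustrative only. */
struct dfs_stack {
	int	node[SKETCH_MAX_DEPTH];
	unsigned nr;
};

/* Returns true if pushing `next` would close a cycle. */
static bool dfs_descend(struct dfs_stack *g, int next)
{
	for (unsigned i = 0; i < g->nr; i++)
		if (g->node[i] == next)
			return true;	/* already on the stack: cycle */

	if (g->nr < SKETCH_MAX_DEPTH)	/* the real code restarts on overflow */
		g->node[g->nr++] = next;
	return false;
}
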
@@ -235,28 +262,31 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
+       unsigned path_idx;
        int ret;
 
        if (trans->lock_must_abort) {
+               if (cycle)
+                       return -1;
+
                trace_and_count(trans->c, trans_restart_would_deadlock, trans, _RET_IP_);
                return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
        }
 
        g.nr = 0;
-       ret = lock_graph_descend(&g, trans, cycle);
-       BUG_ON(ret);
+       lock_graph_down(&g, trans);
 next:
        if (!g.nr)
                return 0;
 
        top = &g.g[g.nr - 1];
 
-       trans_for_each_path_from(top->trans, path, top->path_idx) {
+       trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
                if (!path->nodes_locked)
                        continue;
 
-               if (top->path_idx != path->idx) {
-                       top->path_idx           = path->idx;
+               if (path_idx != top->path_idx) {
+                       top->path_idx           = path_idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }
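
The switch to trans_for_each_path_safe_from() with an explicit path_idx is the usual index-based safe iteration: another transaction's btree_paths may be added or dropped while we inspect them, so the walk keeps an index and re-derives the path each step instead of caching a pointer across iterations. The general shape, sketched with illustrative types:

/* Index-based iteration that survives the underlying array changing
 * between steps: re-derive the element from the index every time.
 * Types here are illustrative, not the real btree_path. */
struct path { int nodes_locked; };

struct path_array {
	struct path *v;
	unsigned nr;
};

static void walk_locked_paths(struct path_array *a, unsigned start)
{
	for (unsigned idx = start; idx < a->nr; idx++) {
		struct path *path = &a->v[idx];	/* re-derived, never cached */

		if (!path->nodes_locked)
			continue;
		/* ... examine this path's locks ... */
	}
}
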
@@ -271,8 +301,26 @@ next:
 
                        b = &READ_ONCE(path->l[top->level].b)->c;
 
-                       if (unlikely(IS_ERR_OR_NULL(b))) {
-                               lock_graph_remove_non_waiters(&g, cycle);
+                       if (IS_ERR_OR_NULL(b)) {
+                               /*
+                                * If we get here, it means we raced with the
+                                * other thread updating its btree_path
+                                * structures - which means it can't be blocked
+                                * waiting on a lock:
+                                */
+                               if (!lock_graph_remove_non_waiters(&g)) {
+                                       /*
+                                        * If lock_graph_remove_non_waiters()
+                                        * didn't do anything, it must be
+                                        * because we're being called by debugfs
+                                        * checking for lock cycles, which
+                                        * invokes us on btree_transactions that
+                                        * aren't actually waiting on anything.
+                                        * Just bail out:
+                                        */
+                                       lock_graph_pop_all(&g);
+                               }
+
                                goto next;
                        }
 
@@ -294,11 +342,12 @@ next:
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;
 
-                               ret = lock_graph_descend(&g, trans, cycle);
+                               closure_get(&trans->ref);
                                raw_spin_unlock(&b->lock.wait_lock);
 
+                               ret = lock_graph_descend(&g, trans, cycle);
                                if (ret)
-                                       return ret < 0 ? ret : 0;
+                                       return ret;
                                goto next;
 
                        }
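
The reordering in this hunk is the standard take-the-reference-before-dropping-the-lock pattern: b->lock.wait_lock is what keeps the waiting transaction from going away, so closure_get(&trans->ref) must happen while it is still held, and lock_graph_descend() then owns a reference it can release on any path that does not keep trans on the graph. In miniature, with a plain refcount standing in for the closure (hypothetical types):

#include <pthread.h>

/* Miniature of the closure_get()-before-raw_spin_unlock() reorder:
 * `lock` is what keeps `obj` stable, so the reference must be taken
 * before the lock is dropped. */
struct obj {
	int		refs;	/* protected by lock */
	pthread_mutex_t	lock;
};

static void obj_ref_then_unlock(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	o->refs++;			/* take the ref under the lock */
	pthread_mutex_unlock(&o->lock);	/* only now is it safe to drop */
	/* caller now owns a reference and may block or sleep */
}
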
@@ -308,7 +357,7 @@ next:
 
        if (g.nr > 1 && cycle)
                print_chain(cycle, &g);
-       lock_graph_pop(&g);
+       lock_graph_up(&g);
        goto next;
 }
 
@@ -333,15 +382,50 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
         * locked:
         */
        six_lock_readers_add(&b->lock, -readers);
-       ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write, lock_may_not_fail);
+       ret = __btree_node_lock_nopath(trans, b, SIX_LOCK_write,
+                                      lock_may_not_fail, _RET_IP_);
        six_lock_readers_add(&b->lock, readers);
 
        if (ret)
-               mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);
+               mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
 
        return ret;
 }
 
+void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
+                                      struct btree_path *path,
+                                      struct btree_bkey_cached_common *b)
+{
+       struct btree_path *linked;
+       unsigned i;
+       int ret;
+
+       /*
+        * XXX BIG FAT NOTICE
+        *
+        * Drop all read locks before taking a write lock:
+        *
+        * This is a hack, because bch2_btree_node_lock_write_nofail() is a
+        * hack - but by dropping read locks first, this should never fail, and
+        * we only use this in code paths where whatever read locks we've
+        * already taken are no longer needed:
+        */
+
+       trans_for_each_path(trans, linked) {
+               if (!linked->nodes_locked)
+                       continue;
+
+               for (i = 0; i < BTREE_MAX_DEPTH; i++)
+                       if (btree_node_read_locked(linked, i)) {
+                               btree_node_unlock(trans, linked, i);
+                               btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
+                       }
+       }
+
+       ret = __btree_node_lock_write(trans, path, b, true);
+       BUG_ON(ret);
+}
+
 /* relock */
 
 static inline bool btree_path_get_locks(struct btree_trans *trans,
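
Two related tricks land in the hunk above. __bch2_btree_node_lock_write() may be called while the transaction itself holds read locks on b, so it subtracts its own readers from the lock's reader count, takes the write lock (which then waits only for other readers), and adds them back. bch2_btree_node_lock_write_nofail() goes further: as its BIG FAT NOTICE says, it drops every read lock the transaction holds before a must-succeed write lock, so the nofail attempt cannot deadlock against our own reads. A simplified single-counter model of the readers trick (hypothetical helpers, not the real six lock):

#include <stdatomic.h>
#include <stdbool.h>

/* Single-counter model of the six_lock_readers_add() dance: hide our
 * own read holds from the count, try the write lock, restore. Only
 * the counter arithmetic is the point here. */
struct toy_rwlock {
	atomic_int readers;
};

static bool toy_try_write_lock(struct toy_rwlock *l)
{
	return atomic_load(&l->readers) == 0;	/* stand-in for the real acquire */
}

static bool write_lock_while_holding_reads(struct toy_rwlock *l, int my_readers)
{
	atomic_fetch_sub(&l->readers, my_readers);	/* our reads don't block us */
	bool ok = toy_try_write_lock(l);
	atomic_fetch_add(&l->readers, my_readers);	/* restore the true count */
	return ok;
}
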
@@ -405,7 +489,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
                return true;
        }
 fail:
-       if (trace)
+       if (trace && !trans->notrace_relock_fail)
                trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
        return false;
 }
@@ -467,7 +551,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
        trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
        return false;
 success:
-       mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+       mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
        return true;
 }
 
@@ -502,11 +586,15 @@ bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
        return btree_path_get_locks(trans, path, false);
 }
 
-__flatten
-bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
+int __bch2_btree_path_relock(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
 {
-       return btree_path_get_locks(trans, path, true);
+       if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
+               trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
+               return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
+       }
+
+       return 0;
 }
 
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
@@ -578,7 +666,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
                } else {
                        if (btree_node_intent_locked(path, l)) {
                                six_lock_downgrade(&path->l[l].b->c.lock);
-                               mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
+                               mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
                        }
                        break;
                }
@@ -602,7 +690,7 @@ int bch2_trans_relock(struct btree_trans *trans)
        struct btree_path *path;
 
        if (unlikely(trans->restarted))
-               return - ((int) trans->restarted);
+               return -((int) trans->restarted);
 
        trans_for_each_path(trans, path)
                if (path->should_be_locked &&
@@ -613,6 +701,29 @@ int bch2_trans_relock(struct btree_trans *trans)
        return 0;
 }
 
+int bch2_trans_relock_notrace(struct btree_trans *trans)
+{
+       struct btree_path *path;
+
+       if (unlikely(trans->restarted))
+               return -((int) trans->restarted);
+
+       trans_for_each_path(trans, path)
+               if (path->should_be_locked &&
+                   !bch2_btree_path_relock_norestart(trans, path, _RET_IP_)) {
+                       return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
+               }
+       return 0;
+}
+
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+       struct btree_path *path;
+
+       trans_for_each_path(trans, path)
+               __bch2_btree_path_unlock(trans, path);
+}
+
 void bch2_trans_unlock(struct btree_trans *trans)
 {
        struct btree_path *path;
@@ -624,8 +735,8 @@ void bch2_trans_unlock(struct btree_trans *trans)
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
-       EBUG_ON(!trans->is_initial_gc &&
-               lock_class_is_held(&bch2_btree_node_lock_key));
+       if (!trans->is_initial_gc)
+               bch2_assert_btree_nodes_not_locked();
 }
 
 bool bch2_trans_locked(struct btree_trans *trans)
@@ -638,6 +749,16 @@ bool bch2_trans_locked(struct btree_trans *trans)
        return false;
 }
 
+int __bch2_trans_mutex_lock(struct btree_trans *trans,
+                           struct mutex *lock)
+{
+       int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
+
+       if (ret)
+               mutex_unlock(lock);
+       return ret;
+}
+
 /* Debug */
 
 #ifdef CONFIG_BCACHEFS_DEBUG
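
The new __bch2_trans_mutex_lock() packages the drop_locks_do() idiom: never sleep on a mutex while holding btree locks, so drop everything, take the mutex, then relock; if relocking forces a transaction restart, release the mutex again so the caller can unwind cleanly and retry. A self-contained userspace sketch of the same shape, with a toy transaction model (all names illustrative):

#include <pthread.h>

/* Toy model of drop_locks_do(): `locked` stands in for held btree
 * locks, `must_restart` for a relock that cannot succeed. */
struct toy_trans {
	int locked;
	int must_restart;
};

static void toy_trans_unlock(struct toy_trans *t)
{
	t->locked = 0;
}

static int toy_trans_relock(struct toy_trans *t)
{
	if (t->must_restart)
		return -1;	/* stand-in for a restart error */
	t->locked = 1;
	return 0;
}

static int toy_trans_mutex_lock(struct toy_trans *t, pthread_mutex_t *lock)
{
	toy_trans_unlock(t);	/* never block on a mutex holding btree locks */
	pthread_mutex_lock(lock);

	int ret = toy_trans_relock(t);
	if (ret)				/* restart: caller retries from the top, */
		pthread_mutex_unlock(lock);	/* so don't hand it a held mutex */
	return ret;
}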