Update bcachefs sources to da7d42a9a2 bcachefs: Add new assertions for shutdown path
diff --git a/libbcachefs/btree_locking.c b/libbcachefs/btree_locking.c
index 27b725f943cd6146b609ccf0b130a2c0c30df758..d7fd87149c6325fc36c53f1b2c137d5124a65799 100644
--- a/libbcachefs/btree_locking.c
+++ b/libbcachefs/btree_locking.c
@@ -4,19 +4,25 @@
 #include "btree_locking.h"
 #include "btree_types.h"
 
-struct lock_class_key bch2_btree_node_lock_key;
+static struct lock_class_key bch2_btree_node_lock_key;
 
-/* Btree node locking: */
+void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
+                         enum six_lock_init_flags flags)
+{
+       __six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       lockdep_set_no_check_recursion(&b->lock.dep_map);
+#endif
+}
 
-static inline void six_lock_readers_add(struct six_lock *lock, int nr)
+#ifdef CONFIG_LOCKDEP
+void bch2_assert_btree_nodes_not_locked(void)
 {
-       if (lock->readers)
-               this_cpu_add(*lock->readers, nr);
-       else if (nr > 0)
-               atomic64_add(__SIX_VAL(read_lock, nr), &lock->state.counter);
-       else
-               atomic64_sub(__SIX_VAL(read_lock, -nr), &lock->state.counter);
+       BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
 }
+#endif
+
+/* Btree node locking: */
 
 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
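This hunk makes the lock class key private and routes all btree node lock setup through bch2_btree_lock_init(), which also tells lockdep to skip recursion checking for this class; bch2_assert_btree_nodes_not_locked() gives other code a CONFIG_LOCKDEP-gated way to assert the class is not held. A minimal caller-side sketch follows; the struct btree layout and the SIX_LOCK_INIT_PCPU flag name are assumptions not shown in this hunk, only bch2_btree_lock_init() itself comes from the diff.

/*
 * Sketch only: how a node-allocation path might use the new helper.
 * SIX_LOCK_INIT_PCPU and &b->c are assumed context, not part of this hunk.
 */
static void example_init_new_node(struct btree *b, bool pcpu_readers)
{
        bch2_btree_lock_init(&b->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0);
}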
@@ -105,10 +111,8 @@ static noinline void lock_graph_pop_all(struct lock_graph *g)
                lock_graph_up(g);
 }
 
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 {
-       closure_get(&trans->ref);
-
        g->g[g->nr++] = (struct trans_waiting_for_lock) {
                .trans          = trans,
                .node_want      = trans->locking,
@@ -116,6 +120,12 @@ static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
        };
 }
 
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+       closure_get(&trans->ref);
+       __lock_graph_down(g, trans);
+}
+
 static bool lock_graph_remove_non_waiters(struct lock_graph *g)
 {
        struct trans_waiting_for_lock *i;
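lock_graph_down() is split so the closure_get() on trans->ref is separated from the bookkeeping: lock_graph_down() still takes its own reference, while __lock_graph_down() only records the waiter and expects the caller to have taken, and handed over, that reference. A small sketch of the resulting contract; example_add_waiter() is hypothetical and exists only to spell it out.

/*
 * Hypothetical wrapper: whichever variant is used, the lock graph ends
 * up owning exactly one reference on trans->ref for this entry.
 */
static void example_add_waiter(struct lock_graph *g, struct btree_trans *trans,
                               bool ref_already_held)
{
        if (ref_already_held)
                __lock_graph_down(g, trans);    /* caller's ref is transferred to the graph */
        else
                lock_graph_down(g, trans);      /* takes closure_get(&trans->ref) itself */
}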
@@ -216,10 +226,14 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
        struct trans_waiting_for_lock *i;
 
        for (i = g->g; i < g->g + g->nr; i++)
-               if (i->trans == trans)
+               if (i->trans == trans) {
+                       closure_put(&trans->ref);
                        return break_cycle(g, cycle);
+               }
 
        if (g->nr == ARRAY_SIZE(g->g)) {
+               closure_put(&trans->ref);
+
                if (orig_trans->lock_may_not_fail)
                        return 0;
 
@@ -233,7 +247,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
                return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
        }
 
-       lock_graph_down(g, trans);
+       __lock_graph_down(g, trans);
        return 0;
 }
 
@@ -248,6 +262,7 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
        struct trans_waiting_for_lock *top;
        struct btree_bkey_cached_common *b;
        struct btree_path *path;
+       unsigned path_idx;
        int ret;
 
        if (trans->lock_must_abort) {
@@ -266,12 +281,12 @@ next:
 
        top = &g.g[g.nr - 1];
 
-       trans_for_each_path_from(top->trans, path, top->path_idx) {
+       trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
                if (!path->nodes_locked)
                        continue;
 
-               if (top->path_idx != path->idx) {
-                       top->path_idx           = path->idx;
+               if (path_idx != top->path_idx) {
+                       top->path_idx           = path_idx;
                        top->level              = 0;
                        top->lock_start_time    = 0;
                }
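The walk over top->trans's paths switches to the index-based "safe" iterator because that transaction belongs to another thread and may add or remove paths while the deadlock checker inspects it; keeping only a local index and re-deriving the path each step avoids following a stale pointer or a stale path->idx. A generic, hypothetical illustration of the pattern (none of these names are from bcachefs):

/*
 * Hypothetical: when the array can change between iterations, keep only
 * an index and recompute the element pointer each pass instead of
 * caching a pointer or trusting a stale element->idx field.
 */
struct elem { bool in_use; };

static void walk_safe(struct elem *arr, unsigned nr, unsigned start)
{
        unsigned i;

        for (i = start; i < nr; i++) {
                struct elem *e = &arr[i];       /* recomputed every pass */

                if (!e->in_use)
                        continue;
                /* ... inspect *e ... */
        }
}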
@@ -327,9 +342,10 @@ next:
                                    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
                                        continue;
 
-                               ret = lock_graph_descend(&g, trans, cycle);
+                               closure_get(&trans->ref);
                                raw_spin_unlock(&b->lock.wait_lock);
 
+                               ret = lock_graph_descend(&g, trans, cycle);
                                if (ret)
                                        return ret;
                                goto next;
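Taken together with the closure_put() calls added to lock_graph_descend() above, this reorders the caller so the target transaction is pinned while b->lock.wait_lock is still held, the spinlock is dropped, and only then is the graph descended. lock_graph_descend() now consumes that reference on every path: it hands it to the graph via __lock_graph_down(), or drops it when the transaction is already in the graph or the graph is full. A generic, hypothetical illustration of the same pin/unlock/consume pattern, using a plain kref in place of trans->ref's closure; none of these names are from bcachefs.

/*
 * Hypothetical: pin the object while the lock that keeps it alive is
 * held, drop the lock, then let the callee consume the reference on
 * every return path.
 */
struct obj {
        spinlock_t      lock;
        struct kref     ref;
};

static void obj_release(struct kref *ref)
{
        kfree(container_of(ref, struct obj, ref));
}

/* Consumes the reference taken by the caller, on every path. */
static int do_sleeping_work(struct obj *o)
{
        /* ... may sleep, so it must not run under o->lock ... */
        kref_put(&o->ref, obj_release);
        return 0;
}

static int pin_then_unlock_then_work(struct obj *o)
{
        spin_lock(&o->lock);
        kref_get(&o->ref);              /* pin before the lock goes away */
        spin_unlock(&o->lock);

        return do_sleeping_work(o);
}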
@@ -376,6 +392,40 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
        return ret;
 }
 
+void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
+                                      struct btree_path *path,
+                                      struct btree_bkey_cached_common *b)
+{
+       struct btree_path *linked;
+       unsigned i;
+       int ret;
+
+       /*
+        * XXX BIG FAT NOTICE
+        *
+        * Drop all read locks before taking a write lock:
+        *
+        * This is a hack, because bch2_btree_node_lock_write_nofail() is a
+        * hack - but by dropping read locks first, this should never fail, and
+        * we only use this in code paths where whatever read locks we've
+        * already taken are no longer needed:
+        */
+
+       trans_for_each_path(trans, linked) {
+               if (!linked->nodes_locked)
+                       continue;
+
+               for (i = 0; i < BTREE_MAX_DEPTH; i++)
+                       if (btree_node_read_locked(linked, i)) {
+                               btree_node_unlock(trans, linked, i);
+                               btree_path_set_dirty(linked, BTREE_ITER_NEED_RELOCK);
+                       }
+       }
+
+       ret = __btree_node_lock_write(trans, path, b, true);
+       BUG_ON(ret);
+}
+
 /* relock */
 
 static inline bool btree_path_get_locks(struct btree_trans *trans,
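bch2_btree_node_lock_write_nofail() makes the write lock non-failing by first shedding every read lock the transaction holds, as the comment in the hunk explains, so none of the transaction's own locks can block the write lock. A hedged usage sketch, assuming an update path that no longer needs its read locks; bch2_btree_node_unlock_write() is taken from the matching header and is not part of this diff.

/*
 * Illustrative caller only: lock a node for writing where remaining
 * read locks are disposable, modify it, then drop the write lock.
 * The surrounding update logic is assumed.
 */
static void example_modify_node(struct btree_trans *trans,
                                struct btree_path *path, struct btree *b)
{
        bch2_btree_node_lock_write_nofail(trans, path, &b->c);

        /* ... mutate the node while holding the write lock ... */

        bch2_btree_node_unlock_write(trans, path, b);
}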
@@ -547,13 +597,6 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
        return 0;
 }
 
-__flatten
-bool bch2_btree_path_upgrade_norestart(struct btree_trans *trans,
-                       struct btree_path *path, unsigned long trace_ip)
-{
-       return btree_path_get_locks(trans, path, true);
-}
-
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
                               unsigned new_locks_want)
@@ -673,6 +716,14 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
        return 0;
 }
 
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+       struct btree_path *path;
+
+       trans_for_each_path(trans, path)
+               __bch2_btree_path_unlock(trans, path);
+}
+
 void bch2_trans_unlock(struct btree_trans *trans)
 {
        struct btree_path *path;
@@ -684,8 +735,8 @@ void bch2_trans_unlock(struct btree_trans *trans)
         * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
         * btree nodes, it implements its own walking:
         */
-       EBUG_ON(!trans->is_initial_gc &&
-               lock_class_is_held(&bch2_btree_node_lock_key));
+       if (!trans->is_initial_gc)
+               bch2_assert_btree_nodes_not_locked();
 }
 
 bool bch2_trans_locked(struct btree_trans *trans)
@@ -701,11 +752,8 @@ bool bch2_trans_locked(struct btree_trans *trans)
 int __bch2_trans_mutex_lock(struct btree_trans *trans,
                            struct mutex *lock)
 {
-       int ret;
+       int ret = drop_locks_do(trans, (mutex_lock(lock), 0));
 
-       bch2_trans_unlock(trans);
-       mutex_lock(lock);
-       ret = bch2_trans_relock(trans);
        if (ret)
                mutex_unlock(lock);
        return ret;
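Finally, __bch2_trans_mutex_lock() is rewritten on top of drop_locks_do(), which drops the transaction's locks, evaluates the expression, and then attempts to relock. A sketch of what that helper amounts to as used here; the real macro is defined elsewhere in the tree and may differ in detail.

/*
 * Sketch (assumption, not the real definition): unlock, evaluate the
 * expression, and if it returned 0, try to retake the locks.
 */
#define drop_locks_do_sketch(_trans, _do)               \
({                                                      \
        bch2_trans_unlock(_trans);                      \
        (_do) ?: bch2_trans_relock(_trans);             \
})

With (mutex_lock(lock), 0) as the expression, the only way ret can be nonzero is a failed relock, which is why the rewritten function releases the mutex again in that case.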