]> git.sesse.net Git - bcachefs-tools-debian/blobdiff - include/linux/closure.h
remove library from bcachefs-tools Rust package
[bcachefs-tools-debian] / include / linux / closure.h
index 33280d30db01d52cdb35d4473cc1a5e2db676146..c554c6a08768ad60cdf529a65cf962095363a4a9 100644 (file)
@@ -1,8 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_CLOSURE_H
 #define _LINUX_CLOSURE_H
 
 #include <linux/llist.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
 
 /*
 
 struct closure;
 struct closure_syncer;
-typedef void (closure_fn) (struct closure *);
+typedef void (closure_fn) (struct work_struct *);
+extern struct dentry *bcache_debug;
 
 struct closure_waitlist {
        struct llist_head       list;
@@ -125,10 +128,10 @@ enum closure_state {
         * annotate where references are being transferred.
         */
 
-       CLOSURE_BITS_START      = (1U << 27),
-       CLOSURE_DESTRUCTOR      = (1U << 27),
-       CLOSURE_WAITING         = (1U << 29),
-       CLOSURE_RUNNING         = (1U << 31),
+       CLOSURE_BITS_START      = (1U << 26),
+       CLOSURE_DESTRUCTOR      = (1U << 26),
+       CLOSURE_WAITING         = (1U << 28),
+       CLOSURE_RUNNING         = (1U << 30),
 };
 
 #define CLOSURE_GUARD_MASK                                     \
@@ -151,12 +154,13 @@ struct closure {
        struct closure          *parent;
 
        atomic_t                remaining;
+       bool                    closure_get_happened;
 
 #ifdef CONFIG_DEBUG_CLOSURES
 #define CLOSURE_MAGIC_DEAD     0xc054dead
 #define CLOSURE_MAGIC_ALIVE    0xc054a11e
 
-       unsigned                magic;
+       unsigned int            magic;
        struct list_head        all;
        unsigned long           ip;
        unsigned long           waiting_on;
@@ -169,6 +173,11 @@ void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void __closure_sync(struct closure *cl);
 
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+       return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
 /**
 * closure_sync - sleep until a closure has nothing left to wait on
  *
@@ -177,7 +186,11 @@ void __closure_sync(struct closure *cl);
  */
 static inline void closure_sync(struct closure *cl)
 {
-       if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+#ifdef CONFIG_DEBUG_CLOSURES
+       BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+       if (cl->closure_get_happened)
                __closure_sync(cl);
 }
 
@@ -225,19 +238,23 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
        closure_set_ip(cl);
        cl->fn = fn;
        cl->wq = wq;
-       /* between atomic_dec() in closure_put() */
-       smp_mb__before_atomic();
 }
 
 static inline void closure_queue(struct closure *cl)
 {
        struct workqueue_struct *wq = cl->wq;
+       /*
+        * Changes made to closure, work_struct, or a couple of other structs
+        * may cause work.func to point to the wrong location.
+        */
+       BUILD_BUG_ON(offsetof(struct closure, fn)
+                    != offsetof(struct work_struct, func));
 
        if (wq) {
                INIT_WORK(&cl->work, cl->work.func);
-               queue_work(wq, &cl->work);
+               BUG_ON(!queue_work(wq, &cl->work));
        } else
-               cl->fn(cl);
+               cl->fn(&cl->work);
 }
 
 /**
@@ -245,6 +262,8 @@ static inline void closure_queue(struct closure *cl)
  */
 static inline void closure_get(struct closure *cl)
 {
+       cl->closure_get_happened = true;
+
 #ifdef CONFIG_DEBUG_CLOSURES
        BUG_ON((atomic_inc_return(&cl->remaining) &
                CLOSURE_REMAINING_MASK) <= 1);
@@ -267,6 +286,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
                closure_get(parent);
 
        atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+       cl->closure_get_happened = false;
 
        closure_debug_create(cl);
        closure_set_ip(cl);
@@ -279,19 +299,20 @@ static inline void closure_init_stack(struct closure *cl)
 }
 
 /**
- * closure_wake_up - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list,
+ *                  with memory barrier
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
+       /* Memory barrier for the wait list */
        smp_mb();
        __closure_wake_up(list);
 }
 
-#define continue_at_noreturn(_cl, _fn, _wq)                            \
-do {                                                                   \
-       set_closure_fn(_cl, _fn, _wq);                                  \
-       closure_sub(_cl, CLOSURE_RUNNING + 1);                          \
-} while (0)
+#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
+#define closure_type(name, type, member)                               \
+       struct closure *cl = container_of(ws, struct closure, work);    \
+       type *name = container_of(cl, type, member)
 
 /**
  * continue_at - jump to another function with barrier
@@ -300,16 +321,16 @@ do {                                                                      \
  * been dropped with closure_put()), it will resume execution at @fn running out
  * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
  *
- * NOTE: This macro expands to a return in the calling function!
- *
  * This is because after calling continue_at() you no longer have a ref on @cl,
  * and whatever @cl owns may be freed out from under you - a running closure fn
  * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
  */
 #define continue_at(_cl, _fn, _wq)                                     \
 do {                                                                   \
-       continue_at_noreturn(_cl, _fn, _wq);                            \
-       return;                                                         \
+       set_closure_fn(_cl, _fn, _wq);                                  \
+       closure_sub(_cl, CLOSURE_RUNNING + 1);                          \
 } while (0)
 
 /**
@@ -328,32 +349,19 @@ do {                                                                      \
  * Causes @fn to be executed out of @cl, in @wq context (or called directly if
  * @wq is NULL).
  *
- * NOTE: like continue_at(), this macro expands to a return in the caller!
- *
  * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
  * thus it's not safe to touch anything protected by @cl after a
  * continue_at_nobarrier().
  */
 #define continue_at_nobarrier(_cl, _fn, _wq)                           \
 do {                                                                   \
-       closure_set_ip(_cl);                                            \
-       if (_wq) {                                                      \
-               INIT_WORK(&(_cl)->work, (void *) _fn);                  \
-               queue_work((_wq), &(_cl)->work);                        \
-       } else {                                                        \
-               (_fn)(_cl);                                             \
-       }                                                               \
-       return;                                                         \
-} while (0)
-
-#define closure_return_with_destructor_noreturn(_cl, _destructor)      \
-do {                                                                   \
-       set_closure_fn(_cl, _destructor, NULL);                         \
-       closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);     \
+       set_closure_fn(_cl, _fn, _wq);                                  \
+       closure_queue(_cl);                                             \
 } while (0)
 
 /**
- * closure_return - finish execution of a closure, with destructor
+ * closure_return_with_destructor - finish execution of a closure,
+ *                                 with destructor
  *
  * Works like closure_return(), except @destructor will be called when all
  * outstanding refs on @cl have been dropped; @destructor may be used to safely
@@ -363,8 +371,8 @@ do {                                                                        \
  */
 #define closure_return_with_destructor(_cl, _destructor)               \
 do {                                                                   \
-       closure_return_with_destructor_noreturn(_cl, _destructor);      \
-       return;                                                         \
+       set_closure_fn(_cl, _destructor, NULL);                         \
+       closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);     \
 } while (0)
 
 /**
@@ -382,4 +390,26 @@ static inline void closure_call(struct closure *cl, closure_fn fn,
        continue_at_nobarrier(cl, fn, wq);
 }
 
+#define __closure_wait_event(waitlist, _cond)                          \
+do {                                                                   \
+       struct closure cl;                                              \
+                                                                       \
+       closure_init_stack(&cl);                                        \
+                                                                       \
+       while (1) {                                                     \
+               closure_wait(waitlist, &cl);                            \
+               if (_cond)                                              \
+                       break;                                          \
+               closure_sync(&cl);                                      \
+       }                                                               \
+       closure_wake_up(waitlist);                                      \
+       closure_sync(&cl);                                              \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond)                            \
+do {                                                                   \
+       if (!(_cond))                                                   \
+               __closure_wait_event(waitlist, _cond);                  \
+} while (0)
+
 #endif /* _LINUX_CLOSURE_H */