git.sesse.net Git - bcachefs-tools-debian/commitdiff
Update closures from kernel source tree
author     Kent Overstreet <kent.overstreet@gmail.com>
           Fri, 28 May 2021 01:13:52 +0000 (21:13 -0400)
committer  Kent Overstreet <kent.overstreet@gmail.com>
           Fri, 28 May 2021 01:13:52 +0000 (21:13 -0400)
include/linux/closure.h
include/linux/sched/debug.h [new file with mode: 0644]
include/linux/sched/task_stack.h [new file with mode: 0644]
linux/closure.c

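For orientation before the diff: a minimal sketch of how this closure API is typically used. The struct my_op and the function names below are hypothetical illustrations, not code from this commit.

struct my_op {
        struct closure  cl;
        /* ... request state ... */
};

static void my_op_complete(struct closure *cl)
{
        /* runs once all outstanding refs on cl have been dropped */
        closure_return(cl);             /* finish; drops the final ref */
}

static void my_op_start(struct my_op *op, struct closure *parent)
{
        closure_init(&op->cl, parent);  /* op->cl takes a ref on parent */
        /* ... kick off async work; each piece holds the closure alive
         * with closure_get(&op->cl) and drops it via closure_put() ... */
        continue_at(&op->cl, my_op_complete, system_wq);
        /* we no longer hold a ref on op->cl - return immediately */
}
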
diff --git a/include/linux/closure.h b/include/linux/closure.h
index a9de6d9334af2316e0709139f819799cd6b401eb..d85ca8696b746c94ccc21b0154e994ef7b932658 100644
--- a/include/linux/closure.h
+++ b/include/linux/closure.h
@@ -1,8 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_CLOSURE_H
 #define _LINUX_CLOSURE_H
 
 #include <linux/llist.h>
+#include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
 
 /*
 struct closure;
 struct closure_syncer;
 typedef void (closure_fn) (struct closure *);
+extern struct dentry *bcache_debug;
 
 struct closure_waitlist {
        struct llist_head       list;
@@ -125,10 +129,10 @@ enum closure_state {
         * annotate where references are being transferred.
         */
 
-       CLOSURE_BITS_START      = (1U << 27),
-       CLOSURE_DESTRUCTOR      = (1U << 27),
-       CLOSURE_WAITING         = (1U << 29),
-       CLOSURE_RUNNING         = (1U << 31),
+       CLOSURE_BITS_START      = (1U << 26),
+       CLOSURE_DESTRUCTOR      = (1U << 26),
+       CLOSURE_WAITING         = (1U << 28),
+       CLOSURE_RUNNING         = (1U << 30),
 };
 
 #define CLOSURE_GUARD_MASK                                     \
@@ -156,7 +160,7 @@ struct closure {
 #define CLOSURE_MAGIC_DEAD     0xc054dead
 #define CLOSURE_MAGIC_ALIVE    0xc054a11e
 
-       unsigned                magic;
+       unsigned int            magic;
        struct list_head        all;
        unsigned long           ip;
        unsigned long           waiting_on;
@@ -232,10 +236,16 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
 static inline void closure_queue(struct closure *cl)
 {
        struct workqueue_struct *wq = cl->wq;
+       /*
+        * Changes to the layout of struct closure or struct work_struct
+        * can leave work.func pointing at the wrong location.
+        */
+       BUILD_BUG_ON(offsetof(struct closure, fn)
+                    != offsetof(struct work_struct, func));
 
        if (wq) {
                INIT_WORK(&cl->work, cl->work.func);
-               queue_work(wq, &cl->work);
+               BUG_ON(!queue_work(wq, &cl->work));
        } else
                cl->fn(cl);
 }
@@ -279,20 +289,16 @@ static inline void closure_init_stack(struct closure *cl)
 }
 
 /**
- * closure_wake_up - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list,
+ *                  with memory barrier
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
+       /* Memory barrier for the wait list */
        smp_mb();
        __closure_wake_up(list);
 }
 
-#define continue_at_noreturn(_cl, _fn, _wq)                            \
-do {                                                                   \
-       set_closure_fn(_cl, _fn, _wq);                                  \
-       closure_sub(_cl, CLOSURE_RUNNING + 1);                          \
-} while (0)
-
 /**
  * continue_at - jump to another function with barrier
  *
@@ -300,16 +306,16 @@ do {                                                                      \
  * been dropped with closure_put()), it will resume execution at @fn running out
  * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
  *
- * NOTE: This macro expands to a return in the calling function!
- *
  * This is because after calling continue_at() you no longer have a ref on @cl,
  * and whatever @cl owns may be freed out from under you - a running closure fn
  * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
  */
 #define continue_at(_cl, _fn, _wq)                                     \
 do {                                                                   \
-       continue_at_noreturn(_cl, _fn, _wq);                            \
-       return;                                                         \
+       set_closure_fn(_cl, _fn, _wq);                                  \
+       closure_sub(_cl, CLOSURE_RUNNING + 1);                          \
 } while (0)
 
 /**
@@ -328,32 +334,19 @@ do {                                                                      \
  * Causes @fn to be executed out of @cl, in @wq context (or called directly if
  * @wq is NULL).
  *
- * NOTE: like continue_at(), this macro expands to a return in the caller!
- *
  * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
  * thus it's not safe to touch anything protected by @cl after a
  * continue_at_nobarrier().
  */
 #define continue_at_nobarrier(_cl, _fn, _wq)                           \
 do {                                                                   \
-       closure_set_ip(_cl);                                            \
-       if (_wq) {                                                      \
-               INIT_WORK(&(_cl)->work, (void *) _fn);                  \
-               queue_work((_wq), &(_cl)->work);                        \
-       } else {                                                        \
-               (_fn)(_cl);                                             \
-       }                                                               \
-       return;                                                         \
-} while (0)
-
-#define closure_return_with_destructor_noreturn(_cl, _destructor)      \
-do {                                                                   \
-       set_closure_fn(_cl, _destructor, NULL);                         \
-       closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);     \
+       set_closure_fn(_cl, _fn, _wq);                                  \
+       closure_queue(_cl);                                             \
 } while (0)
 
 /**
- * closure_return - finish execution of a closure, with destructor
+ * closure_return_with_destructor - finish execution of a closure,
+ *                                 with destructor
  *
  * Works like closure_return(), except @destructor will be called when all
  * outstanding refs on @cl have been dropped; @destructor may be used to safely
@@ -363,8 +356,8 @@ do {                                                                        \
  */
 #define closure_return_with_destructor(_cl, _destructor)               \
 do {                                                                   \
-       closure_return_with_destructor_noreturn(_cl, _destructor);      \
-       return;                                                         \
+       set_closure_fn(_cl, _destructor, NULL);                         \
+       closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);     \
 } while (0)
 
 /**
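
To make the closure_return_with_destructor() contract concrete, a sketch reusing the hypothetical struct my_op from above; my_op_free() and my_op_done() are likewise illustrative only.

static void my_op_free(struct closure *cl)
{
        struct my_op *op = container_of(cl, struct my_op, cl);

        /* runs only after every outstanding ref on cl is gone, so
         * freeing the enclosing object here is safe */
        kfree(op);
}

static void my_op_done(struct closure *cl)
{
        /* hand the last ref to the destructor, then return immediately */
        closure_return_with_destructor(cl, my_op_free);
}
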
diff --git a/include/linux/sched/debug.h b/include/linux/sched/debug.h
new file mode 100644
index 0000000..e69de29
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h
new file mode 100644
index 0000000..e69de29
diff --git a/linux/closure.c b/linux/closure.c
index 26a29356b43e248d9a5a42b62e2038a4c1b7776e..b38ded00b9b052869e5ffab9873d0faffe955422 100644
--- a/linux/closure.c
+++ b/linux/closure.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Asynchronous refcounty things
  *
@@ -9,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/sched/debug.h>
 
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
@@ -44,7 +46,7 @@ void closure_sub(struct closure *cl, int v)
 }
 EXPORT_SYMBOL(closure_sub);
 
-/**
+/*
  * closure_put - decrement a closure's refcount
  */
 void closure_put(struct closure *cl)
@@ -53,24 +55,22 @@ void closure_put(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_put);
 
-/**
+/*
  * closure_wake_up - wake up all closures on a wait list, without memory barrier
  */
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
-       struct llist_node *list, *next;
-       struct closure *cl;
+       struct llist_node *list;
+       struct closure *cl, *t;
+       struct llist_node *reverse = NULL;
+
+       list = llist_del_all(&wait_list->list);
 
-       /*
-        * Grab entire list, reverse order to preserve FIFO ordering, and wake
-        * everything up
-        */
-       for (list = llist_reverse_order(llist_del_all(&wait_list->list));
-            list;
-            list = next) {
-               next = llist_next(list);
-               cl = container_of(list, struct closure, list);
+       /* We first reverse the list to preserve FIFO ordering and fairness */
+       reverse = llist_reverse_order(list);
 
+       /* Then do the wakeups */
+       llist_for_each_entry_safe(cl, t, reverse, list) {
                closure_set_waiting(cl, 0);
                closure_sub(cl, CLOSURE_WAITING + 1);
        }
@@ -79,9 +79,9 @@ EXPORT_SYMBOL(__closure_wake_up);
 
 /**
  * closure_wait - add a closure to a waitlist
- *
- * @waitlist will own a ref on @cl, which will be released when
+ * @waitlist: will own a ref on @cl, which will be released when
  * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
  *
  */
 bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
@@ -104,8 +104,14 @@ struct closure_syncer {
 
 static void closure_sync_fn(struct closure *cl)
 {
-       cl->s->done = 1;
-       wake_up_process(cl->s->task);
+       struct closure_syncer *s = cl->s;
+       struct task_struct *p;
+
+       rcu_read_lock();
+       p = READ_ONCE(s->task);
+       s->done = 1;
+       wake_up_process(p);
+       rcu_read_unlock();
 }
 
 void __sched __closure_sync(struct closure *cl)
@@ -113,11 +119,10 @@ void __sched __closure_sync(struct closure *cl)
        struct closure_syncer s = { .task = current };
 
        cl->s = &s;
-       continue_at_noreturn(cl, closure_sync_fn, NULL);
+       continue_at(cl, closure_sync_fn, NULL);
 
        while (1) {
-               __set_current_state(TASK_UNINTERRUPTIBLE);
-               smp_mb();
+               set_current_state(TASK_UNINTERRUPTIBLE);
                if (s.done)
                        break;
                schedule();
@@ -158,9 +163,7 @@ void closure_debug_destroy(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_debug_destroy);
 
-static struct dentry *debug;
-
-static int debug_seq_show(struct seq_file *f, void *data)
+static int debug_show(struct seq_file *f, void *data)
 {
        struct closure *cl;
 
@@ -169,7 +172,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
        list_for_each_entry(cl, &closure_list, all) {
                int r = atomic_read(&cl->remaining);
 
-               seq_printf(f, "%p: %pF -> %pf p %p r %i ",
+               seq_printf(f, "%p: %pS -> %pS p %p r %i ",
                           cl, (void *) cl->ip, cl->fn, cl->parent,
                           r & CLOSURE_REMAINING_MASK);
 
@@ -179,7 +182,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
                           r & CLOSURE_RUNNING  ? "R" : "");
 
                if (r & CLOSURE_WAITING)
-                       seq_printf(f, " W %pF\n",
+                       seq_printf(f, " W %pS\n",
                                   (void *) cl->waiting_on);
 
                seq_puts(f, "\n");
@@ -189,21 +192,11 @@ static int debug_seq_show(struct seq_file *f, void *data)
        return 0;
 }
 
-static int debug_seq_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, debug_seq_show, NULL);
-}
-
-static const struct file_operations debug_ops = {
-       .owner          = THIS_MODULE,
-       .open           = debug_seq_open,
-       .read           = seq_read,
-       .release        = single_release
-};
+DEFINE_SHOW_ATTRIBUTE(debug);
 
 static int __init closure_debug_init(void)
 {
-       debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
+       debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
        return 0;
 }
 late_initcall(closure_debug_init)
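
The reworked __closure_wake_up() above pairs with closure_wait() roughly as in the following sketch. The struct my_dev fields, my_resume(), and the slot accounting are hypothetical, assumed only for illustration.

static void my_resume(struct closure *cl);      /* hypothetical: retries my_take_slot() */

static void my_take_slot(struct my_dev *d, struct closure *cl)
{
        if (!atomic_add_unless(&d->free_slots, -1, 0)) {
                /* park cl: the waitlist now owns a CLOSURE_WAITING ref.
                 * (Real callers re-check the condition after closure_wait()
                 * to close the race with my_release_slot().) */
                closure_wait(&d->slot_wait, cl);
                continue_at(cl, my_resume, system_wq);
                return;         /* no refs held on cl past this point */
        }
        /* slot acquired; proceed */
}

static void my_release_slot(struct my_dev *d)
{
        atomic_inc(&d->free_slots);
        /* the smp_mb() in closure_wake_up() orders the increment above
         * against the waitlist scan; waiters are woken in FIFO order */
        closure_wake_up(&d->slot_wait);
}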