git.sesse.net Git - bcachefs-tools-debian/commitdiff
Change preempt_disable() etc. to use a recursive mutex.
authorJustin Husted <sigstop@gmail.com>
Thu, 10 Oct 2019 02:27:22 +0000 (19:27 -0700)
committerKent Overstreet <kent.overstreet@gmail.com>
Mon, 4 Nov 2019 04:17:43 +0000 (23:17 -0500)
This is part of the userspace implementation of the kernel APIs for
bcachefs-tools. The previous implementation just provided a barrier, but
this isn't sufficient to make the associated percpu implementation safe.

Signed-off-by: Justin Husted <sigstop@gmail.com>
include/linux/preempt.h
linux/preempt.c [new file with mode: 0644]

index 061860161c049e31914de2d698bfd782728b5006..dbc7c24dd89460852b4923a04846de46a325204d 100644 (file)
@@ -1,15 +1,16 @@
 #ifndef __LINUX_PREEMPT_H
 #define __LINUX_PREEMPT_H
 
-#define preempt_disable()                      barrier()
-#define sched_preempt_enable_no_resched()      barrier()
-#define preempt_enable_no_resched()            barrier()
-#define preempt_enable()                       barrier()
+extern void preempt_disable(void);
+extern void preempt_enable(void);
+
+#define sched_preempt_enable_no_resched()      preempt_enable()
+#define preempt_enable_no_resched()            preempt_enable()
 #define preempt_check_resched()                        do { } while (0)
 
-#define preempt_disable_notrace()              barrier()
-#define preempt_enable_no_resched_notrace()    barrier()
-#define preempt_enable_notrace()               barrier()
+#define preempt_disable_notrace()              preempt_disable()
+#define preempt_enable_no_resched_notrace()    preempt_enable()
+#define preempt_enable_notrace()               preempt_enable()
 #define preemptible()                          0
 
 #endif /* __LINUX_PREEMPT_H */
diff --git a/linux/preempt.c b/linux/preempt.c
new file mode 100644 (file)
index 0000000..aa092c1
--- /dev/null
@@ -0,0 +1,28 @@
+#include <pthread.h>
+
+#include "linux/preempt.h"
+
+/*
+ * In userspace, pthreads are preemptible and can migrate CPUs at any time.
+ *
+ * In the kernel, preempt_disable() logic essentially guarantees that a marked
+ * critical section owns its CPU for the relevant block. This is necessary for
+ * various code paths, critically including the percpu system as it allows for
+ * non-atomic reads and writes to CPU-local data structures.
+ *
+ * The high performance userspace equivalent would be to use thread local
+ * storage to replace percpu data, but that would be complicated. It should be
+ * correct to instead guarantee mutual exclusion for the critical sections.
+ */
+
+static pthread_mutex_t preempt_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+void preempt_disable(void)
+{
+       pthread_mutex_lock(&preempt_lock);
+}
+
+void preempt_enable(void)
+{
+       pthread_mutex_unlock(&preempt_lock);
+}