#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H
-#define preempt_disable() barrier()
-#define sched_preempt_enable_no_resched() barrier()
-#define preempt_enable_no_resched() barrier()
-#define preempt_enable() barrier()
+extern void preempt_disable(void);
+extern void preempt_enable(void);
+
+#define sched_preempt_enable_no_resched() preempt_enable()
+#define preempt_enable_no_resched() preempt_enable()
#define preempt_check_resched() do { } while (0)
-#define preempt_disable_notrace() barrier()
-#define preempt_enable_no_resched_notrace() barrier()
-#define preempt_enable_notrace() barrier()
+#define preempt_disable_notrace() preempt_disable()
+#define preempt_enable_no_resched_notrace() preempt_enable()
+#define preempt_enable_notrace() preempt_enable()
#define preemptible() 0
#endif /* __LINUX_PREEMPT_H */
--- /dev/null
+#include <pthread.h>
+
+#include "linux/preempt.h"
+
+/*
+ * In userspace, pthreads are preemptible and can migrate CPUs at any time.
+ *
+ * In the kernel, preempt_disable() logic essentially guarantees that a marked
+ * critical section owns its CPU for the relevant block. This is necessary for
+ * various code paths, critically including the percpu system as it allows for
+ * non-atomic reads and writes to CPU-local data structures.
+ *
+ * The high performance userspace equivalent would be to use thread local
+ * storage to replace percpu data, but that would be complicated. It should be
+ * correct to instead guarantee mutual exclusion for the critical sections.
+ */
+
+static pthread_mutex_t preempt_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+void preempt_disable(void)
+{
+ pthread_mutex_lock(&preempt_lock);
+}
+
+void preempt_enable(void)
+{
+ pthread_mutex_unlock(&preempt_lock);
+}