3 #include <linux/kthread.h>
4 #include <linux/slab.h>
5 #include <linux/workqueue.h>
/* Single global lock serializing all workqueue state: every queue's
 * pending list, current_work pointer, and the wq_list itself. */
static pthread_mutex_t wq_lock = PTHREAD_MUTEX_INITIALIZER;

/* All live workqueues, scanned by __flush_work(); protected by wq_lock. */
static LIST_HEAD(wq_list);
10 struct workqueue_struct {
11 struct list_head list;
13 struct work_struct *current_work;
14 struct list_head pending_work;
16 pthread_cond_t work_finished;
18 struct task_struct *worker;
26 static void clear_work_pending(struct work_struct *work)
28 clear_bit(WORK_PENDING_BIT, work_data_bits(work));
31 static bool set_work_pending(struct work_struct *work)
33 return !test_and_set_bit(WORK_PENDING_BIT, work_data_bits(work));
36 static void __queue_work(struct workqueue_struct *wq,
37 struct work_struct *work)
39 BUG_ON(!test_bit(WORK_PENDING_BIT, work_data_bits(work)));
40 BUG_ON(!list_empty(&work->entry));
42 list_add_tail(&work->entry, &wq->pending_work);
43 wake_up_process(wq->worker);
46 bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
50 pthread_mutex_lock(&wq_lock);
51 if ((ret = set_work_pending(work)))
52 __queue_work(wq, work);
53 pthread_mutex_unlock(&wq_lock);
58 void delayed_work_timer_fn(unsigned long __data)
60 struct delayed_work *dwork = (struct delayed_work *) __data;
62 pthread_mutex_lock(&wq_lock);
63 __queue_work(dwork->wq, &dwork->work);
64 pthread_mutex_unlock(&wq_lock);
67 static void __queue_delayed_work(struct workqueue_struct *wq,
68 struct delayed_work *dwork,
71 struct timer_list *timer = &dwork->timer;
72 struct work_struct *work = &dwork->work;
74 BUG_ON(timer->function != delayed_work_timer_fn ||
75 timer->data != (unsigned long)dwork);
76 BUG_ON(timer_pending(timer));
77 BUG_ON(!list_empty(&work->entry));
80 __queue_work(wq, &dwork->work);
83 timer->expires = jiffies + delay;
88 bool queue_delayed_work(struct workqueue_struct *wq,
89 struct delayed_work *dwork,
92 struct work_struct *work = &dwork->work;
95 pthread_mutex_lock(&wq_lock);
96 if ((ret = set_work_pending(work)))
97 __queue_delayed_work(wq, dwork, delay);
98 pthread_mutex_unlock(&wq_lock);
103 static bool grab_pending(struct work_struct *work, bool is_dwork)
106 if (set_work_pending(work)) {
107 BUG_ON(!list_empty(&work->entry));
112 struct delayed_work *dwork = to_delayed_work(work);
114 if (likely(del_timer(&dwork->timer))) {
115 BUG_ON(!list_empty(&work->entry));
120 if (!list_empty(&work->entry)) {
121 list_del_init(&work->entry);
127 pthread_mutex_unlock(&wq_lock);
129 pthread_mutex_lock(&wq_lock);
133 static bool __flush_work(struct work_struct *work)
135 struct workqueue_struct *wq;
138 list_for_each_entry(wq, &wq_list, list)
139 if (wq->current_work == work) {
140 pthread_cond_wait(&wq->work_finished, &wq_lock);
148 bool cancel_work_sync(struct work_struct *work)
152 pthread_mutex_lock(&wq_lock);
153 ret = grab_pending(work, false);
156 clear_work_pending(work);
157 pthread_mutex_unlock(&wq_lock);
162 bool mod_delayed_work(struct workqueue_struct *wq,
163 struct delayed_work *dwork,
166 struct work_struct *work = &dwork->work;
169 pthread_mutex_lock(&wq_lock);
170 ret = grab_pending(work, true);
172 __queue_delayed_work(wq, dwork, delay);
173 pthread_mutex_unlock(&wq_lock);
178 bool cancel_delayed_work(struct delayed_work *dwork)
180 struct work_struct *work = &dwork->work;
183 pthread_mutex_lock(&wq_lock);
184 ret = grab_pending(work, true);
186 clear_work_pending(&dwork->work);
187 pthread_mutex_unlock(&wq_lock);
192 bool cancel_delayed_work_sync(struct delayed_work *dwork)
194 struct work_struct *work = &dwork->work;
197 pthread_mutex_lock(&wq_lock);
198 ret = grab_pending(work, true);
201 clear_work_pending(work);
202 pthread_mutex_unlock(&wq_lock);
207 static int worker_thread(void *arg)
209 struct workqueue_struct *wq = arg;
210 struct work_struct *work;
212 pthread_mutex_lock(&wq_lock);
214 __set_current_state(TASK_INTERRUPTIBLE);
215 work = list_first_entry_or_null(&wq->pending_work,
216 struct work_struct, entry);
217 wq->current_work = work;
219 if (kthread_should_stop()) {
220 BUG_ON(wq->current_work);
225 pthread_mutex_unlock(&wq_lock);
227 pthread_mutex_lock(&wq_lock);
231 BUG_ON(!test_bit(WORK_PENDING_BIT, work_data_bits(work)));
232 list_del_init(&work->entry);
233 clear_work_pending(work);
235 pthread_mutex_unlock(&wq_lock);
237 pthread_mutex_lock(&wq_lock);
239 pthread_cond_broadcast(&wq->work_finished);
241 pthread_mutex_unlock(&wq_lock);
246 void destroy_workqueue(struct workqueue_struct *wq)
248 kthread_stop(wq->worker);
250 pthread_mutex_lock(&wq_lock);
252 pthread_mutex_unlock(&wq_lock);
257 struct workqueue_struct *alloc_workqueue(const char *fmt,
263 struct workqueue_struct *wq;
265 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
269 INIT_LIST_HEAD(&wq->list);
270 INIT_LIST_HEAD(&wq->pending_work);
272 pthread_cond_init(&wq->work_finished, NULL);
274 va_start(args, max_active);
275 vsnprintf(wq->name, sizeof(wq->name), fmt, args);
278 wq->worker = kthread_run(worker_thread, wq, "%s", wq->name);
279 if (IS_ERR(wq->worker)) {
284 pthread_mutex_lock(&wq_lock);
285 list_add(&wq->list, &wq_list);
286 pthread_mutex_unlock(&wq_lock);
/* Standard system-wide workqueues, mirroring the kernel's globals; all
 * created by wq_init() at load time.  In this shim they behave alike. */
struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_freezable_wq;
297 __attribute__((constructor(102)))
298 static void wq_init(void)
300 system_wq = alloc_workqueue("events", 0, 0);
301 system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
302 system_long_wq = alloc_workqueue("events_long", 0, 0);
303 system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
304 WQ_UNBOUND_MAX_ACTIVE);
305 system_freezable_wq = alloc_workqueue("events_freezable",
307 BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
308 !system_unbound_wq || !system_freezable_wq);