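/*
 * Userspace shim for the Linux kernel workqueue API, built on pthreads.
 *
 * Each workqueue gets a single worker thread, and one global mutex
 * serializes all workqueue state; this trades the kernel's concurrency
 * for simplicity.
 */
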
#include <pthread.h>
#include <stdarg.h>
#include <stdio.h>

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

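/*
 * All workqueue state is protected by this one lock; wq_list tracks every
 * live workqueue so __flush_work() can find the one executing a given item.
 */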
static pthread_mutex_t	wq_lock = PTHREAD_MUTEX_INITIALIZER;
static LIST_HEAD(wq_list);

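/*
 * A workqueue is a pending list drained by one worker thread.  current_work
 * is the item the worker is executing right now (NULL if idle), and
 * work_finished is broadcast after each item completes so flushers can wait.
 */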
struct workqueue_struct {
	struct list_head	list;

	struct work_struct	*current_work;
	struct list_head	pending_work;

	pthread_cond_t		work_finished;

	struct task_struct	*worker;
	char			name[24];
};

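/*
 * Each work item carries a "pending" bit in its data word, mirroring the
 * kernel's WORK_STRUCT_PENDING_BIT: it is set from the moment the item is
 * queued (or its timer armed) until the worker picks it up.
 */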
enum {
	WORK_PENDING_BIT,
};

static void clear_work_pending(struct work_struct *work)
{
	clear_bit(WORK_PENDING_BIT, work_data_bits(work));
}

static bool set_work_pending(struct work_struct *work)
{
	return !test_and_set_bit(WORK_PENDING_BIT, work_data_bits(work));
}

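/* Caller must hold wq_lock and have already set WORK_PENDING_BIT. */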
static void __queue_work(struct workqueue_struct *wq,
			 struct work_struct *work)
{
	BUG_ON(!test_bit(WORK_PENDING_BIT, work_data_bits(work)));
	BUG_ON(!list_empty(&work->entry));

	list_add_tail(&work->entry, &wq->pending_work);
	wake_up_process(wq->worker);
}

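/* Returns true if @work was newly queued, false if it was already pending. */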
bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	bool ret;

	pthread_mutex_lock(&wq_lock);
	if ((ret = set_work_pending(work)))
		__queue_work(wq, work);
	pthread_mutex_unlock(&wq_lock);

	return ret;
}

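/* Timer callback: the delay has elapsed, so queue the work for real. */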
void delayed_work_timer_fn(struct timer_list *timer)
{
	struct delayed_work *dwork =
		container_of(timer, struct delayed_work, timer);

	pthread_mutex_lock(&wq_lock);
	__queue_work(dwork->wq, &dwork->work);
	pthread_mutex_unlock(&wq_lock);
}

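/* With a zero delay, skip the timer and queue immediately. */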
static void __queue_delayed_work(struct workqueue_struct *wq,
				 struct delayed_work *dwork,
				 unsigned long delay)
{
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	BUG_ON(timer->function != delayed_work_timer_fn);
	BUG_ON(timer_pending(timer));
	BUG_ON(!list_empty(&work->entry));

	if (!delay) {
		__queue_work(wq, &dwork->work);
	} else {
		dwork->wq = wq;
		timer->expires = jiffies + delay;
		add_timer(timer);
	}
}

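/* As queue_work(), but the item only becomes runnable after @delay jiffies. */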
bool queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork,
			unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret;

	pthread_mutex_lock(&wq_lock);
	if ((ret = set_work_pending(work)))
		__queue_delayed_work(wq, dwork, delay);
	pthread_mutex_unlock(&wq_lock);

	return ret;
}

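/*
 * Take ownership of a work item: set the pending bit, and pull the item off
 * whichever mechanism (timer or pending list) currently holds it.  Returns
 * false if the item wasn't pending, true if we stole a pending instance.
 *
 * If a delayed work's timer has fired but its callback hasn't requeued the
 * item yet, we can see it in neither place: drop the lock, wait for timer
 * callbacks to finish running, and retry.
 */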
static bool grab_pending(struct work_struct *work, bool is_dwork)
{
retry:
	if (set_work_pending(work)) {
		BUG_ON(!list_empty(&work->entry));
		return false;
	}

	if (is_dwork) {
		struct delayed_work *dwork = to_delayed_work(work);

		if (likely(del_timer(&dwork->timer))) {
			BUG_ON(!list_empty(&work->entry));
			return true;
		}
	}

	if (!list_empty(&work->entry)) {
		list_del_init(&work->entry);
		return true;
	}

	BUG_ON(!is_dwork);

	pthread_mutex_unlock(&wq_lock);
	flush_timers();
	pthread_mutex_lock(&wq_lock);
	goto retry;
}

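/*
 * Wait, under wq_lock, until no worker is executing @work; returns true if
 * we had to wait.  The lock is dropped and retaken inside pthread_cond_wait().
 */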
static bool __flush_work(struct work_struct *work)
{
	struct workqueue_struct *wq;
	bool ret = false;
retry:
	list_for_each_entry(wq, &wq_list, list)
		if (wq->current_work == work) {
			pthread_cond_wait(&wq->work_finished, &wq_lock);
			ret = true;
			goto retry;
		}

	return ret;
}

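/*
 * Cancel @work and wait for any in-flight execution to finish.  Returns true
 * if the work was pending.
 */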
bool cancel_work_sync(struct work_struct *work)
{
	bool ret;

	pthread_mutex_lock(&wq_lock);
	ret = grab_pending(work, false);

	__flush_work(work);
	clear_work_pending(work);
	pthread_mutex_unlock(&wq_lock);

	return ret;
}

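/*
 * Reschedule @dwork to run @delay jiffies from now, whether or not it was
 * already queued.  Returns true if it was pending.
 */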
bool mod_delayed_work(struct workqueue_struct *wq,
		      struct delayed_work *dwork,
		      unsigned long delay)
{
	struct work_struct *work = &dwork->work;
	bool ret;

	pthread_mutex_lock(&wq_lock);
	ret = grab_pending(work, true);

	__queue_delayed_work(wq, dwork, delay);
	pthread_mutex_unlock(&wq_lock);

	return ret;
}

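/*
 * Cancel @dwork if it hasn't started executing; unlike the _sync variant,
 * this does not wait for a running instance to finish.
 */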
bool cancel_delayed_work(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;
	bool ret;

	pthread_mutex_lock(&wq_lock);
	ret = grab_pending(work, true);

	clear_work_pending(&dwork->work);
	pthread_mutex_unlock(&wq_lock);

	return ret;
}

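/* As cancel_work_sync(), but also handles a still-armed timer. */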
bool cancel_delayed_work_sync(struct delayed_work *dwork)
{
	struct work_struct *work = &dwork->work;
	bool ret;

	pthread_mutex_lock(&wq_lock);
	ret = grab_pending(work, true);

	__flush_work(work);
	clear_work_pending(work);
	pthread_mutex_unlock(&wq_lock);

	return ret;
}

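/*
 * The per-workqueue worker: pop one item at a time off the pending list, run
 * it with wq_lock dropped, then broadcast work_finished for any flushers.
 * Marking the task interruptible before checking for work avoids losing a
 * wakeup between the check and schedule().
 */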
static int worker_thread(void *arg)
{
	struct workqueue_struct *wq = arg;
	struct work_struct *work;

	pthread_mutex_lock(&wq_lock);
	while (1) {
		__set_current_state(TASK_INTERRUPTIBLE);
		work = list_first_entry_or_null(&wq->pending_work,
				struct work_struct, entry);
		wq->current_work = work;

		if (kthread_should_stop()) {
			BUG_ON(wq->current_work);
			break;
		}

		if (!work) {
			pthread_mutex_unlock(&wq_lock);
			schedule();
			pthread_mutex_lock(&wq_lock);
			continue;
		}

		BUG_ON(!test_bit(WORK_PENDING_BIT, work_data_bits(work)));
		list_del_init(&work->entry);
		clear_work_pending(work);

		pthread_mutex_unlock(&wq_lock);
		work->func(work);
		pthread_mutex_lock(&wq_lock);

		pthread_cond_broadcast(&wq->work_finished);
	}
	pthread_mutex_unlock(&wq_lock);

	return 0;
}

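/*
 * Tear down @wq.  The pending list must already be empty: the worker
 * BUG()s on shutdown if anything is still queued.
 */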
void destroy_workqueue(struct workqueue_struct *wq)
{
	kthread_stop(wq->worker);

	pthread_mutex_lock(&wq_lock);
	list_del(&wq->list);
	pthread_mutex_unlock(&wq_lock);

	kfree(wq);
}

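/*
 * Create a workqueue with a printf-style name.  @flags and @max_active are
 * accepted for API compatibility but ignored: every queue here is a single
 * worker thread.
 */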
struct workqueue_struct *alloc_workqueue(const char *fmt,
					 unsigned flags,
					 int max_active,
					 ...)
{
	va_list args;
	struct workqueue_struct *wq;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	INIT_LIST_HEAD(&wq->list);
	INIT_LIST_HEAD(&wq->pending_work);

	pthread_cond_init(&wq->work_finished, NULL);

	va_start(args, max_active);
	vsnprintf(wq->name, sizeof(wq->name), fmt, args);
	va_end(args);

	wq->worker = kthread_run(worker_thread, wq, "%s", wq->name);
	if (IS_ERR(wq->worker)) {
		kfree(wq);
		return NULL;
	}

	pthread_mutex_lock(&wq_lock);
	list_add(&wq->list, &wq_list);
	pthread_mutex_unlock(&wq_lock);

	return wq;
}

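/* The standard system workqueues, matching the kernel's globals. */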
struct workqueue_struct *system_wq;
struct workqueue_struct *system_highpri_wq;
struct workqueue_struct *system_long_wq;
struct workqueue_struct *system_unbound_wq;
struct workqueue_struct *system_freezable_wq;

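/*
 * Runs before main(); the explicit constructor priority orders this after
 * any lower-priority initializers these shims depend on.
 */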
__attribute__((constructor(102)))
static void wq_init(void)
{
	system_wq = alloc_workqueue("events", 0, 0);
	system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
	system_long_wq = alloc_workqueue("events_long", 0, 0);
	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
					    WQ_UNBOUND_MAX_ACTIVE);
	system_freezable_wq = alloc_workqueue("events_freezable",
					      WQ_FREEZABLE, 0);
	BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
	       !system_unbound_wq || !system_freezable_wq);
}
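
/*
 * Example usage (a sketch: my_work/my_fn are illustrative names, and this
 * assumes the usual INIT_WORK() macro from the linux/workqueue.h shim):
 *
 *	static void my_fn(struct work_struct *work)
 *	{
 *		// runs on the workqueue's worker thread
 *	}
 *
 *	static struct work_struct my_work;
 *
 *	INIT_WORK(&my_work, my_fn);
 *	queue_work(system_wq, &my_work);	// false if already pending
 *	...
 *	cancel_work_sync(&my_work);		// waits for a running instance
 */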