/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */

#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/wait.h>

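/*
 * Userspace port of the kernel's waitqueue primitives for the linux/
 * compatibility layer.  A wait_queue_head_t is a spinlock plus a list of
 * wait_queue_t entries; each entry carries the waiting task and a wake
 * function invoked when the queue is woken.
 */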
static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

static inline void __add_wait_queue_tail(wait_queue_head_t *head,
					 wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void
__add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

static inline void
__remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
{
	list_del(&old->task_list);
}

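/*
 * The core wakeup loop: invoke each waiter's wake function in list order.
 * Non-exclusive waiters never decrement nr_exclusive and are all woken;
 * after nr_exclusive exclusive waiters have been woken we stop, so a
 * nr_exclusive of 0 wakes everybody.
 */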
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			     int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		if (curr->func(curr, mode, wake_flags, key) &&
				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

static void __wake_up(wait_queue_head_t *q, unsigned int mode,
		      int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}

void wake_up(wait_queue_head_t *q)
{
	__wake_up(q, TASK_NORMAL, 1, NULL);
}

void wake_up_all(wait_queue_head_t *q)
{
	__wake_up(q, TASK_NORMAL, 0, NULL);
}

static void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
{
	__wake_up_common(q, mode, nr, 0, NULL);
}

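/*
 * prepare_to_wait()/finish_wait() implement the usual wait-loop pattern.
 * A sketch of typical usage (assuming DEFINE_WAIT() from the accompanying
 * wait.h, and a caller-maintained `condition`):
 *
 *	DEFINE_WAIT(wait);
 *
 *	while (1) {
 *		prepare_to_wait(&q, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	finish_wait(&q, &wait);
 *
 * Setting the task state before testing the condition means a wakeup that
 * arrives between the test and schedule() just makes schedule() return
 * immediately rather than being lost.
 */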
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}

static void
prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	wait->flags |= WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	if (list_empty(&wait->task_list))
		__add_wait_queue_tail(q, wait);
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}

void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area), and
	 *  - all other users take the lock (i.e. we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}

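/*
 * Wake functions are called by __wake_up_common() with the queue lock held;
 * a nonzero return means a task was actually woken.  autoremove_wake_function()
 * also takes the entry off the queue, leaving finish_wait() nothing to do.
 */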
int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
			  void *key)
{
	return wake_up_process(curr->private);
}

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int ret = default_wake_function(wait, mode, sync, key);

	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}

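/*
 * Bit waitqueues: all bit waiters share the single global queue bit_wq
 * (declared below).  The wait_bit_key records which (word, bit) a waiter is
 * blocked on, so wake_bit_function() wakes only matching waiters, and only
 * once the bit has actually been cleared.
 */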
struct wait_bit_key {
	void			*flags;
	int			bit_nr;
	unsigned long		timeout;
};

struct wait_bit_queue {
	struct wait_bit_key	key;
	wait_queue_t		wait;
};

static int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue *wait_bit =
		container_of(wait, struct wait_bit_queue, wait);

	return (wait_bit->key.flags == key->flags &&
		wait_bit->key.bit_nr == key->bit_nr &&
		!test_bit(key->bit_nr, key->flags))
		? autoremove_wake_function(wait, mode, sync, key) : 0;
}

static DECLARE_WAIT_QUEUE_HEAD(bit_wq);

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

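/*
 * Usage sketch for the waker side (MY_BIT is a placeholder; assumes
 * kernel-style atomic bit helpers).  Because wake_up_bit() tests
 * waitqueue_active() without taking the lock, the waker needs a full
 * barrier between clearing the bit and waking:
 *
 *	clear_bit(MY_BIT, &word);
 *	smp_mb__after_atomic();
 *	wake_up_bit(&word, MY_BIT);
 */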
void wake_up_bit(void *word, int bit)
{
	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);

	if (waitqueue_active(&bit_wq))
		__wake_up(&bit_wq, TASK_NORMAL, 1, &key);
}

void __wait_on_bit(void *word, int bit, unsigned mode)
{
	DEFINE_WAIT_BIT(wait, word, bit);

	do {
		prepare_to_wait(&bit_wq, &wait.wait, mode);
		if (test_bit(wait.key.bit_nr, wait.key.flags))
			schedule();
	} while (test_bit(wait.key.bit_nr, wait.key.flags));

	finish_wait(&bit_wq, &wait.wait);
}

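/*
 * Like __wait_on_bit(), but for acquiring the bit as a lock: waiters queue
 * exclusively so only one is woken at a time, and the loop only exits once
 * this waiter's test_and_set_bit() observes the bit clear and sets it.
 */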
void __wait_on_bit_lock(void *word, int bit, unsigned mode)
{
	DEFINE_WAIT_BIT(wait, word, bit);

	do {
		prepare_to_wait_exclusive(&bit_wq, &wait.wait, mode);
		if (!test_bit(wait.key.bit_nr, wait.key.flags))
			continue;
		schedule();
	} while (test_and_set_bit(wait.key.bit_nr, wait.key.flags));
	finish_wait(&bit_wq, &wait.wait);
}

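/*
 * Completions: x->done counts outstanding complete() calls.  complete()
 * bumps it and wakes one exclusive waiter; wait_for_completion() sleeps
 * until done is nonzero, then consumes one count, so each complete()
 * releases exactly one waiter.
 */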
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
	__wake_up_locked(&x->wait, TASK_NORMAL, 1);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}

void wait_for_completion(struct completion *x)
{
	spin_lock_irq(&x->wait.lock);

	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		__add_wait_queue_tail_exclusive(&x->wait, &wait);
		do {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);

			schedule();
			spin_lock_irq(&x->wait.lock);
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
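		/*
		 * Cannot trigger here: the loop above only exits with
		 * x->done nonzero and the lock has been held across the
		 * test.  Carried over from the kernel's timeout-capable
		 * do_wait_for_common().
		 */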
		if (!x->done)
			goto out;
	}
	x->done--;
out:
	spin_unlock_irq(&x->wait.lock);
}