/*
 * Generic waiting primitives.
 *
 * (C) 2004 Nadia Yvette Chambers, Oracle
 */
7 #include <linux/completion.h>
8 #include <linux/sched.h>
9 #include <linux/wait.h>
11 static inline int waitqueue_active(wait_queue_head_t *q)
13 return !list_empty(&q->task_list);
16 static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
18 list_add(&new->task_list, &head->task_list);
21 static inline void __add_wait_queue_tail(wait_queue_head_t *head,
24 list_add_tail(&new->task_list, &head->task_list);
28 __add_wait_queue_tail_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
30 wait->flags |= WQ_FLAG_EXCLUSIVE;
31 __add_wait_queue_tail(q, wait);
35 __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
37 list_del(&old->task_list);
40 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
41 int nr_exclusive, int wake_flags, void *key)
43 wait_queue_t *curr, *next;
45 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
46 unsigned flags = curr->flags;
48 if (curr->func(curr, mode, wake_flags, key) &&
49 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
54 static void __wake_up(wait_queue_head_t *q, unsigned int mode,
55 int nr_exclusive, void *key)
59 spin_lock_irqsave(&q->lock, flags);
60 __wake_up_common(q, mode, nr_exclusive, 0, key);
61 spin_unlock_irqrestore(&q->lock, flags);
64 void wake_up(wait_queue_head_t *q)
66 __wake_up(q, TASK_NORMAL, 1, NULL);
69 void wake_up_all(wait_queue_head_t *q)
71 __wake_up(q, TASK_NORMAL, 0, NULL);
74 static void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr)
76 __wake_up_common(q, mode, nr, 0, NULL);
80 prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
84 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
85 spin_lock_irqsave(&q->lock, flags);
86 if (list_empty(&wait->task_list))
87 __add_wait_queue(q, wait);
88 set_current_state(state);
89 spin_unlock_irqrestore(&q->lock, flags);
93 prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
97 wait->flags |= WQ_FLAG_EXCLUSIVE;
98 spin_lock_irqsave(&q->lock, flags);
99 if (list_empty(&wait->task_list))
100 __add_wait_queue_tail(q, wait);
101 set_current_state(state);
102 spin_unlock_irqrestore(&q->lock, flags);
105 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
109 __set_current_state(TASK_RUNNING);
111 * We can check for list emptiness outside the lock
113 * - we use the "careful" check that verifies both
114 * the next and prev pointers, so that there cannot
115 * be any half-pending updates in progress on other
116 * CPU's that we haven't seen yet (and that might
117 * still change the stack area.
119 * - all other users take the lock (ie we can only
120 * have _one_ other CPU that looks at or modifies
123 if (!list_empty_careful(&wait->task_list)) {
124 spin_lock_irqsave(&q->lock, flags);
125 list_del_init(&wait->task_list);
126 spin_unlock_irqrestore(&q->lock, flags);
130 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
133 return wake_up_process(curr->private);
136 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
138 int ret = default_wake_function(wait, mode, sync, key);
141 list_del_init(&wait->task_list);
/*
 * Identifies one bit being waited on: the word it lives in plus its
 * bit number.  Field types reconstructed from use in wake_bit_function()
 * (key->flags is passed to test_bit() as the address argument).
 */
struct wait_bit_key {
	void *flags;		/* word containing the awaited bit */
	int bit_nr;		/* bit index within *flags */
	unsigned long timeout;	/* NOTE(review): not referenced in this file */
};
151 struct wait_bit_queue {
152 struct wait_bit_key key;
156 static int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *arg)
158 struct wait_bit_key *key = arg;
159 struct wait_bit_queue *wait_bit =
160 container_of(wait, struct wait_bit_queue, wait);
162 return (wait_bit->key.flags == key->flags &&
163 wait_bit->key.bit_nr == key->bit_nr &&
164 !test_bit(key->bit_nr, key->flags))
165 ? autoremove_wake_function(wait, mode, sync, key) : 0;
/* Single wait queue shared by all bit-waiters; wake_bit_function()
 * filters wakeups by (word, bit) key. */
static DECLARE_WAIT_QUEUE_HEAD(bit_wq);
/* Static initializer for a wait_bit_key naming bit @bit of @word. */
#define __WAIT_BIT_KEY_INITIALIZER(word, bit) \
	{ .flags = word, .bit_nr = bit, }
/*
 * Declare an on-stack wait_bit_queue @name for (word, bit), with the
 * current task as waiter and wake_bit_function() as its wake callback.
 * (The .wait designated-initializer nesting was elided in the extracted
 * source and is reconstructed here.)
 */
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait = {						\
			.private = current,				\
			.func = wake_bit_function,			\
			.task_list =					\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}
184 void wake_up_bit(void *word, int bit)
186 struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
188 if (waitqueue_active(&bit_wq))
189 __wake_up(&bit_wq, TASK_NORMAL, 1, &key);
192 void __wait_on_bit(void *word, int bit, unsigned mode)
194 DEFINE_WAIT_BIT(wait, word, bit);
197 prepare_to_wait(&bit_wq, &wait.wait, mode);
198 if (test_bit(wait.key.bit_nr, wait.key.flags))
200 } while (test_bit(wait.key.bit_nr, wait.key.flags));
202 finish_wait(&bit_wq, &wait.wait);
205 void __wait_on_bit_lock(void *word, int bit, unsigned mode)
207 DEFINE_WAIT_BIT(wait, word, bit);
210 prepare_to_wait_exclusive(&bit_wq, &wait.wait, mode);
211 if (!test_bit(wait.key.bit_nr, wait.key.flags))
214 } while (test_and_set_bit(wait.key.bit_nr, wait.key.flags));
215 finish_wait(&bit_wq, &wait.wait);
218 void complete(struct completion *x)
222 spin_lock_irqsave(&x->wait.lock, flags);
224 __wake_up_locked(&x->wait, TASK_NORMAL, 1);
225 spin_unlock_irqrestore(&x->wait.lock, flags);
228 void wait_for_completion(struct completion *x)
230 spin_lock_irq(&x->wait.lock);
233 DECLARE_WAITQUEUE(wait, current);
235 __add_wait_queue_tail_exclusive(&x->wait, &wait);
237 __set_current_state(TASK_UNINTERRUPTIBLE);
238 spin_unlock_irq(&x->wait.lock);
241 spin_lock_irq(&x->wait.lock);
243 __remove_wait_queue(&x->wait, &wait);
249 spin_unlock_irq(&x->wait.lock);