#ifndef _LINUX_WAIT_H
#define _LINUX_WAIT_H

#define WNOHANG		0x00000001
#define WUNTRACED	0x00000002
#define WSTOPPED	WUNTRACED
#define WEXITED		0x00000004
#define WCONTINUED	0x00000008
#define WNOWAIT		0x01000000	/* Don't reap, just poll status. */

#define __WNOTHREAD	0x20000000	/* Don't wait on children of other threads in this group */
#define __WALL		0x40000000	/* Wait on all children, regardless of type */
#define __WCLONE	0x80000000	/* Wait only on non-SIGCHLD children */

/* First argument to waitid: */
#define P_ALL		0
#define P_PID		1
#define P_PGID		2

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/current.h>

typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;
	wait_queue_func_t func;
	struct list_head task_list;
};

struct wait_bit_key {
	void *flags;
	int bit_nr;
};

struct wait_bit_queue {
	struct wait_bit_key key;
	wait_queue_t wait;
};

struct __wait_queue_head {
	spinlock_t lock;
	struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;

struct task_struct;
/*
 * Macros for declaration and initialisation of the datatypes
 */

#define __WAITQUEUE_INITIALIZER(name, tsk) {				\
	.private	= tsk,						\
	.func		= default_wake_function,			\
	.task_list	= { NULL, NULL } }

#define DECLARE_WAITQUEUE(name, tsk)					\
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)

#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)				\
	{ .flags = word, .bit_nr = bit, }

extern void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *);

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
							\
		__init_waitqueue_head((q), &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __WAIT_QUEUE_HEAD_INIT_ONSTACK(name) \
	({ init_waitqueue_head(&name); name; })
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_WAIT_QUEUE_HEAD_ONSTACK(name) DECLARE_WAIT_QUEUE_HEAD(name)
#endif

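/*
 * Illustrative sketch (not part of the original header): how a driver
 * might declare and initialise wait queue heads, either statically or
 * embedded in a dynamically allocated structure.  The names my_static_wq,
 * my_device and my_device_setup are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_static_wq);
 *
 *	struct my_device {
 *		wait_queue_head_t read_wq;
 *	};
 *
 *	static void my_device_setup(struct my_device *dev)
 *	{
 *		init_waitqueue_head(&dev->read_wq);
 *	}
 */
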
static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p)
{
	q->flags = 0;
	q->private = p;
	q->func = default_wake_function;
}

static inline void init_waitqueue_func_entry(wait_queue_t *q,
					wait_queue_func_t func)
{
	q->flags = 0;
	q->private = NULL;
	q->func = func;
}

static inline int waitqueue_active(wait_queue_head_t *q)
{
	return !list_empty(&q->task_list);
}

extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);
extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait);
extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait);

static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	list_add(&new->task_list, &head->task_list);
}

/*
 * Used for wake-one threads:
 */
static inline void __add_wait_queue_tail(wait_queue_head_t *head,
						wait_queue_t *new)
{
	list_add_tail(&new->task_list, &head->task_list);
}

static inline void __remove_wait_queue(wait_queue_head_t *head,
						wait_queue_t *old)
{
	list_del(&old->task_list);
}

void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
			void *key);
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode);
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
void __wake_up_bit(wait_queue_head_t *, void *, int);
int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
void wake_up_bit(void *, int);
int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
wait_queue_head_t *bit_waitqueue(void *, int);

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
#define wake_up_all(x)			__wake_up(x, TASK_NORMAL, 0, NULL)
#define wake_up_locked(x)		__wake_up_locked((x), TASK_NORMAL)

#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr)	__wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x)	__wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
#define wake_up_interruptible_sync(x)	__wake_up_sync((x), TASK_INTERRUPTIBLE, 1)

/*
 * Wakeup macros to be used to report events to the targets.
 */
#define wake_up_poll(x, m)				\
	__wake_up(x, TASK_NORMAL, 1, (void *) (m))
#define wake_up_locked_poll(x, m)			\
	__wake_up_locked_key((x), TASK_NORMAL, (void *) (m))
#define wake_up_interruptible_poll(x, m)		\
	__wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m))
#define wake_up_interruptible_sync_poll(x, m)		\
	__wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m))

#define __wait_event(wq, condition)					\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		schedule();						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__wait_event(wq, condition);					\
} while (0)

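/*
 * Illustrative sketch (not part of the original header): a waiter/waker
 * pair built on wait_event()/wake_up().  The names my_wq, my_data_ready,
 * my_wait_for_data and my_signal_data are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_data_ready;
 *
 *	static void my_wait_for_data(void)
 *	{
 *		wait_event(my_wq, my_data_ready);
 *	}
 *
 *	static void my_signal_data(void)
 *	{
 *		my_data_ready = 1;
 *		wake_up(&my_wq);
 *	}
 */
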
#define __wait_event_timeout(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		ret = schedule_timeout(ret);				\
		if (!ret)						\
			break;						\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, and the remaining
 * jiffies if the condition evaluated to true before the timeout elapsed.
 */
#define wait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_timeout(wq, condition, __ret);		\
	__ret;								\
})

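/*
 * Illustrative sketch (not part of the original header): waiting for a
 * condition with a bounded sleep.  A return of 0 means the timeout
 * elapsed; a non-zero return is the number of jiffies left when the
 * condition became true.  The names my_wq, my_hw_done and my_wait_for_hw
 * are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_hw_done;
 *
 *	static int my_wait_for_hw(void)
 *	{
 *		long left = wait_event_timeout(my_wq, my_hw_done, HZ);
 *
 *		return left ? 0 : -ETIMEDOUT;
 *	}
 */
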
#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

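/*
 * Illustrative sketch (not part of the original header): an interruptible
 * wait as typically used in a read() path.  The -ERESTARTSYS result can be
 * returned directly to the caller so the syscall is restarted after the
 * signal is handled.  The names my_wq, my_have_data and my_read_wait are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_have_data;
 *
 *	static int my_read_wait(void)
 *	{
 *		return wait_event_interruptible(my_wq, my_have_data);
 *	}
 */
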
#define __wait_event_interruptible_timeout(wq, condition, ret)		\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			ret = schedule_timeout(ret);			\
			if (!ret)					\
				break;					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_interruptible_timeout - sleep until a condition becomes true or a timeout elapses
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
 * was interrupted by a signal, and the remaining jiffies if the
 * condition evaluated to true before the timeout elapsed.
 */
#define wait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!(condition))						\
		__wait_event_interruptible_timeout(wq, condition, __ret); \
	__ret;								\
})

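/*
 * Illustrative sketch (not part of the original header): distinguishing
 * the three possible outcomes of wait_event_interruptible_timeout():
 * timeout (0), signal (-ERESTARTSYS), or success (remaining jiffies).
 * The names my_wq, my_hw_done and my_wait_for_hw_intr are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_hw_done;
 *
 *	static int my_wait_for_hw_intr(void)
 *	{
 *		long ret = wait_event_interruptible_timeout(my_wq,
 *							    my_hw_done, HZ);
 *		if (ret == 0)
 *			return -ETIMEDOUT;
 *		if (ret < 0)
 *			return ret;
 *		return 0;
 *	}
 */
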
#define __wait_event_interruptible_exclusive(wq, condition, ret)	\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait_exclusive(&wq, &__wait,			\
					TASK_INTERRUPTIBLE);		\
		if (condition) {					\
			finish_wait(&wq, &__wait);			\
			break;						\
		}							\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		abort_exclusive_wait(&wq, &__wait,			\
				TASK_INTERRUPTIBLE, NULL);		\
		break;							\
	}								\
} while (0)

#define wait_event_interruptible_exclusive(wq, condition)		\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible_exclusive(wq, condition, __ret);\
	__ret;								\
})

#define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
({									\
	int __ret = 0;							\
	DEFINE_WAIT(__wait);						\
	if (exclusive)							\
		__wait.flags |= WQ_FLAG_EXCLUSIVE;			\
	do {								\
		if (likely(list_empty(&__wait.task_list)))		\
			__add_wait_queue_tail(&(wq), &__wait);		\
		set_current_state(TASK_INTERRUPTIBLE);			\
		if (signal_pending(current)) {				\
			__ret = -ERESTARTSYS;				\
			break;						\
		}							\
		if (irq)						\
			spin_unlock_irq(&(wq).lock);			\
		else							\
			spin_unlock(&(wq).lock);			\
		schedule();						\
		if (irq)						\
			spin_lock_irq(&(wq).lock);			\
		else							\
			spin_lock(&(wq).lock);				\
	} while (!(condition));						\
	__remove_wait_queue(&(wq), &__wait);				\
	__set_current_state(TASK_RUNNING);				\
	__ret;								\
})

/**
 * wait_event_interruptible_locked - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock(), which
 * must match the way it is locked/unlocked outside of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked(wq, condition)			\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))

/**
 * wait_event_interruptible_locked_irq - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq(),
 * which must match the way it is locked/unlocked outside of this macro.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_locked_irq(wq, condition)		\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))

/**
 * wait_event_interruptible_exclusive_locked - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock()/spin_unlock(), which
 * must match the way it is locked/unlocked outside of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are also waiting on the list, a wakeup of
 * this process stops further waiters from being considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))

/**
 * wait_event_interruptible_exclusive_locked_irq - sleep exclusively until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_INTERRUPTIBLE) until the
 * @condition evaluates to true or a signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * It must be called with wq.lock held.  The spinlock is dropped while
 * sleeping, but @condition is tested with the lock held, and the lock
 * is held again when this macro exits.
 *
 * The lock is locked/unlocked using spin_lock_irq()/spin_unlock_irq(),
 * which must match the way it is locked/unlocked outside of this macro.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are also waiting on the list, a wakeup of
 * this process stops further waiters from being considered.
 *
 * wake_up_locked() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * signal and 0 if @condition evaluated to true.
 */
#define wait_event_interruptible_exclusive_locked_irq(wq, condition)	\
	((condition)							\
	 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))

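/*
 * Illustrative sketch (not part of the original header): waiting with
 * wq.lock already held, as the *_locked variants require.  The lock
 * protects both the condition and the wait queue.  The names my_wq,
 * my_count, my_take_one and my_put_one are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static unsigned int my_count;	(protected by my_wq.lock)
 *
 *	static int my_take_one(void)
 *	{
 *		int ret;
 *
 *		spin_lock(&my_wq.lock);
 *		ret = wait_event_interruptible_locked(my_wq, my_count > 0);
 *		if (!ret)
 *			my_count--;
 *		spin_unlock(&my_wq.lock);
 *		return ret;
 *	}
 *
 *	static void my_put_one(void)
 *	{
 *		spin_lock(&my_wq.lock);
 *		my_count++;
 *		wake_up_locked(&my_wq);
 *		spin_unlock(&my_wq.lock);
 *	}
 */
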
#define __wait_event_killable(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {							\
		prepare_to_wait(&wq, &__wait, TASK_KILLABLE);		\
		if (condition)						\
			break;						\
		if (!fatal_signal_pending(current)) {			\
			schedule();					\
			continue;					\
		}							\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)

/**
 * wait_event_killable - sleep until a condition becomes true
 * @wq: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_KILLABLE) until the
 * @condition evaluates to true or a fatal signal is received.
 * The @condition is checked each time the waitqueue @wq is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * The function will return -ERESTARTSYS if it was interrupted by a
 * fatal signal and 0 if @condition evaluated to true.
 */
#define wait_event_killable(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_killable(wq, condition, __ret);		\
	__ret;								\
})

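/*
 * Illustrative sketch (not part of the original header): a killable wait
 * for a long-running operation.  Ordinary signals do not interrupt the
 * sleep, but a fatal signal (e.g. SIGKILL) does, so the task is not left
 * unkillable.  The names my_wq, my_io_done and my_wait_for_io are
 * hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_io_done;
 *
 *	static int my_wait_for_io(void)
 *	{
 *		return wait_event_killable(my_wq, my_io_done);
 *	}
 */
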
/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void add_wait_queue_exclusive_locked(wait_queue_head_t *q,
						   wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue_tail(q, wait);
}

/*
 * Must be called with the spinlock in the wait_queue_head_t held.
 */
static inline void remove_wait_queue_locked(wait_queue_head_t *q,
					    wait_queue_t *wait)
{
	__remove_wait_queue(q, wait);
}

/*
 * These are the old interfaces to sleep waiting for an event.
 * They are racy.  DO NOT use them, use the wait_event* interfaces above.
 * We plan to remove these interfaces.
 */
extern void sleep_on(wait_queue_head_t *q);
extern long sleep_on_timeout(wait_queue_head_t *q,
			     signed long timeout);
extern void interruptible_sleep_on(wait_queue_head_t *q);
extern long interruptible_sleep_on_timeout(wait_queue_head_t *q,
					   signed long timeout);

/*
 * Waitqueues which are removed from the waitqueue_head at wakeup time
 */
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
			unsigned int mode, void *key);
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}

#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)

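/*
 * Illustrative sketch (not part of the original header): the open-coded
 * wait loop that the wait_event*() macros are built on, for cases that
 * need extra work between the condition test and the sleep.  The names
 * my_wq, my_resource_free and my_claim_resource are hypothetical.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_resource_free;
 *
 *	static void my_claim_resource(void)
 *	{
 *		DEFINE_WAIT(wait);
 *
 *		for (;;) {
 *			prepare_to_wait(&my_wq, &wait, TASK_UNINTERRUPTIBLE);
 *			if (my_resource_free)
 *				break;
 *			schedule();
 *		}
 *		my_resource_free = 0;
 *		finish_wait(&my_wq, &wait);
 *	}
 */
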
#define DEFINE_WAIT_BIT(name, word, bit)				\
	struct wait_bit_queue name = {					\
		.key = __WAIT_BIT_KEY_INITIALIZER(word, bit),		\
		.wait	= {						\
			.private	= current,			\
			.func		= wake_bit_function,		\
			.task_list	=				\
				LIST_HEAD_INIT((name).wait.task_list),	\
		},							\
	}

#define init_wait(wait)							\
	do {								\
		(wait)->private = current;				\
		(wait)->func = autoremove_wake_function;		\
		INIT_LIST_HEAD(&(wait)->task_list);			\
	} while (0)

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use.  This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * Use wait_on_bit() when waiting for the bit to clear without any
 * intention of setting it.
 */
static inline int wait_on_bit(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit(word, bit, action, mode);
}

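/*
 * Illustrative sketch (not part of the original header): waiting for a
 * "busy" bit in a driver-private flags word to clear.  The names
 * MY_BUSY_BIT, my_flags, my_bit_wait and my_wait_not_busy are
 * hypothetical; the @action callback just sleeps and returns 0 so the
 * wait continues.
 *
 *	#define MY_BUSY_BIT	0
 *	static unsigned long my_flags;
 *
 *	static int my_bit_wait(void *word)
 *	{
 *		schedule();
 *		return 0;
 *	}
 *
 *	static int my_wait_not_busy(void)
 *	{
 *		return wait_on_bit(&my_flags, MY_BUSY_BIT, my_bit_wait,
 *				   TASK_UNINTERRUPTIBLE);
 *	}
 */
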
/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use.  This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set a bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit_lock() in threads waiting to be able to set the bit.
 * Use wait_on_bit_lock() when waiting for the bit to clear with the
 * intention of setting it, and clear it again when done.
 */
static inline int wait_on_bit_lock(void *word, int bit,
				int (*action)(void *), unsigned mode)
{
	if (!test_and_set_bit(bit, word))
		return 0;
	return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

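/*
 * Illustrative sketch (not part of the original header): using a bit as a
 * lock with wait_on_bit_lock()/wake_up_bit().  On success the bit is set
 * and owned by the caller; clearing it, issuing a barrier and calling
 * wake_up_bit() releases it.  The names MY_LOCK_BIT, my_flags, my_bit_wait,
 * my_lock and my_unlock are hypothetical (my_bit_wait as in the previous
 * sketch).
 *
 *	#define MY_LOCK_BIT	1
 *	static unsigned long my_flags;
 *
 *	static int my_lock(void)
 *	{
 *		return wait_on_bit_lock(&my_flags, MY_LOCK_BIT, my_bit_wait,
 *					TASK_UNINTERRUPTIBLE);
 *	}
 *
 *	static void my_unlock(void)
 *	{
 *		clear_bit(MY_LOCK_BIT, &my_flags);
 *		smp_mb__after_clear_bit();
 *		wake_up_bit(&my_flags, MY_LOCK_BIT);
 *	}
 */
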
#endif /* __KERNEL__ */

#endif