#ifndef _ASM_IA64_SEMAPHORE_H
#define _ASM_IA64_SEMAPHORE_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/wait.h>
#include <linux/rwsem.h>

#include <asm/atomic.h>

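/*
 * Counting semaphore: "count" is the number of units still available
 * (it goes negative while tasks are waiting), "sleepers" is bookkeeping
 * used by the out-of-line contention path, and "wait" is the queue the
 * waiters sleep on.
 */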
struct semaphore {
	atomic_t count;
	int sleepers;
	wait_queue_head_t wait;
};

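/* Static initializer for a semaphore "name" with an initial count of "n". */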
#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

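/*
 * A "mutex" here is just a semaphore with an initial count of 1
 * (unlocked) or 0 (locked).
 */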
#define __MUTEX_INITIALIZER(name)	__SEMAPHORE_INITIALIZER(name,1)

#define __DECLARE_SEMAPHORE_GENERIC(name,count)					\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)

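/* Initialize a semaphore at run time to the given count. */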
static inline void
sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

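/* Initialize a semaphore for use as a mutex: count 1, i.e. unlocked. */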
static inline void
init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

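/* Initialize a semaphore for use as an already-locked mutex: count 0. */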
static inline void
init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

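/*
 * Slow paths, implemented out of line; the inline fast paths below call
 * them only when the count indicates contention.
 */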
extern void __down (struct semaphore * sem);
extern int  __down_interruptible (struct semaphore * sem);
extern int  __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 */
static inline void
down (struct semaphore *sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_INTERRUPTIBLE state.  Returns 0
 * if the semaphore was acquired, or a negative error code if the sleep
 * was interrupted by a signal.
 */
static inline int
down_interruptible (struct semaphore * sem)
{
	int ret = 0;

	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}

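/*
 * Try to acquire the semaphore without sleeping.  Returns 0 if it was
 * acquired, non-zero if the semaphore was not available.
 */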
static inline int
down_trylock (struct semaphore *sem)
{
	int ret = 0;

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_trylock(sem);
	return ret;
}

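/*
 * Atomically increment the semaphore's count.  If the result is still
 * <= 0, there are waiters, so let the slow path wake one of them up.
 */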
static inline void
up (struct semaphore * sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}

#endif /* _ASM_IA64_SEMAPHORE_H */