#ifndef _ASM_IA64_SEMAPHORE_H
#define _ASM_IA64_SEMAPHORE_H

/*
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/wait.h>
#include <linux/rwsem.h>

#include <asm/atomic.h>

struct semaphore {
	atomic_t count;			/* units still available; goes negative when tasks are waiting */
	int sleepers;			/* bookkeeping used by the out-of-line slow path */
	wait_queue_head_t wait;		/* tasks blocked in down()/down_interruptible() */
};

#define __SEMAPHORE_INITIALIZER(name, n)				\
{									\
	.count		= ATOMIC_INIT(n),				\
	.sleepers	= 0,						\
	.wait		= __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

#define __DECLARE_SEMAPHORE_GENERIC(name,count)					\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

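/*
 * DECLARE_MUTEX() defines a semaphore with an initial count of 1, i.e.,
 * a binary semaphore usable for mutual exclusion; DECLARE_MUTEX_LOCKED()
 * defines one that starts out locked (count 0).
 */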
#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)

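/*
 * Run-time initializers for semaphores embedded in dynamically
 * allocated objects, which cannot use the static DECLARE_* forms above.
 */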
static inline void
sema_init (struct semaphore *sem, int val)
{
	*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
}

static inline void
init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}

static inline void
init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}

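/*
 * Out-of-line slow paths; the inline fast paths below call these only
 * when the count indicates that the semaphore is contended.
 */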
extern void __down (struct semaphore * sem);
extern int  __down_interruptible (struct semaphore * sem);
extern int  __down_trylock (struct semaphore * sem);
extern void __up (struct semaphore * sem);

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_UNINTERRUPTIBLE state.
 */
static inline void
down (struct semaphore *sem)
{
	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		__down(sem);
}

/*
 * Atomically decrement the semaphore's count.  If it goes negative,
 * block the calling thread in the TASK_INTERRUPTIBLE state.  Returns 0
 * on success and -EINTR if the sleep was interrupted by a signal.
 */
static inline int
down_interruptible (struct semaphore * sem)
{
	int ret = 0;

	might_sleep();
	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_interruptible(sem);
	return ret;
}

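/*
 * Atomically try to decrement the semaphore's count without sleeping.
 * Returns 0 if the semaphore was acquired and non-zero if it was
 * already held.
 */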
static inline int
down_trylock (struct semaphore *sem)
{
	int ret = 0;

	if (atomic_dec_return(&sem->count) < 0)
		ret = __down_trylock(sem);
	return ret;
}

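/*
 * Atomically increment the semaphore's count.  If the result shows that
 * other tasks are blocked on the semaphore, wake one of them up.
 */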
static inline void
up (struct semaphore * sem)
{
	if (atomic_inc_return(&sem->count) <= 0)
		__up(sem);
}
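
/*
 * Usage sketch (illustrative only, not part of the original header):
 * a typical caller declares a mutex-style semaphore and brackets its
 * critical section with down()/up().  The identifier "example_sem" is
 * a placeholder, not something defined elsewhere in the kernel.
 *
 *	static DECLARE_MUTEX(example_sem);
 *
 *	down(&example_sem);	 ... may sleep, so not usable in interrupt context
 *	 ... critical section protected by example_sem ...
 *	up(&example_sem);
 */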

#endif /* _ASM_IA64_SEMAPHORE_H */