#ifndef _PPC_SEMAPHORE_H
#define _PPC_SEMAPHORE_H

/*
 * Swiped from asm-sparc/semaphore.h and modified
 * -- Cort (cort@cs.nmt.edu)
 *
 * Stole some rw spinlock-based semaphore stuff from asm-alpha/semaphore.h
 * -- Ani Joshi (ajoshi@unixbox.com)
 *
 * Remove spinlock-based RW semaphores; RW semaphore definitions are
 * now in rwsem.h and we use the generic lib/rwsem.c implementation.
 * Rework semaphores to use atomic_dec_if_positive.
 * -- Paul Mackerras (paulus@samba.org)
 */

#ifdef __KERNEL__

#include <asm/atomic.h>
#include <asm/system.h>
#include <linux/wait.h>
#include <linux/rwsem.h>

struct semaphore {
        /*
         * Note that any negative value of count is equivalent to 0,
         * but additionally indicates that some process(es) might be
         * sleeping on `wait'.
         */
        atomic_t count;
        wait_queue_head_t wait;
};

#define __SEMAPHORE_INITIALIZER(name, n)                                \
{                                                                       \
        .count          = ATOMIC_INIT(n),                               \
        .wait           = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)    \
}

#define __MUTEX_INITIALIZER(name) \
        __SEMAPHORE_INITIALIZER(name, 1)

#define __DECLARE_SEMAPHORE_GENERIC(name, count) \
        struct semaphore name = __SEMAPHORE_INITIALIZER(name, count)

#define DECLARE_MUTEX(name)             __DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)      __DECLARE_SEMAPHORE_GENERIC(name, 0)
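
/*
 * Illustrative sketch only (not part of the original header): a typical
 * caller declares a mutex-style semaphore statically and brackets its
 * critical section with down()/up().  The name `my_driver_sem' below is
 * purely hypothetical.
 *
 *      static DECLARE_MUTEX(my_driver_sem);
 *
 *      down(&my_driver_sem);
 *      ... touch the shared driver state ...
 *      up(&my_driver_sem);
 */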

static inline void sema_init (struct semaphore *sem, int val)
{
        atomic_set(&sem->count, val);
        init_waitqueue_head(&sem->wait);
}

static inline void init_MUTEX (struct semaphore *sem)
{
        sema_init(sem, 1);
}

static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
        sema_init(sem, 0);
}
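
/*
 * Illustrative sketch only: when the semaphore is embedded in a
 * dynamically allocated object rather than declared statically, it is
 * initialized at runtime.  `struct my_device' and `dev' are hypothetical
 * names used purely for illustration.
 *
 *      struct my_device {
 *              struct semaphore sem;
 *              ...
 *      };
 *
 *      sema_init(&dev->sem, 1);        same effect as init_MUTEX(&dev->sem)
 */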

extern void __down(struct semaphore * sem);
extern int  __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);

extern inline void down(struct semaphore * sem)
{
        might_sleep();

        /*
         * Try to get the semaphore, take the slow path if we fail.
         */
        if (atomic_dec_return(&sem->count) < 0)
                __down(sem);
        smp_wmb();
}

extern inline int down_interruptible(struct semaphore * sem)
{
        int ret = 0;

        might_sleep();

        if (atomic_dec_return(&sem->count) < 0)
                ret = __down_interruptible(sem);
        smp_wmb();
        return ret;
}

extern inline int down_trylock(struct semaphore * sem)
{
        int ret;

        ret = atomic_dec_if_positive(&sem->count) < 0;
        smp_wmb();
        return ret;
}
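
/*
 * Illustrative sketch only: down_trylock() returns 0 when the semaphore
 * was acquired and non-zero when it was not (atomic_dec_if_positive()
 * refuses to take the count below zero), so a caller that must not sleep
 * checks the return value instead of blocking.  `my_driver_sem' is the
 * hypothetical semaphore from the sketch above.
 *
 *      if (down_trylock(&my_driver_sem))
 *              return -EBUSY;          could not get it, do not sleep
 *      ... critical section ...
 *      up(&my_driver_sem);
 */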

extern inline void up(struct semaphore * sem)
{
        smp_wmb();
        if (atomic_inc_return(&sem->count) <= 0)
                __up(sem);
}
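
/*
 * Illustrative sketch only: a semaphore declared with
 * DECLARE_MUTEX_LOCKED(name) starts with a count of 0, so the first
 * down() blocks until another context calls up().  That makes it usable
 * as a simple wait-for-event primitive; `work_done' is a hypothetical
 * name.
 *
 *      static DECLARE_MUTEX_LOCKED(work_done);
 *
 *      waiter:                         completer:
 *              down(&work_done);               ... finish the work ...
 *              ...                             up(&work_done);
 */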

#endif /* __KERNEL__ */

#endif /* !(_PPC_SEMAPHORE_H) */