#ifndef _ASM_PARISC_SEMAPHORE_HELPER_H
#define _ASM_PARISC_SEMAPHORE_HELPER_H

/*
 * SMP- and interrupt-safe semaphore helper functions.
 *
 * (C) Copyright 1996 Linus Torvalds
 * (C) Copyright 1999 Andrea Arcangeli
 */

/*
 * These two _must_ execute atomically wrt each other.
 *
 * This is trivially done with load_locked/store_cond,
 * which we have.  Let the rest of the losers suck eggs.
 */
static __inline__ void wake_one_more(struct semaphore *sem)
{
	atomic_inc((atomic_t *)&sem->waking);
}
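
/*
 * For illustration only: a sketch of the wakeup side, modeled on the
 * generic __up() that architectures of this era paired with these
 * helpers (the actual caller lives in the arch semaphore.c, not here,
 * and the sem->wait waitqueue field is an assumption of this sketch):
 *
 *	void __up(struct semaphore *sem)
 *	{
 *		wake_one_more(sem);
 *		wake_up(&sem->wait);
 *	}
 *
 * wake_one_more() banks the wakeup in sem->waking so that exactly one
 * of the sleepers woken by wake_up() can claim it below.
 */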

static __inline__ int waking_non_zero(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
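
/*
 * waking_non_zero() returns 1 (got the lock) or 0 (go to sleep).
 * A hypothetical sleeper, sketched after the generic __down() loop of
 * this era (not part of this header), would spin on it like so:
 *
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (waking_non_zero(sem))
 *			break;
 *		schedule();
 *	}
 *	set_current_state(TASK_RUNNING);
 *
 * The spinlock makes the test-and-decrement of sem->waking atomic
 * with respect to wake_one_more() above.
 */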

/*
 * waking_non_zero_interruptible:
 *	1	got the lock
 *	0	go to sleep
 *	-EINTR	interrupted
 *
 * We must undo the sem->count decrement made by down_interruptible()
 * while we are protected by the spinlock, in order to make this
 * atomic_inc() atomic with the atomic_read() in wake_one_more();
 * otherwise we can race. -arca
 */
static __inline__ int waking_non_zero_interruptible(struct semaphore *sem,
						struct task_struct *tsk)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking > 0) {
		sem->waking--;
		ret = 1;
	} else if (signal_pending(tsk)) {
		atomic_inc(&sem->count);
		ret = -EINTR;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
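
/*
 * A sketch of the interruptible sleeper (hypothetical caller, after
 * the generic __down_interruptible() of this era): the -EINTR case
 * above has already given the count back, so the caller just breaks
 * out and returns the error.
 *
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		ret = waking_non_zero_interruptible(sem, current);
 *		if (ret) {
 *			if (ret == 1)
 *				ret = 0;	// got the lock
 *			break;
 *		}
 *		schedule();
 *	}
 */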

/*
 * waking_non_zero_trylock:
 *	1	failed to lock
 *	0	got the lock
 *
 * We must undo the sem->count decrement made by down_trylock() while
 * we are protected by the spinlock, in order to make this atomic_inc()
 * atomic with the atomic_read() in wake_one_more(); otherwise we can
 * race. -arca
 */
static __inline__ int waking_non_zero_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int ret = 1;

	spin_lock_irqsave(&semaphore_wake_lock, flags);
	if (sem->waking <= 0) {
		atomic_inc(&sem->count);
	} else {
		sem->waking--;
		ret = 0;
	}
	spin_unlock_irqrestore(&semaphore_wake_lock, flags);
	return ret;
}
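
/*
 * The trylock path never sleeps; a caller modeled on the generic
 * __down_trylock() of this era simply reports the result:
 *
 *	int __down_trylock(struct semaphore *sem)
 *	{
 *		return waking_non_zero_trylock(sem);
 *	}
 *
 * Note the inverted convention here: 1 means the lock was NOT taken.
 */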

#endif /* _ASM_PARISC_SEMAPHORE_HELPER_H */