| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 1 | /* | 
| Uwe Zeisberger | f30c226 | 2006-10-03 23:01:26 +0200 | [diff] [blame] | 2 |  * include/asm-generic/mutex-dec.h | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 3 |  * | 
 | 4 |  * Generic implementation of the mutex fastpath, based on atomic | 
 | 5 |  * decrement/increment. | 
 | 6 |  */ | 
 | 7 | #ifndef _ASM_GENERIC_MUTEX_DEC_H | 
 | 8 | #define _ASM_GENERIC_MUTEX_DEC_H | 
 | 9 |  | 
 | 10 | /** | 
 | 11 |  *  __mutex_fastpath_lock - try to take the lock by moving the count | 
 | 12 |  *                          from 1 to a 0 value | 
 | 13 |  *  @count: pointer of type atomic_t | 
 | 14 |  *  @fail_fn: function to call if the original value was not 1 | 
 | 15 |  * | 
 | 16 |  * Change the count from 1 to a value lower than 1, and call <fail_fn> if | 
 | 17 |  * it wasn't 1 originally. This function MUST leave the value lower than | 
 | 18 |  * 1 even when the "1" assertion wasn't true. | 
 | 19 |  */ | 
| Nicolas Pitre | e358c1a | 2006-03-31 02:32:13 -0800 | [diff] [blame] | 20 | static inline void | 
| Harvey Harrison | 144b2a9 | 2008-02-08 04:19:56 -0800 | [diff] [blame] | 21 | __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) | 
| Nicolas Pitre | e358c1a | 2006-03-31 02:32:13 -0800 | [diff] [blame] | 22 | { | 
 | 23 | 	if (unlikely(atomic_dec_return(count) < 0)) | 
 | 24 | 		fail_fn(count); | 
| Nicolas Pitre | e358c1a | 2006-03-31 02:32:13 -0800 | [diff] [blame] | 25 | } | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 26 |  | 
 | 27 | /** | 
 | 28 |  *  __mutex_fastpath_lock_retval - try to take the lock by moving the count | 
 | 29 |  *                                 from 1 to a 0 value | 
 | 30 |  *  @count: pointer of type atomic_t | 
 | 31 |  *  @fail_fn: function to call if the original value was not 1 | 
 | 32 |  * | 
 | 33 |  * Change the count from 1 to a value lower than 1, and call <fail_fn> if | 
 | 34 |  * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, | 
 | 35 |  * or anything the slow path function returns. | 
 | 36 |  */ | 
 | 37 | static inline int | 
| Harvey Harrison | 144b2a9 | 2008-02-08 04:19:56 -0800 | [diff] [blame] | 38 | __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 39 | { | 
 | 40 | 	if (unlikely(atomic_dec_return(count) < 0)) | 
 | 41 | 		return fail_fn(count); | 
| Nick Piggin | a8ddac7 | 2008-10-21 10:59:15 +0200 | [diff] [blame] | 42 | 	return 0; | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 43 | } | 
 | 44 |  | 
 | 45 | /** | 
 | 46 |  *  __mutex_fastpath_unlock - try to promote the count from 0 to 1 | 
 | 47 |  *  @count: pointer of type atomic_t | 
 | 48 |  *  @fail_fn: function to call if the original value was not 0 | 
 | 49 |  * | 
 | 50 |  * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>. | 
 | 51 |  * In the failure case, this function is allowed to either set the value to | 
 | 52 |  * 1, or to set it to a value lower than 1. | 
 | 53 |  * | 
 | 54 |  * If the implementation sets it to a value of lower than 1, then the | 
 | 55 |  * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs | 
 | 56 |  * to return 0 otherwise. | 
 | 57 |  */ | 
| Nicolas Pitre | e358c1a | 2006-03-31 02:32:13 -0800 | [diff] [blame] | 58 | static inline void | 
| Harvey Harrison | 144b2a9 | 2008-02-08 04:19:56 -0800 | [diff] [blame] | 59 | __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *)) | 
| Nicolas Pitre | e358c1a | 2006-03-31 02:32:13 -0800 | [diff] [blame] | 60 | { | 
| Nicolas Pitre | e358c1a | 2006-03-31 02:32:13 -0800 | [diff] [blame] | 61 | 	if (unlikely(atomic_inc_return(count) <= 0)) | 
 | 62 | 		fail_fn(count); | 
 | 63 | } | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 64 |  | 
/*
 * The unlock fastpath above may leave the count at a value lower than 1
 * on failure, so (per the contract documented at __mutex_fastpath_unlock)
 * the slow path must perform the unlock itself: always return 1.
 */
#define __mutex_slowpath_needs_to_unlock()		1
 | 66 |  | 
 | 67 | /** | 
 | 68 |  * __mutex_fastpath_trylock - try to acquire the mutex, without waiting | 
 | 69 |  * | 
 | 70 |  *  @count: pointer of type atomic_t | 
 | 71 |  *  @fail_fn: fallback function | 
 | 72 |  * | 
 | 73 |  * Change the count from 1 to a value lower than 1, and return 0 (failure) | 
 | 74 |  * if it wasn't 1 originally, or return 1 (success) otherwise. This function | 
 | 75 |  * MUST leave the value lower than 1 even when the "1" assertion wasn't true. | 
 | 76 |  * Additionally, if the value was < 0 originally, this function must not leave | 
 | 77 |  * it to 0 on failure. | 
 | 78 |  * | 
 | 79 |  * If the architecture has no effective trylock variant, it should call the | 
 | 80 |  * <fail_fn> spinlock-based trylock variant unconditionally. | 
 | 81 |  */ | 
 | 82 | static inline int | 
 | 83 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | 
 | 84 | { | 
| Nick Piggin | a8ddac7 | 2008-10-21 10:59:15 +0200 | [diff] [blame] | 85 | 	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 86 | 		return 1; | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 87 | 	return 0; | 
| Ingo Molnar | 620a6fd | 2006-01-09 15:59:17 -0800 | [diff] [blame] | 88 | } | 
 | 89 |  | 
 | 90 | #endif |