/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH4A is done as on the ARMv6+
 * architecture, with a bastardized atomic decrement (it is not a
 * reliable atomic decrement, but it satisfies the defined semantics for
 * our purpose while being smaller and faster than a real atomic
 * decrement or atomic swap).  The idea is to attempt decrementing the
 * lock value only once.  If, once decremented, it isn't zero, or if its
 * store-back fails due to a dispute on the exclusive store, we simply
 * bail out immediately through the slow path, where the lock will be
 * reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"	/* load-linked: __res = count */
		"add		#-1, %0	\n"	/* decrement the lock count */
		"movco.l	%0, @%2	\n"	/* store-conditional write-back */
		"movt		%1	\n"	/* __done = T (did the store succeed?) */
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	/* Slowpath if the store-back failed or the count didn't reach zero. */
	if (unlikely(!__done || __res != 0))
		fail_fn(count);
}

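/*
 * For illustration only -- a portable C sketch of the fastpath contract
 * above, written with GCC __atomic builtins (an assumption made here for
 * clarity; the real code is the ll/sc sequence, which may additionally
 * take the slowpath on a spurious movco.l failure):
 *
 *	if (__atomic_sub_fetch(&count->counter, 1, __ATOMIC_ACQUIRE) != 0)
 *		fail_fn(count);
 */
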
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	/* On fastpath failure, return whatever the slowpath fail_fn returns. */
	if (unlikely(!__done || __res != 0))
		__res = fail_fn(count);

	return __res;
}

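/*
 * Contract sketch (hedged -- the caller shown is hypothetical): this
 * variant returns 0 when the fastpath takes the lock, otherwise it
 * propagates fail_fn()'s result; an interruptible slowpath would
 * conventionally return 0 or -EINTR.
 *
 *	ret = __mutex_fastpath_lock_retval(&lock->count, slowpath_fn);
 *	(ret == 0 on success, or e.g. -EINTR from an interruptible slowpath)
 */
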
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n\t"	/* load-linked: __res = count */
		"add		#1, %0	\n\t"	/* increment the lock count */
		"movco.l	%0, @%2 \n\t"	/* store-conditional write-back */
		"movt		%1	\n\t"	/* __done = T (did the store succeed?) */
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	/* Slowpath if the store-back failed or the lock was contended. */
	if (unlikely(!__done || __res <= 0))
		fail_fn(count);
}

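/*
 * Why "__res <= 0": under the generic mutex fastpath convention the
 * counter is 1 when unlocked, 0 when locked with no waiters, and
 * negative when waiters may be queued.  A clean unlock increments 0 to
 * 1; any result <= 0 means the lock was contended, so fail_fn() has to
 * wake up waiters.  An illustrative C equivalent (a sketch, not the
 * kernel code):
 *
 *	if (__atomic_add_fetch(&count->counter, 1, __ATOMIC_RELEASE) <= 0)
 *		fail_fn(count);
 */
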
/*
 * If the unlock was done on a contended lock, or if the unlock simply
 * fails, then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1

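/*
 * Note (an assumption based on the generic mutex slowpath convention,
 * not stated in this file): returning 1 here tells the generic unlock
 * slowpath that it must still set the count back to 1 itself, since
 * the fastpath above may have bailed out without releasing the lock.
 */
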
/*
 * For __mutex_fastpath_trylock we do an atomic decrement, check the
 * result, and put it in the __res variable.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __res, __orig;

	__asm__ __volatile__ (
		"1: movli.l	@%2, %0		\n\t"	/* load-linked: __orig = count */
		"dt		%0		\n\t"	/* decrement __orig */
		"movco.l	%0,@%2		\n\t"	/* store-conditional write-back */
		"bf		1b		\n\t"	/* retry until the store succeeds */
		"cmp/eq		#0,%0		\n\t"	/* did the count reach zero? */
		"bt		2f		\n\t"
		"mov		#0, %1		\n\t"	/* no:  __res = 0, trylock failed */
		"bf		3f		\n\t"	/* T is still clear, always taken */
		"2: mov		#1, %1		\n\t"	/* yes: __res = 1, lock acquired */
		"3:				"
		: "=&z" (__orig), "=&r" (__res)
		: "r" (&count->counter)
		: "t");

	return __res;
}
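/*
 * Usage sketch (the caller shown is for illustration only): the return
 * value follows the mutex_trylock() convention -- 1 if the lock was
 * taken, 0 if it was already held.  Note that fail_fn is unused here:
 * the ll/sc loop above retries until movco.l succeeds, so the result
 * is always decisive.
 *
 *	if (__mutex_fastpath_trylock(&lock->count, NULL))
 *		do_locked_work();	(hypothetical)
 */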
#endif /* __ASM_SH_MUTEX_LLSC_H */