#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#if __LINUX_ARM_ARCH__ < 6
# include <asm-generic/mutex-xchg.h>
#else

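/*
 * ARMv6+ fastpaths built on ldrex/strex.  As in the generic
 * decrement-based mutex fastpath, the count is assumed to be 1 when
 * the mutex is unlocked, 0 when it is locked, and negative when there
 * may be waiters.
 *
 * __mutex_fastpath_lock: atomically decrement the count.  If the new
 * value is non-zero, or the store-exclusive fails, fall back to
 * fail_fn() (the slowpath); a failed strex is treated as contention
 * rather than retried.
 */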
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);
	else
		smp_rmb();
}

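/*
 * __mutex_fastpath_lock_retval: same decrement as above, but the
 * return value of fail_fn() is propagated on the contended (or failed
 * store-exclusive) path; 0 is returned when the lock was taken on the
 * fastpath.
 */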
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"
		"sub	%0, %0, #1	\n\t"
		"strex	%1, %0, [%2]	"

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);
	else
		smp_rmb();

	return __res;
}

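/*
 * __mutex_fastpath_unlock: increment the count after a write barrier.
 * If the old value was non-zero (waiters may be queued) or the
 * store-exclusive failed, call fail_fn() so the slowpath can wake up
 * any waiters.
 */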
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	smp_wmb();
	__asm__ (

		"ldrex	%0, [%3]	\n\t"
		"add	%1, %0, #1	\n\t"
		"strex	%2, %1, [%3]	"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "cc", "memory");

	__orig |= __ex_flag;
	if (unlikely(__orig != 0))
		fail_fn(count);
}

/*
 * The failure case of the unlock fastpath above does not leave the
 * mutex fully unlocked, so the unlock slowpath must release it itself.
 */
#define __mutex_slowpath_needs_to_unlock()	1

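/*
 * __mutex_fastpath_trylock: attempt the decrement only when the count
 * is exactly 1 (unlocked), retrying if the store-exclusive fails.
 * Returns 1 on success and 0 if the mutex is already held; fail_fn is
 * not used here.
 */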
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"1: ldrex	%0, [%3]	\n\t"
		"subs	%1, %0, #1	\n\t"
		"strexeq	%2, %1, [%3]	\n\t"
		"movlt	%0, #0	\n\t"
		"cmpeq	%2, #0	\n\t"
		"bgt	1b	"

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	if (__orig)
		smp_rmb();

	return __orig;
}

#endif	/* __LINUX_ARM_ARCH__ < 6 */
#endif	/* _ASM_MUTEX_H */