#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define CLI_STRING	"cli"
#define STI_STRING	"sti"
#endif /* CONFIG_PARAVIRT */

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere.
 *
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

static inline int __raw_spin_is_locked(raw_spinlock_t *x)
{
	return *(volatile signed char *)(&(x)->slock) <= 0;
}

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	asm volatile("\n1:\t"
		     LOCK_PREFIX " ; decb %0\n\t"
		     "jns 3f\n"
		     "2:\t"
		     "rep;nop\n\t"
		     "cmpb $0,%0\n\t"
		     "jle 2b\n\t"
		     "jmp 1b\n"
		     "3:\n\t"
		     : "+m" (lock->slock) : : "memory");
}
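
/*
 * For reference, this is roughly what the asm in __raw_spin_lock does,
 * as a C sketch (illustration only: the decrement has to be the single
 * atomic LOCK'ed "decb", so the real implementation stays in asm):
 *
 *	for (;;) {
 *		if (--lock->slock >= 0)		// LOCK decb ; jns 3f
 *			break;			// lock acquired
 *		while (lock->slock <= 0)	// cmpb $0,%0 ; jle 2b
 *			cpu_relax();		// rep;nop
 *	}
 */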

/*
 * It is easier for the lock validator if interrupts are not re-enabled
 * in the middle of a lock-acquire.  This is a performance feature anyway,
 * so we turn it off:
 *
 * NOTE: there's an irqs-on section here, which normally would have to be
 * irq-traced, but on CONFIG_TRACE_IRQFLAGS we never use this variant.
 */
#ifndef CONFIG_PROVE_LOCKING
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decb %0\n\t"
		"jns 5f\n"
		"2:\t"
		"testl $0x200, %1\n\t"
		"jz 4f\n\t"
		STI_STRING "\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpb $0, %0\n\t"
		"jle 3b\n\t"
		CLI_STRING "\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpb $0, %0\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock) : "r" (flags) : "memory");
}
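
/*
 * A C sketch of the flags variant above.  Bit 9 (0x200) of the caller's
 * saved EFLAGS is the IF flag: if interrupts were enabled before the
 * lock attempt, they are re-enabled while spinning and disabled again
 * before each retry:
 *
 *	for (;;) {
 *		if (--lock->slock >= 0)			// LOCK decb ; jns 5f
 *			break;				// lock acquired
 *		if (flags & 0x200) {			// testl $0x200, %1
 *			local_irq_enable();		// STI_STRING
 *			while (lock->slock <= 0)
 *				cpu_relax();		// rep;nop
 *			local_irq_disable();		// CLI_STRING
 *		} else {
 *			while (lock->slock <= 0)	// cmpb $0, %0 ; jg 1b
 *				cpu_relax();
 *		}
 *	}
 */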
#endif

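/*
 * Trylock: xchgb atomically swaps 0 into the lock byte.  The old value
 * is positive only if the lock was free, so "oldval > 0" means the
 * xchgb both tested and took the lock in a single step.
 */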
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	char oldval;
	asm volatile(
		"xchgb %b0,%1"
		: "=q" (oldval), "+m" (lock->slock)
		: "0" (0) : "memory");
	return oldval > 0;
}

/*
 * __raw_spin_unlock based on writing $1 to the low byte.
 * This method works.  Despite all the confusion.
 * (except on PPro SMP or if we are using OOSTORE, so we use xchgb there)
 * (PPro errata 66, 92)
 */

#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile("movb $1,%0" : "+m" (lock->slock) : : "memory");
}

#else

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	char oldval = 1;

	asm volatile("xchgb %b0, %1"
		     : "=q" (oldval), "+m" (lock->slock)
		     : "0" (oldval) : "memory");
}

#endif

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 *
 * The inline assembly is non-obvious.  Think about it.
 *
 * Changed to use the same technique as rw semaphores.  See
 * semaphore.h for details.  -ben
 *
 * The helpers are in arch/i386/kernel/semaphore.c
 */
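
/*
 * Illustrative counter states (RW_LOCK_BIAS is 0x01000000 on i386):
 *
 *	0x01000000	unlocked
 *	0x00ffffff	one reader holds the lock	(bias - 1)
 *	0x00fffffd	three readers			(bias - 3)
 *	0x00000000	write-locked			(bias - bias)
 *	negative	contended: a writer arrived while the lock was
 *			held, or a reader arrived while it was
 *			write-locked, driving the count below zero
 */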

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *x)
{
	return (int)(x)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *x)
{
	return (x)->lock == RW_LOCK_BIAS;
}
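
/*
 * Fast path for the two lock functions below: atomically subtract from
 * the counter and, if it stays non-negative (read) or hits zero (write),
 * the lock is ours.  On failure, the out-of-line helpers
 * (__read_lock_failed/__write_lock_failed) are presumed to back the
 * count out and spin before retrying.
 */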
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"
		     "call __read_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $" RW_LOCK_BIAS_STR ",(%0)\n\t"
		     "jz 1f\n"
		     "call __write_lock_failed\n\t"
		     "1:\n"
		     ::"a" (rw) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl $" RW_LOCK_BIAS_STR ", %0"
		     : "+m" (rw->lock) : : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __ASM_SPINLOCK_H */