/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/cpu-features.h>
#include <asm/dsp.h>
#include <asm/ptrace.h>
#include <asm/war.h>
#include <asm/interrupt.h>

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	b = 2;
 *	memory_barrier();
 *	p = &b;				q = p;
 *					read_barrier_depends();
 *					d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	a = 2;
 *	memory_barrier();
 *	b = 3;				y = b;
 *					read_barrier_depends();
 *					x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends()	do { } while (0)

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while (0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)CKSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */

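/*
 * An illustrative sketch (not code from this file) of where iob() matters:
 * a driver stores to a device register and must be sure the store has left
 * the CPU write buffer before it starts polling a status register.  The
 * register offsets and flag below are hypothetical:
 *
 * <programlisting>
 *	writel(cmd, base + CMD_REG);
 *	iob();
 *	while (!(readl(base + STATUS_REG) & STATUS_DONE))
 *		cpu_relax();
 * </programlisting>
 */
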
#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif
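
/*
 * A minimal sketch of how the smp_*() variants pair up (illustrative only;
 * "data" and "flag" are assumed shared variables, both initially zero):
 *
 * <programlisting>
 *	CPU 0				CPU 1
 *
 *	data = 42;
 *	smp_wmb();
 *	flag = 1;			while (flag == 0)
 *						;
 *					smp_rmb();
 *					x = data;
 * </programlisting>
 *
 * Once CPU 1 sees "flag" set, the smp_rmb() guarantees it also sees
 * "data" as 42.  On UP kernels both sides collapse to compiler barriers,
 * which is all that is required there.
 */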

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
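
/*
 * set_mb() is for stores that must be globally visible before the caller
 * proceeds, e.g. the classic sleep/wakeup pattern (an illustrative sketch,
 * not code from this file):
 *
 * <programlisting>
 *	set_mb(current->state, TASK_INTERRUPTIBLE);
 *	if (!condition)
 *		schedule();
 * </programlisting>
 */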

/*
 * switch_to(prev, next, last) should switch from task "prev" to task
 * "next", storing in "last" the task we actually switched away from.
 */
extern asmlinkage void *resume(void *last, void *next, void *next_ti);

struct task_struct;

#ifdef CONFIG_MIPS_MT_FPAFF

/*
 * Handle the scheduler resume end of FPU affinity management.  We do this
 * inline to try to keep the overhead down. If we have been forced to run on
 * a "CPU" with an FPU because of a previous high level of FP computation,
 * but did not actually use the FPU during the most recent time-slice (CU1
 * isn't set), we undo the restriction on cpus_allowed.
 *
 * We're not calling set_cpus_allowed() here, because we have no need to
 * force prompt migration - we're already switching the current CPU to a
 * different thread.
 */

#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_fpu &&						\
	    (prev->thread.mflags & MF_FPUBOUND) &&			\
	    (!(KSTK_STATUS(prev) & ST0_CU1))) {				\
		prev->thread.mflags &= ~MF_FPUBOUND;			\
		prev->cpus_allowed = prev->thread.user_cpus_allowed;	\
	}								\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	next->thread.emulated_fp = 0;					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while (0)

#else
#define switch_to(prev,next,last)					\
do {									\
	if (cpu_has_dsp)						\
		__save_dsp(prev);					\
	(last) = resume(prev, next, task_thread_info(next));		\
	if (cpu_has_dsp)						\
		__restore_dsp(current);					\
} while (0)
#endif

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %3			# xchg_u32	\n"
		"	.set	mips0					\n"
		"	move	%2, %z4					\n"
		"	.set	mips3					\n"
		"	sc	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqzl	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long dummy;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	lld	%0, %3			# xchg_u64	\n"
		"	move	%2, %z4					\n"
		"	scd	%2, %1					\n"
		"	beqz	%2, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (retval), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		*m = val;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
#endif


/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg().  */
extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
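
/*
 * A minimal sketch of a test-and-set style lock built on xchg()/tas()
 * (illustrative only; real code should use the spinlock API instead):
 *
 * <programlisting>
 *	static volatile int lock;
 *
 *	while (tas(&lock))
 *		;
 *	...
 *	smp_mb();
 *	lock = 0;
 * </programlisting>
 *
 * tas() returns the old value, so the loop spins until it is the caller
 * that flips the lock from 0 to 1.
 */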

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	ll	%0, %2			# __cmpxchg_u32	\n"
		"	bne	%0, %z3, 2f				\n"
		"	.set	mips0					\n"
		"	move	$1, %z4					\n"
		"	.set	mips3					\n"
		"	sc	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}

#ifdef CONFIG_64BIT
static inline unsigned long __cmpxchg_u64(volatile __u64 * m, unsigned long old,
	unsigned long new)
{
	__u64 retval;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqzl	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noat					\n"
		"	.set	mips3					\n"
		"1:	lld	%0, %2			# __cmpxchg_u64	\n"
		"	bne	%0, %z3, 2f				\n"
		"	move	$1, %z4					\n"
		"	scd	$1, %1					\n"
		"	beqz	$1, 1b					\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"2:							\n"
		"	.set	pop					\n"
		: "=&r" (retval), "=R" (*m)
		: "R" (*m), "Jr" (old), "Jr" (new)
		: "memory");
	} else {
		unsigned long flags;

		local_irq_save(flags);
		retval = *m;
		if (retval == old)
			*m = new;
		local_irq_restore(flags);	/* implies memory barrier  */
	}

	return retval;
}
#else
extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
	volatile __u64 * m, unsigned long old, unsigned long new);
#define __cmpxchg_u64 __cmpxchg_u64_unsupported_on_32bit_kernels
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
	unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,old,new) ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(old), (unsigned long)(new),sizeof(*(ptr))))
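
/*
 * The usual cmpxchg() retry loop, as a sketch (illustrative only, not code
 * from this file): atomically increment a shared counter without a lock.
 *
 * <programlisting>
 *	static int counter;
 *	int old;
 *
 *	do {
 *		old = counter;
 *	} while (cmpxchg(&counter, old, old + 1) != old);
 * </programlisting>
 */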

extern void set_handler (unsigned long offset, void *addr, unsigned long len);
extern void set_uncached_handler (unsigned long offset, void *addr, unsigned long len);
extern void *set_vi_handler (int n, void *addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
extern void per_cpu_trap_init(void);

extern NORET_TYPE void die(const char *, struct pt_regs *);

static inline void die_if_kernel(const char *str, struct pt_regs *regs)
{
	if (unlikely(!user_mode(regs)))
		die(str, regs);
}

extern int stop_a_enabled;

/*
 * See include/asm-ia64/system.h; prevents deadlock on SMP
 * systems.
 */
#define __ARCH_WANT_UNLOCKED_CTXSW

#define arch_align_stack(x) (x)

#endif /* _ASM_SYSTEM_H */