/* atomic.S: Move this stuff here for better ICACHE hit rates.
 *
 * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu)
 */

#include <asm/ptrace.h>
#include <asm/psr.h>

	.text
	.align	4

	.globl  __atomic_begin
__atomic_begin:

#ifndef CONFIG_SMP
	/* Atomic exchange of a 32-bit word for sun4c, which has no
	 * 'swap' instruction: block interrupts around the load/store
	 * pair by raising PIL in the %psr.
	 */
	.globl	___xchg32_sun4c
___xchg32_sun4c:
	rd	%psr, %g3		! Read current %psr
	andcc	%g3, PSR_PIL, %g0	! Interrupts already off?
	bne	1f			! Yes, leave %psr alone
	 nop
	wr	%g3, PSR_PIL, %psr	! Disable interrupts
	nop; nop; nop			! Let the bits set
1:
	andcc	%g3, PSR_PIL, %g0	! Were interrupts off on entry?
	ld	[%g1], %g7		! Load the old value
	bne	1f			! Entry PIL was set, skip the restore
	 st	%g2, [%g1]		! Store the new value
	wr	%g3, 0x0, %psr		! Restore original %psr
	nop; nop; nop			! Let the bits set
1:
	mov	%g7, %g2		! Return old value in %g2
	jmpl	%o7 + 8, %g0		! Return to caller
	 mov	%g4, %o7		! Restore %o7

	/* sun4m/sun4d have 'swap', so the exchange is one instruction. */
	.globl	___xchg32_sun4md
___xchg32_sun4md:
	swap	[%g1], %g2		! Atomically swap %g2 with [%g1]
	jmpl	%o7 + 8, %g0		! Return to caller
	 mov	%g4, %o7		! Restore %o7
#endif
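
	/* For reference: these routines are only built for UP kernels
	 * (SMP-capable sparc32 machines all have 'swap', which callers
	 * use inline).  The UP xchg_u32() in asm-sparc/system.h reaches
	 * here through a boot-time-patched trampoline after saving its
	 * return address in %g4.  A rough sketch only; the
	 * ___f____xchg32 trampoline name and the exact asm constraints
	 * are from memory, check the header:
	 *
	 *	register unsigned long *ptr asm("g1") = (unsigned long *) m;
	 *	register unsigned long ret asm("g2") = val;
	 *	__asm__ __volatile__(
	 *		"mov	%%o7, %%g4\n\t"
	 *		"call	___f____xchg32\n\t"
	 *		" nop\n"
	 *		: "=&r" (ret)
	 *		: "0" (ret), "r" (ptr)
	 *		: "g3", "g4", "g7", "memory", "cc");
	 *	return ret;	// old value of *m
	 */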

	/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
	 * Really, some things here for SMP are overly clever, go read the header.
	 */
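	/* In short: on SMP the low byte of the atomic24_t word doubles
	 * as a spinlock (ldstub on [%g1 + 3], the low-order byte on
	 * big-endian SPARC), and the counter value lives in the upper
	 * 24 bits.  Because the new value is stored shifted left by 8,
	 * its low byte is zero, so the final 'st' both updates the
	 * counter and drops the lock in one instruction.
	 */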
	.globl	___atomic24_add
___atomic24_add:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	nop; nop; nop;			! Let the bits set
#ifdef CONFIG_SMP
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
	 ld	[%g1], %g7		! Load locked atomic24_t
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	add	%g7, %g2, %g2		! Add in argument
	sll	%g2, 8, %g7		! Transpose back to atomic24_t
	st	%g7, [%g1]		! Clever: This releases the lock as well.
#else
	ld	[%g1], %g7		! Load the atomic24_t (no lock on UP)
	add	%g7, %g2, %g2		! Add in argument
	st	%g2, [%g1]		! Store it back
#endif
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7		! Restore %o7
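
	/* The 'jmpl %o7, %g0' above is not a typo: the callers in
	 * asm-sparc/atomic.h bump %o7 by 8 in the delay slot of the
	 * call, so a plain jump through %o7 already lands past the
	 * delay slot.  Roughly (a sketch, not a verbatim copy of the
	 * header; the register pinning and constraints are assumed):
	 *
	 *	register volatile int *ptr asm("g1") = &v->counter;
	 *	register int increment asm("g2") = i;
	 *	__asm__ __volatile__(
	 *		"mov	%%o7, %%g4\n\t"
	 *		"call	___atomic24_add\n\t"
	 *		" add	%%o7, 8, %%o7\n"
	 *		: "=&r" (increment)
	 *		: "0" (increment), "r" (ptr)
	 *		: "g3", "g4", "g7", "memory", "cc");
	 *	return increment;	// new counter value
	 */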

	.globl	___atomic24_sub
___atomic24_sub:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	nop; nop; nop;			! Let the bits set
#ifdef CONFIG_SMP
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
	 ld	[%g1], %g7		! Load locked atomic24_t
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	sub	%g7, %g2, %g2		! Subtract argument
	sll	%g2, 8, %g7		! Transpose back to atomic24_t
	st	%g7, [%g1]		! Clever: This releases the lock as well
#else
	ld	[%g1], %g7		! Load the atomic24_t (no lock on UP)
	sub	%g7, %g2, %g2		! Subtract argument
	st	%g2, [%g1]		! Store it back
#endif
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	 mov	%g4, %o7		! Restore %o7

	.globl  __atomic_end
__atomic_end: