| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* atomic.S: Move this stuff here for better ICACHE hit rates. | 
|  | 2 | * | 
|  | 3 | * Copyright (C) 1996 David S. Miller (davem@caipfs.rutgers.edu) | 
|  | 4 | */ | 
|  | 5 |  | 
|  | 6 | #include <linux/config.h> | 
|  | 7 | #include <asm/ptrace.h> | 
|  | 8 | #include <asm/psr.h> | 
|  | 9 |  | 
|  | 10 | .text | 
|  | 11 | .align	4 | 
|  | 12 |  | 
	/* __atomic_begin/__atomic_end bracket the atomic-helper region
	 * (see the matching __atomic_end label at the bottom of the file).
	 */
	.globl  __atomic_begin
__atomic_begin:

#ifndef CONFIG_SMP
	/* ___xchg32_sun4c: 32-bit exchange for sun4c (no swap insn).
	 * In:  %g1 = pointer, %g2 = new value
	 * Out: %g2 = old value at [%g1]
	 * Clobbers: %g3, %g7.
	 * Return convention is non-standard: returns via "jmpl %o7 + 8"
	 * and restores the caller's %o7 from %g4 in the delay slot
	 * (the caller stashed %o7 in %g4 before calling — presumably;
	 * confirm against the callers in asm-sparc/system.h).
	 * UP-only: atomicity comes from masking interrupts via PSR_PIL.
	 */
	.globl	___xchg32_sun4c
___xchg32_sun4c:
	rd	%psr, %g3		! %g3 = entry PSR (saved for restore)
	andcc	%g3, PSR_PIL, %g0	! interrupts already disabled on entry?
	bne	1f			! yes -> skip raising PIL
	nop				! branch delay slot
	wr	%g3, PSR_PIL, %psr	! wr xors: set PIL bits -> mask IRQs
	nop; nop; nop			! 3-insn delay for %psr write to settle
1:
	andcc	%g3, PSR_PIL, %g0	! re-test entry PIL (flags were consumed)
	ld	[%g1], %g7		! %g7 = old *ptr
	bne	1f			! entered with IRQs off -> don't restore PSR
	st	%g2, [%g1]		! delay slot (always executes): *ptr = new
	wr	%g3, 0x0, %psr		! restore entry %psr (re-enables IRQs)
	nop; nop; nop			! settle delay for %psr write
1:
	mov	%g7, %g2		! return old value in %g2
	jmpl	%o7 + 8, %g0		! non-standard return, +8 past call site
	mov	%g4, %o7		! delay slot: restore caller's %o7

	/* ___xchg32_sun4md: 32-bit exchange for sun4m/sun4d, which have
	 * the atomic swap instruction. Same register contract and the
	 * same %o7-from-%g4 return convention as ___xchg32_sun4c above.
	 */
	.globl	___xchg32_sun4md
___xchg32_sun4md:
	swap	[%g1], %g2		! atomic: %g2 <-> [%g1]
	jmpl	%o7 + 8, %g0		! non-standard return, +8 past call site
	mov	%g4, %o7		! delay slot: restore caller's %o7
#endif
|  | 43 |  | 
/* Read asm-sparc/atomic.h carefully to understand how this works for SMP.
 * Really, some things here for SMP are overly clever, go read the header.
 */

	/* ___atomic24_add: add to a 24-bit atomic counter.
	 * In:  %g1 = pointer to atomic24_t, %g2 = addend
	 * Out: %g2 = new (post-add) 24-bit value
	 * Clobbers: %g3, %g7.
	 * SMP layout (per atomic.h): low byte of the word is a spinlock
	 * byte, the signed counter lives in the upper 24 bits; storing the
	 * shifted result rewrites the whole word, zeroing the lock byte and
	 * thus releasing the lock in the same store.
	 * Returns via plain "jmpl %o7" (not +8) with %o7 restored from %g4 —
	 * matches the call sequence in atomic.h, see NOTE below.
	 */
	.globl	___atomic24_add
___atomic24_add:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	nop; nop; nop;			! Let the bits set
#ifdef CONFIG_SMP
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
	ld	[%g1], %g7		! Load locked atomic24_t (delay slot)
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	add	%g7, %g2, %g2		! Add in argument; %g2 = new value
	sll	%g2, 8, %g7		! Transpose back to atomic24_t
	st	%g7, [%g1]		! Clever: This releases the lock as well.
#else
	ld	[%g1], %g7		! Load locked atomic24_t
	add	%g7, %g2, %g2		! Add in argument; %g2 = new value
	st	%g2, [%g1]		! Store it back
#endif
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	mov	%g4, %o7		! Restore %o7 (delay slot)
|  | 72 |  | 
	/* ___atomic24_sub: subtract from a 24-bit atomic counter.
	 * In:  %g1 = pointer to atomic24_t, %g2 = subtrahend
	 * Out: %g2 = new (post-subtract) 24-bit value
	 * Clobbers: %g3, %g7.
	 * Exact mirror of ___atomic24_add above: IRQs masked via PSR_PIL,
	 * SMP uses the byte lock at [%g1 + 3], and the full-word store of
	 * the shifted result releases the lock. Same non-standard
	 * "jmpl %o7" / %o7-from-%g4 return convention.
	 */
	.globl	___atomic24_sub
___atomic24_sub:
	rd	%psr, %g3		! Keep the code small, old way was stupid
	nop; nop; nop;			! Let the bits set
	or	%g3, PSR_PIL, %g7	! Disable interrupts
	wr	%g7, 0x0, %psr		! Set %psr
	nop; nop; nop;			! Let the bits set
#ifdef CONFIG_SMP
1:	ldstub	[%g1 + 3], %g7		! Spin on the byte lock for SMP.
	orcc	%g7, 0x0, %g0		! Did we get it?
	bne	1b			! Nope...
	ld	[%g1], %g7		! Load locked atomic24_t (delay slot)
	sra	%g7, 8, %g7		! Get signed 24-bit integer
	sub	%g7, %g2, %g2		! Subtract argument; %g2 = new value
	sll	%g2, 8, %g7		! Transpose back to atomic24_t
	st	%g7, [%g1]		! Clever: This releases the lock as well
#else
	ld	[%g1], %g7		! Load locked atomic24_t
	sub	%g7, %g2, %g2		! Subtract argument; %g2 = new value
	st	%g2, [%g1]		! Store it back
#endif
	wr	%g3, 0x0, %psr		! Restore original PSR_PIL
	nop; nop; nop;			! Let the bits set
	jmpl	%o7, %g0		! NOTE: not + 8, see callers in atomic.h
	mov	%g4, %o7		! Restore %o7 (delay slot)

	/* End marker of the atomic-helper region (pairs with __atomic_begin). */
	.globl  __atomic_end
__atomic_end: