#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/stringify.h>	/* __stringify(), used by read_cpuid() */
#include <asm/memory.h>

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
#define CPU_ARCH_ARMv7		9

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)							\
	({								\
		unsigned int __val;					\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)					\
		    :							\
		    : "cc");						\
		__val;							\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif

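/*
 * Example: read the main ID and cache type registers (a usage sketch):
 *
 *	unsigned int id    = read_cpuid(CPUID_ID);
 *	unsigned int ctype = read_cpuid(CPUID_CACHETYPE);
 */
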
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

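/*
 * Example: pin an operand to a specific register and have the assembler
 * verify the allocation (a sketch; the choice of r0 is illustrative):
 *
 *	register unsigned long r0v asm("r0") = val;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "svc	#0"
 *		     : "+r" (r0v));
 */
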
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

#define __exception	__attribute__((section(".exception.text")))

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		    unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#ifndef CONFIG_SMP
#define mb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb()	do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#else
#define mb()		dmb()
#define rmb()		dmb()
#define wmb()		dmb()
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#endif
#define read_barrier_depends()		do { } while(0)
#define smp_read_barrier_depends()	do { } while(0)

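/*
 * Example: make a descriptor update visible before the MMIO write that
 * tells the device about it (a sketch; desc and doorbell are illustrative):
 *
 *	desc->addr = buf_dma;
 *	wmb();
 *	writel(1, dev->doorbell);
 */
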
#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}

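/*
 * Example: enable alignment aborts by setting CR_A (a sketch; kernel code
 * normally goes through adjust_cr() so cr_alignment stays in sync):
 *
 *	set_cr(get_cr() | CR_A);
 */
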
#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}

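/*
 * Example: grant full (user and kernel) access to cp10/cp11, as is done
 * before enabling VFP (a sketch):
 *
 *	set_copro_access(get_copro_access() | CPACC_FULL(10) | CPACC_FULL(11));
 */
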
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

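/*
 * Example: take a simple test-and-set style lock with xchg() (a sketch;
 * "taken" is illustrative, real code uses the spinlock primitives):
 *
 *	while (xchg(&taken, 1) != 0)
 *		cpu_relax();
 */
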
extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif