#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8

/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable				*/
#define CR_A	(1 << 1)	/* Alignment abort enable		*/
#define CR_C	(1 << 2)	/* Dcache enable			*/
#define CR_W	(1 << 3)	/* Write buffer enable			*/
#define CR_P	(1 << 4)	/* 32-bit exception handler		*/
#define CR_D	(1 << 5)	/* 32-bit data address range		*/
#define CR_L	(1 << 6)	/* Implementation defined		*/
#define CR_B	(1 << 7)	/* Big endian				*/
#define CR_S	(1 << 8)	/* System MMU protection		*/
#define CR_R	(1 << 9)	/* ROM MMU protection			*/
#define CR_F	(1 << 10)	/* Implementation defined		*/
#define CR_Z	(1 << 11)	/* Implementation defined		*/
#define CR_I	(1 << 12)	/* Icache enable			*/
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000	*/
#define CR_RR	(1 << 14)	/* Round Robin cache replacement	*/
#define CR_L4	(1 << 15)	/* LDR pc can set T bit			*/
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode)	*/
#define CR_U	(1 << 22)	/* Unaligned access operation		*/
#define CR_XP	(1 << 23)	/* Extended page tables			*/
#define CR_VE	(1 << 24)	/* Vectored interrupts			*/

#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3

#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc	p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val)				\
		    :						\
		    : "cc");					\
		__val;						\
	})
#else
#define read_cpuid(reg) (processor_id)
#endif
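
/*
 * Usage sketch (illustrative, not part of the original header): the
 * main ID register identifies the CPU; for example, bits [31:24] hold
 * the implementer code (0x41 for ARM Ltd, 0x69 for Intel):
 *
 *	unsigned int id = read_cpuid(CPUID_ID);
 *	unsigned int implementer = (id >> 24) & 0xff;
 */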

/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for in some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
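
/*
 * Usage sketch (illustrative): pin an operand to a named register and
 * let the assembler verify the allocation.  GCC substitutes "%0" with
 * the register it actually chose, so the .ifnc directive fails the
 * build unless that register really is r0:
 *
 *	register unsigned long __r0 asm("r0") = val;
 *	asm volatile(__asmeq("%0", "r0")
 *		     "swi	0"
 *		     : : "r" (__r0));
 */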

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/irqflags.h>

struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;

struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);
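
/*
 * Usage sketch (hypothetical handler and fault code): install a handler
 * for a fault status code.  The handler returns 0 if it dealt with the
 * fault; a nonzero return falls through to the default action (raising
 * `sig' against the offending task):
 *
 *	static int my_abort(unsigned long addr, unsigned int fsr,
 *			    struct pt_regs *regs)
 *	{
 *		return 0;	// fault handled, retry the access
 *	}
 *
 *	hook_fault_code(16, my_abort, SIGBUS, "external abort");
 */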

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);
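
/*
 * Usage sketch (illustrative): cpu_architecture() returns one of the
 * CPU_ARCH_* values defined above, so callers can feature-gate on the
 * architecture revision, e.g.
 *
 *	if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
 *		// safe to use v5TE-only instructions
 */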

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);

/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;

	return 0;
}
#endif

#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define	cpu_is_xscale()	0
#else
#define	cpu_is_xscale()	1
#endif

#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;

#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 6
#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
				    : : "r" (0) : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				    : : "r" (0) : "memory")
#else
#define isb() __asm__ __volatile__ ("" : : : "memory")
#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
				    : : "r" (0) : "memory")
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif

#define mb()	barrier()
#define rmb()	barrier()
#define wmb()	barrier()
#define read_barrier_depends() do { } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	dmb()
#define smp_rmb()	dmb()
#define smp_wmb()	dmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	read_barrier_depends()
#endif /* CONFIG_SMP */
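
/*
 * Pairing sketch (illustrative, not part of the original header):
 * smp_wmb()/smp_rmb() order a flag against the data it publishes, so
 * a reader that observes the flag also observes the data:
 *
 *	writer:				reader:
 *	data = value;			if (flag) {
 *	smp_wmb();				smp_rmb();
 *	flag = 1;				use(data);
 *					}
 */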

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");

extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */

static inline unsigned int get_cr(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
}

static inline void set_cr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
	isb();
}
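
/*
 * Usage sketch (illustrative helper, not part of the original header):
 * control register bits are changed with a read-modify-write of CP15
 * c1, e.g. enabling alignment fault checking via CR_A.
 */
static inline void example_enable_alignment_aborts(void)
{
	set_cr(get_cr() | CR_A);	/* CR_A: alignment abort enable */
}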

#ifndef CONFIG_SMP
extern void adjust_cr(unsigned long mask, unsigned long set);
#endif

#define CPACC_FULL(n)		(3 << (n * 2))
#define CPACC_SVC(n)		(1 << (n * 2))
#define CPACC_DISABLE(n)	(0 << (n * 2))

static inline unsigned int get_copro_access(void)
{
	unsigned int val;
	asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
	  : "=r" (val) : : "cc");
	return val;
}

static inline void set_copro_access(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
	  : : "r" (val) : "cc");
	isb();
}
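
/*
 * Usage sketch (illustrative helper, not part of the original header):
 * grant full (user and kernel) access to coprocessors 10 and 11, the
 * usual first step in enabling VFP hardware on ARM.
 */
static inline void example_enable_vfp_access(void)
{
	set_copro_access(get_copro_access() |
			 CPACC_FULL(10) | CPACC_FULL(11));
}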

/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW

/*
 * switch_to(prev, next) should switch from task `prev' to `next';
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
} while (0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}
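
/*
 * Usage sketch (illustrative helper, not part of the original header):
 * xchg()/tas() as a crude test-and-set lock.  A zero return from tas()
 * means the old value was 0, i.e. the lock was free and the caller now
 * holds it.
 */
static inline int example_trylock(volatile unsigned long *lock)
{
	return tas(lock) == 0;	/* atomically swap in 1, test old value */
}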

extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif