#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <asm/types.h>

/*
 * switch_to() switches tasks from prev to next, returning the
 * previously running task in last.
 */

#define switch_to(prev, next, last) do {				\
	struct task_struct *__last;					\
	register unsigned long *__ts1 __asm__ ("r1") = &prev->thread.sp; \
	register unsigned long *__ts2 __asm__ ("r2") = &prev->thread.pc; \
	register unsigned long *__ts4 __asm__ ("r4") = (unsigned long *)prev; \
	register unsigned long *__ts5 __asm__ ("r5") = (unsigned long *)next; \
	register unsigned long *__ts6 __asm__ ("r6") = &next->thread.sp; \
	register unsigned long __ts7 __asm__ ("r7") = next->thread.pc;	\
	__asm__ __volatile__ (".balign 4\n\t"				\
			      "stc.l	gbr, @-r15\n\t"			\
			      "sts.l	pr, @-r15\n\t"			\
			      "mov.l	r8, @-r15\n\t"			\
			      "mov.l	r9, @-r15\n\t"			\
			      "mov.l	r10, @-r15\n\t"			\
			      "mov.l	r11, @-r15\n\t"			\
			      "mov.l	r12, @-r15\n\t"			\
			      "mov.l	r13, @-r15\n\t"			\
			      "mov.l	r14, @-r15\n\t"			\
			      "mov.l	r15, @r1	! save SP\n\t"	\
			      "mov.l	@r6, r15	! change to new stack\n\t" \
			      "mova	1f, %0\n\t"			\
			      "mov.l	%0, @r2		! save PC\n\t"	\
			      "mov.l	2f, %0\n\t"			\
			      "jmp	@%0		! call __switch_to\n\t" \
			      " lds	r7, pr		! with return to new PC\n\t" \
			      ".balign	4\n"				\
			      "2:\n\t"					\
			      ".long	__switch_to\n"			\
			      "1:\n\t"					\
			      "mov.l	@r15+, r14\n\t"			\
			      "mov.l	@r15+, r13\n\t"			\
			      "mov.l	@r15+, r12\n\t"			\
			      "mov.l	@r15+, r11\n\t"			\
			      "mov.l	@r15+, r10\n\t"			\
			      "mov.l	@r15+, r9\n\t"			\
			      "mov.l	@r15+, r8\n\t"			\
			      "lds.l	@r15+, pr\n\t"			\
			      "ldc.l	@r15+, gbr\n\t"			\
			      : "=z" (__last)				\
			      : "r" (__ts1), "r" (__ts2), "r" (__ts4),	\
				"r" (__ts5), "r" (__ts6), "r" (__ts7)	\
			      : "r3", "t");				\
	last = __last;							\
} while (0)
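
/*
 * Usage sketch (illustrative only): the core scheduler's
 * context_switch() invokes this macro roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * after which the last argument names the task that was running
 * immediately before `next' resumed here.
 */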

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#ifdef CONFIG_CPU_SH4A
#define __icbi()			\
{					\
	unsigned long __addr;		\
	__addr = 0xa8000000;		\
	__asm__ __volatile__(		\
		"icbi	%0\n\t"		\
		: /* no output */	\
		: "m" (__m(__addr)));	\
}
#endif

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

static __inline__ unsigned long tas(volatile int *m)
{ /* #define tas(ptr) (xchg((ptr),1)) */
	unsigned long retval;

	__asm__ __volatile__ ("tas.b	@%1\n\t"
			      "movt	%0"
			      : "=r" (retval): "r" (m): "t", "memory");
	return retval;
}
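
/*
 * Usage sketch (illustrative only; `my_lock' is a hypothetical variable,
 * real code should use the spinlock API): tas.b sets T when the byte was
 * zero and then sets its top bit, so tas() returns nonzero exactly when
 * the caller obtained the byte.
 *
 *	static volatile int my_lock;
 *
 *	while (!tas(&my_lock))
 *		;			(spin until we own the byte)
 *	...critical section...
 *	my_lock = 0;			(release)
 */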

extern void __xchg_called_with_bad_pointer(void);

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#ifdef CONFIG_CPU_SH4A
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif
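
/*
 * Usage sketch (illustrative only; ctrl_outl() and CCR are the I/O
 * accessor and cache control register address assumed from headers of
 * this era): ctrl_barrier() follows a control register update so that
 * the write has taken effect before dependent instructions run.
 *
 *	ctrl_outl(ccr, CCR);	(update the cache control register)
 *	ctrl_barrier();		(8 nops on legacy parts, icbi on sh4a)
 */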

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)

/* Interrupt Control */
static __inline__ void local_irq_enable(void)
{
	unsigned long __dummy0, __dummy1;

	__asm__ __volatile__("stc	sr, %0\n\t"
			     "and	%1, %0\n\t"
			     "stc	r6_bank, %1\n\t"
			     "or	%1, %0\n\t"
			     "ldc	%0, sr"
			     : "=&r" (__dummy0), "=r" (__dummy1)
			     : "1" (~0x000000f0)
			     : "memory");
}

static __inline__ void local_irq_disable(void)
{
	unsigned long __dummy;
	__asm__ __volatile__("stc	sr, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr"
			     : "=&z" (__dummy)
			     : /* no inputs */
			     : "memory");
}

#define local_save_flags(x) \
	__asm__("stc	sr, %0; and	#0xf0, %0" : "=&z" (x) :/**/: "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags != 0);			\
})

static __inline__ unsigned long local_irq_save(void)
{
	unsigned long flags, __dummy;

	__asm__ __volatile__("stc	sr, %1\n\t"
			     "mov	%1, %0\n\t"
			     "or	#0xf0, %0\n\t"
			     "ldc	%0, sr\n\t"
			     "mov	%1, %0\n\t"
			     "and	#0xf0, %0"
			     : "=&z" (flags), "=&r" (__dummy)
			     :/**/
			     : "memory" );
	return flags;
}

#ifdef DEBUG_CLI_STI
static __inline__ void local_irq_restore(unsigned long x)
{
	if ((x & 0x000000f0) != 0x000000f0)
		local_irq_enable();
	else {
		unsigned long flags;
		local_save_flags(flags);

		if (flags == 0) {
			extern void dump_stack(void);
			printk(KERN_ERR "BUG!\n");
			dump_stack();
			local_irq_disable();
		}
	}
}
#else
#define local_irq_restore(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
} while (0)
#endif
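
/*
 * Usage sketch (illustrative only): the usual pattern for a short
 * critical section that must not race with a local interrupt handler.
 * `flags' receives the IMASK bits of SR, and local_irq_restore() only
 * re-enables interrupts if they were enabled on entry, so the pair
 * nests safely.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...touch data shared with an interrupt handler...
 *	local_irq_restore(flags);
 */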

#define really_restore_flags(x) do {			\
	if ((x & 0x000000f0) != 0x000000f0)		\
		local_irq_enable();			\
	else						\
		local_irq_disable();			\
} while (0)

/*
 * Jump to P2 area.
 * When handling TLB or caches, we need to do it from P2 area.
 */
#define jump_to_P2()			\
do {					\
	unsigned long __dummy;		\
	__asm__ __volatile__(		\
		"mov.l	1f, %0\n\t"	\
		"or	%1, %0\n\t"	\
		"jmp	@%0\n\t"	\
		" nop\n\t"		\
		".balign 4\n"		\
		"1:	.long 2f\n"	\
		"2:"			\
		: "=&r" (__dummy)	\
		: "r" (0x20000000));	\
} while (0)

/*
 * Back to P1 area.
 */
#define back_to_P1()				\
do {						\
	unsigned long __dummy;			\
	ctrl_barrier();				\
	__asm__ __volatile__(			\
		"mov.l	1f, %0\n\t"		\
		"jmp	@%0\n\t"		\
		" nop\n\t"			\
		".balign 4\n"			\
		"1:	.long 2f\n"		\
		"2:"				\
		: "=&r" (__dummy));		\
} while (0)
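
/*
 * Usage sketch (illustrative only): P2 is the uncached mirror of
 * physical memory (jump_to_P2() simply ORs 0x20000000 into the return
 * address), so cache and TLB handling code brackets its register pokes
 * like this:
 *
 *	jump_to_P2();
 *	...write CCR/MMUCR, walk the cache arrays...
 *	back_to_P1();
 */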

/* For spinlocks etc */
#define local_irq_save(x)	x = local_irq_save()

static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long xchg_u8(volatile unsigned char * m, unsigned long val)
{
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val & 0xff;
	local_irq_restore(flags);
	return retval;
}

static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
		break;
	case 1:
		return xchg_u8(ptr, x);
		break;
	}
	__xchg_called_with_bad_pointer();
	return x;
}
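
/*
 * Usage sketch (illustrative only; `owner' is a hypothetical flag):
 * xchg() swaps in a new value with interrupts disabled and hands back
 * the old one, which is enough to build a one-shot claim.
 *
 *	static int owner;
 *
 *	if (xchg(&owner, 1) == 0) {
 *		...we were first; do the one-time setup...
 *	}
 */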

static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
	unsigned long new)
{
	__u32 retval;
	unsigned long flags;

	local_irq_save(flags);
	retval = *m;
	if (retval == old)
		*m = new;
	local_irq_restore(flags);	/* implies memory barrier */
	return retval;
}

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
  ({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
  })
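
/*
 * Usage sketch (illustrative only; `counter' is a hypothetical
 * variable): the canonical compare-and-swap retry loop.  cmpxchg()
 * returns the value it actually found, so the loop repeats until no
 * other context raced with the update.
 *
 *	static int counter;
 *	int old, seen;
 *
 *	do {
 *		old = counter;
 *		seen = cmpxchg(&counter, old, old + 1);
 *	} while (seen != old);
 */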

/* XXX
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#define arch_align_stack(x) (x)

#endif /* __ASM_SH_SYSTEM_H */