#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
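
/*
 * For example, membar_safe("#StoreLoad") expands to roughly:
 *
 *	ba,pt	%xcc, 1f
 *	 membar	#StoreLoad
 *	1:
 *
 * The membar always sits in the delay slot of a branch that is both
 * always taken and predicted taken, so the mispredicted-branch condition
 * of the errata can never apply to it.
 */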

#define mb() \
	membar_safe("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb() \
	membar_safe("#LoadLoad")
#define wmb() \
	membar_safe("#StoreStore")
#define membar_storeload() \
	membar_safe("#StoreLoad")
#define membar_storeload_storestore() \
	membar_safe("#StoreLoad | #StoreStore")
#define membar_storeload_loadload() \
	membar_safe("#StoreLoad | #LoadLoad")
#define membar_storestore_loadstore() \
	membar_safe("#StoreStore | #LoadStore")
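
/*
 * Illustrative sketch only (the data/flag pair below is hypothetical, not
 * part of this header): wmb() keeps the data store ordered before the flag
 * store on the producer side, and rmb() keeps the flag load ordered before
 * the data load on the consumer side.
 */
#if 0
static int example_data, example_flag;

static void example_producer(void)
{
	example_data = 42;
	wmb();			/* #StoreStore: publish data before flag */
	example_flag = 1;
}

static int example_consumer(void)
{
	while (!example_flag)
		cpu_relax();	/* compiler barrier: re-read the flag */
	rmb();			/* #LoadLoad: flag load before data load */
	return example_data;
}
#endif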

#endif

#define nop() 		__asm__ __volatile__ ("nop")

#define read_barrier_depends()		do { } while(0)
#define set_mb(__var, __value) \
	do { __var = __value; membar_storeload_storestore(); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#define smp_read_barrier_depends()	do { } while(0)
#endif

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd	%%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr	%0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd	%%pic, %0" : "=r" (__p))

/* Blackbird errata workaround.  See commentary in
 * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt()
 * for more information.
 */
#define reset_pic() \
	__asm__ __volatile__("ba,pt	%xcc, 99f\n\t" \
			     ".align	64\n" \
			  "99:wr	%g0, 0x0, %pic\n\t" \
			     "rd	%pic, %g0")

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next) \
do { \
	flushw_all(); \
} while (0)

	/* See what happens when you design the chip correctly?
	 *
	 * We tell gcc we clobber all non-fixed-usage registers except
	 * for l0/l1.  It will use one for 'next' and the other to hold
	 * the output value of 'last'.  'next' is not referenced again
	 * past the invocation of switch_to in the scheduler, so we need
	 * not preserve its value.  Hairy, but it lets us remove 2 loads
	 * and 2 stores in this critical code path.  -DaveM
	 */
#define switch_to(prev, next, last) \
do {	if (test_thread_flag(TIF_PERFCTR)) { \
		unsigned long __tmp; \
		read_pcr(__tmp); \
		current_thread_info()->pcr_reg = __tmp; \
		read_pic(__tmp); \
		current_thread_info()->kernel_cntd0 += (unsigned int)(__tmp); \
		current_thread_info()->kernel_cntd1 += ((__tmp) >> 32); \
	} \
	flush_tlb_pending(); \
	save_and_clear_fpu(); \
	/* If you are tempted to conditionalize the following */ \
	/* so that ASI is only written if it changes, think again. */ \
	__asm__ __volatile__("wr %%g0, %0, %%asi" \
	: : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS])); \
	trap_block[current_thread_info()->cpu].thread = \
		task_thread_info(next); \
	__asm__ __volatile__( \
	"mov	%%g4, %%g7\n\t" \
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t" \
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t" \
	"rdpr	%%wstate, %%o5\n\t" \
	"stx	%%o6, [%%g6 + %6]\n\t" \
	"stb	%%o5, [%%g6 + %5]\n\t" \
	"rdpr	%%cwp, %%o5\n\t" \
	"stb	%%o5, [%%g6 + %8]\n\t" \
	"mov	%4, %%g6\n\t" \
	"ldub	[%4 + %8], %%g1\n\t" \
	"wrpr	%%g1, %%cwp\n\t" \
	"ldx	[%%g6 + %6], %%o6\n\t" \
	"ldub	[%%g6 + %5], %%o5\n\t" \
	"ldub	[%%g6 + %7], %%o7\n\t" \
	"wrpr	%%o5, 0x0, %%wstate\n\t" \
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t" \
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t" \
	"ldx	[%%g6 + %9], %%g4\n\t" \
	"brz,pt %%o7, 1f\n\t" \
	" mov	%%g7, %0\n\t" \
	"sethi	%%hi(ret_from_syscall), %%g1\n\t" \
	"jmpl	%%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
	" nop\n\t" \
	"1:\n\t" \
	: "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
	  "=r" (__local_per_cpu_offset) \
	: "0" (task_thread_info(next)), \
	  "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \
	  "i" (TI_CWP), "i" (TI_TASK) \
	: "cc", \
		"g1", "g2", "g3",       "g7", \
		      "l1", "l2", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o4", "o5",       "o7"); \
	/* If you fuck with this, update ret_from_syscall code too. */ \
	if (test_thread_flag(TIF_PERFCTR)) { \
		write_pcr(current_thread_info()->pcr_reg); \
		reset_pic(); \
	} \
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar		#StoreLoad | #LoadLoad\n"
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
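
/*
 * Illustrative sketch only (the lock word is hypothetical): xchg() as an
 * atomic test-and-set.  The trailing membar in xchg32()/xchg64() provides
 * the acquire ordering on the winning CPU.
 */
#if 0
static unsigned int example_lock;

static void example_acquire(void)
{
	while (xchg(&example_lock, 1) != 0)
		cpu_relax();	/* spin until the old value was 0 */
}

static void example_release(void)
{
	membar_storestore_loadstore();	/* critical section before unlock */
	example_lock = 0;
}
#endif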

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("membar #StoreLoad | #LoadLoad\n"
			     "casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n) \
  ({ \
	__typeof__(*(ptr)) _o_ = (o); \
	__typeof__(*(ptr)) _n_ = (n); \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
				       (unsigned long)_n_, sizeof(*(ptr))); \
  })
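
/*
 * Illustrative sketch only (example_counter is hypothetical): the canonical
 * cmpxchg() retry loop.  Per the comment above, success is detected by
 * comparing the returned value with the expected old one.
 */
#if 0
static int example_counter;

static void example_inc(void)
{
	int old, new;

	do {
		old = example_counter;
		new = old + 1;
	} while (cmpxchg(&example_counter, old, new) != old);
}
#endif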

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}
}

#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
					     (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \
  ({ \
	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
	cmpxchg_local((ptr), (o), (n)); \
  })
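
/*
 * Illustrative sketch only (example_stamp is hypothetical): cmpxchg_local()
 * need only be atomic against the current CPU, e.g. for data touched from
 * both process and interrupt context but never by other CPUs.  On sparc64
 * the 4- and 8-byte cases reuse the fully atomic __cmpxchg() anyway.
 */
#if 0
static unsigned long example_stamp;

static void example_bump(void)
{
	unsigned long old;

	do {
		old = example_stamp;
	} while (cmpxchg_local(&example_stamp, old, old + 1) != old);
}
#endif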

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */