| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | #ifndef __ASM_SH_SYSTEM_H | 
|  | 2 | #define __ASM_SH_SYSTEM_H | 
|  | 3 |  | 
|  | 4 | /* | 
|  | 5 | * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima | 
|  | 6 | * Copyright (C) 2002 Paul Mundt | 
|  | 7 | */ | 
|  | 8 |  | 
| Paul Mundt | afbfb52 | 2006-12-04 18:17:28 +0900 | [diff] [blame] | 9 | #include <linux/irqflags.h> | 
| Paul Mundt | 310f796 | 2007-03-28 17:26:19 +0900 | [diff] [blame] | 10 | #include <linux/compiler.h> | 
| Paul Mundt | e08f457 | 2007-05-14 12:52:56 +0900 | [diff] [blame] | 11 | #include <linux/linkage.h> | 
| Tom Rini | e4e3b5c | 2006-09-27 11:28:20 +0900 | [diff] [blame] | 12 | #include <asm/types.h> | 
| Paul Mundt | 3a2e117 | 2007-05-01 16:33:10 +0900 | [diff] [blame] | 13 | #include <asm/ptrace.h> | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 14 |  | 
| Paul Mundt | 98c4ecd | 2007-12-10 16:21:57 +0900 | [diff] [blame] | 15 | #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 16 |  | 
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
/*
 * __icbi(): execute an icbi (instruction cache block invalidate) on a
 * fixed address (0xa8000000 — presumably an uncached mapping; confirm
 * against the memory map). On sh4a/sh5 this serves as the control
 * register write barrier (see the ctrl_barrier() comment below).
 */
#define __icbi()			\
{					\
	unsigned long __addr;		\
	__addr = 0xa8000000;		\
	__asm__ __volatile__(		\
		"icbi   %0\n\t"		\
		: /* no output */	\
		: "m" (__m(__addr)));	\
}
#endif
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 28 |  | 
| Paul Mundt | 2984762 | 2006-09-27 14:57:44 +0900 | [diff] [blame] | 29 | /* | 
|  | 30 | * A brief note on ctrl_barrier(), the control register write barrier. | 
|  | 31 | * | 
|  | 32 | * Legacy SH cores typically require a sequence of 8 nops after | 
|  | 33 | * modification of a control register in order for the changes to take | 
|  | 34 | * effect. On newer cores (like the sh4a and sh5) this is accomplished | 
|  | 35 | * with icbi. | 
|  | 36 | * | 
|  | 37 | * Also note that on sh4a in the icbi case we can forego a synco for the | 
|  | 38 | * write barrier, as it's not necessary for control registers. | 
|  | 39 | * | 
|  | 40 | * Historically we have only done this type of barrier for the MMUCR, but | 
|  | 41 | * it's also necessary for the CCR, so we make it generic here instead. | 
|  | 42 | */ | 
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
/* sh4a/sh5: synco provides the hardware memory barrier; icbi is enough
 * for control register writes (no synco needed — see comment above). */
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi()
#define read_barrier_depends()	do { } while(0)
#else
/* Legacy cores: a compiler barrier suffices for mb/rmb/wmb; control
 * register changes need the 8-nop sequence described above. */
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 56 |  | 
|  | 57 | #ifdef CONFIG_SMP | 
|  | 58 | #define smp_mb()	mb() | 
|  | 59 | #define smp_rmb()	rmb() | 
|  | 60 | #define smp_wmb()	wmb() | 
|  | 61 | #define smp_read_barrier_depends()	read_barrier_depends() | 
|  | 62 | #else | 
|  | 63 | #define smp_mb()	barrier() | 
|  | 64 | #define smp_rmb()	barrier() | 
|  | 65 | #define smp_wmb()	barrier() | 
|  | 66 | #define smp_read_barrier_depends()	do { } while(0) | 
|  | 67 | #endif | 
|  | 68 |  | 
| Paul Mundt | 357d594 | 2007-06-11 15:32:07 +0900 | [diff] [blame] | 69 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 70 |  | 
| Stuart Menefy | 1efe4ce | 2007-11-30 16:12:36 +0900 | [diff] [blame] | 71 | #ifdef CONFIG_GUSA_RB | 
|  | 72 | #include <asm/cmpxchg-grb.h> | 
|  | 73 | #else | 
|  | 74 | #include <asm/cmpxchg-irq.h> | 
|  | 75 | #endif | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 76 |  | 
/* Deliberately never defined: referencing it for an unsupported size
 * turns a bad xchg() into a link-time failure. */
extern void __xchg_called_with_bad_pointer(void);

/*
 * __xchg(): dispatch on operand size to the available exchange
 * primitives (only 32-bit and 8-bit variants exist here; the backends
 * come from cmpxchg-grb.h or cmpxchg-irq.h, included above).
 */
#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

/* xchg(): exchange *ptr with x, evaluating to the previous value cast
 * back to the pointee's type. */
#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 101 |  | 
| Tom Rini | e4e3b5c | 2006-09-27 11:28:20 +0900 | [diff] [blame] | 102 | /* This function doesn't exist, so you'll get a linker error | 
|  | 103 | * if something tries to do an invalid cmpxchg(). */ | 
|  | 104 | extern void __cmpxchg_called_with_bad_pointer(void); | 
|  | 105 |  | 
|  | 106 | #define __HAVE_ARCH_CMPXCHG 1 | 
|  | 107 |  | 
/*
 * __cmpxchg() - size-dispatched compare-and-exchange helper.
 *
 * Only 4-byte objects are supported; every other size falls through to
 * the intentionally-undefined __cmpxchg_called_with_bad_pointer(),
 * converting the mistake into a link-time error. Returns the value
 * previously held at ptr (or 'old' on the error path, which is never
 * reached in a correct build).
 */
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
		unsigned long new, int size)
{
	if (size == 4)
		return __cmpxchg_u32(ptr, old, new);

	__cmpxchg_called_with_bad_pointer();
	return old;
}
|  | 118 |  | 
/*
 * cmpxchg(): compare *ptr against o and, on match, replace it with n
 * (via __cmpxchg above); evaluates to the prior value of *ptr cast back
 * to the pointee's type. The _o_/_n_ temporaries type-check o and n
 * against *(ptr) before widening to unsigned long.
 */
#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
		    (unsigned long)_n_, sizeof(*(ptr))); \
  })
|  | 126 |  | 
| Paul Mundt | 3a2e117 | 2007-05-01 16:33:10 +0900 | [diff] [blame] | 127 | extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn)); | 
|  | 128 |  | 
| Paul Mundt | 1f66658 | 2006-10-19 16:20:25 +0900 | [diff] [blame] | 129 | extern void *set_exception_table_vec(unsigned int vec, void *handler); | 
|  | 130 |  | 
/*
 * set_exception_table_evt() - install an exception handler keyed by
 * event code rather than vector number. Event codes are spaced 32
 * apart, so the vector index is evt >> 5; delegates to
 * set_exception_table_vec() and returns whatever it returns
 * (the previous handler, presumably — confirm at the definition).
 */
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	unsigned int vec = evt >> 5;

	return set_exception_table_vec(vec, handler);
}
|  | 135 |  | 
/*
 * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
 */
#ifdef CONFIG_CPU_SH2A
/* SH-2A: size depends on the opcode, decided at runtime */
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
/* all other 32-bit SH: fixed 16-bit instructions */
#define instruction_size(insn)	(2)
#else
/* !CONFIG_SUPERH32 (sh5): fixed 32-bit instructions */
#define instruction_size(insn)	(4)
#endif
|  | 146 |  | 
| Stuart Menefy | cbaa118 | 2007-11-30 17:06:36 +0900 | [diff] [blame] | 147 | extern unsigned long cached_to_uncached; | 
|  | 148 |  | 
| Paul Mundt | b9e393c | 2008-03-07 17:19:58 +0900 | [diff] [blame] | 149 | extern struct dentry *sh_debugfs_root; | 
|  | 150 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 151 | /* XXX | 
|  | 152 | * disable hlt during certain critical i/o operations | 
|  | 153 | */ | 
|  | 154 | #define HAVE_DISABLE_HLT | 
|  | 155 | void disable_hlt(void); | 
|  | 156 | void enable_hlt(void); | 
|  | 157 |  | 
| Paul Mundt | e08f457 | 2007-05-14 12:52:56 +0900 | [diff] [blame] | 158 | void default_idle(void); | 
| Paul Mundt | aba1030 | 2007-09-21 18:32:32 +0900 | [diff] [blame] | 159 | void per_cpu_trap_init(void); | 
| Paul Mundt | e08f457 | 2007-05-14 12:52:56 +0900 | [diff] [blame] | 160 |  | 
|  | 161 | asmlinkage void break_point_trap(void); | 
| Paul Mundt | 5a4f7c6 | 2007-11-20 18:08:06 +0900 | [diff] [blame] | 162 |  | 
#ifdef CONFIG_SUPERH32
/*
 * 32-bit SH trap handler ABI: r4-r7 arrive as arguments, followed by
 * the trap frame by value. TRAP_HANDLER_DECL recovers a struct pt_regs
 * pointer from the by-value __regs argument via RELOC_HIDE() and pulls
 * the trap vector out of regs->tra.
 */
#define BUILD_TRAP_HANDLER(name)					\
asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5,	\
				    unsigned long r6, unsigned long r7,	\
				    struct pt_regs __regs)

#define TRAP_HANDLER_DECL				\
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);	\
	unsigned int vec = regs->tra;			\
	(void)vec; /* silence unused warning; not every handler uses it */
#else
/* !CONFIG_SUPERH32: the vector and pt_regs pointer are passed directly */
#define BUILD_TRAP_HANDLER(name)	\
asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
#define TRAP_HANDLER_DECL
#endif
|  | 178 |  | 
|  | 179 | BUILD_TRAP_HANDLER(address_error); | 
|  | 180 | BUILD_TRAP_HANDLER(debug); | 
|  | 181 | BUILD_TRAP_HANDLER(bug); | 
| Paul Mundt | 74d99a5 | 2007-11-26 20:38:36 +0900 | [diff] [blame] | 182 | BUILD_TRAP_HANDLER(fpu_error); | 
|  | 183 | BUILD_TRAP_HANDLER(fpu_state_restore); | 
| Paul Mundt | e08f457 | 2007-05-14 12:52:56 +0900 | [diff] [blame] | 184 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 185 | #define arch_align_stack(x) (x) | 
|  | 186 |  | 
/*
 * Pluggable copy routines (copy_{from,to}-style signatures). The
 * meaning of the unsigned long return value is set by whichever
 * implementations are installed — presumably bytes left uncopied, as
 * with copy_from_user(); confirm against the users of this struct.
 */
struct mem_access {
	unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
	unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
};
|  | 191 |  | 
| Paul Mundt | a62a386 | 2007-11-10 19:46:31 +0900 | [diff] [blame] | 192 | #ifdef CONFIG_SUPERH32 | 
|  | 193 | # include "system_32.h" | 
|  | 194 | #else | 
|  | 195 | # include "system_64.h" | 
|  | 196 | #endif | 
|  | 197 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 198 | #endif |