/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/irqflags.h>

#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
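
/*
 * Usage sketch (illustrative only): the publish/consume pairing described
 * in the comment above.  The producer orders its data store before the
 * flag store with wmb(); the consumer orders the flag load before the
 * data load with rmb().  The data/flag arguments and function names below
 * are hypothetical, not used elsewhere in the tree.
 */
static inline void example_publish(int *data, volatile int *flag, int value)
{
	*data = value;		/* write the payload first */
	wmb();			/* order the data store before the flag store */
	*flag = 1;		/* then make it visible */
}

static inline int example_consume(const int *data, const volatile int *flag)
{
	if (!*flag)
		return -1;	/* nothing published yet (arbitrary sentinel) */
	rmb();			/* order the flag load before the data load */
	return *data;
}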

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 6 /* entries in ARCH_DLINFO */
#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define smp_mb()	mb()
#define smp_rmb()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
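
/*
 * Usage sketch (illustrative only): a hypothetical reader that must not
 * start a dependent access until the value it just loaded is known.
 * data_barrier() on the loaded value keeps the following load from being
 * speculated ahead of it; the names below are made up for the example.
 */
static inline int example_read_after_index(const volatile int *index,
					   const int *table)
{
	int i = *index;		/* load that the later access depends on */

	data_barrier(i);	/* stall until the value of 'i' is known */
	return table[i & 0x7];	/* hypothetical dependent access */
}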

struct task_struct;
struct pt_regs;

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void do_dabr(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 154 |  | 
| Paul Mackerras | 5388fb1 | 2006-01-11 22:11:39 +1100 | [diff] [blame] | 155 | #ifndef CONFIG_SMP | 
 | 156 | extern void discard_lazy_cpu_state(void); | 
 | 157 | #else | 
 | 158 | static inline void discard_lazy_cpu_state(void) | 
 | 159 | { | 
 | 160 | } | 
 | 161 | #endif | 
 | 162 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 163 | #ifdef CONFIG_ALTIVEC | 
 | 164 | extern void flush_altivec_to_thread(struct task_struct *); | 
 | 165 | #else | 
 | 166 | static inline void flush_altivec_to_thread(struct task_struct *t) | 
 | 167 | { | 
 | 168 | } | 
 | 169 | #endif | 
 | 170 |  | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 171 | #ifdef CONFIG_VSX | 
 | 172 | extern void flush_vsx_to_thread(struct task_struct *); | 
 | 173 | #else | 
 | 174 | static inline void flush_vsx_to_thread(struct task_struct *t) | 
 | 175 | { | 
 | 176 | } | 
 | 177 | #endif | 
 | 178 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 179 | #ifdef CONFIG_SPE | 
 | 180 | extern void flush_spe_to_thread(struct task_struct *); | 
 | 181 | #else | 
 | 182 | static inline void flush_spe_to_thread(struct task_struct *t) | 
 | 183 | { | 
 | 184 | } | 
 | 185 | #endif | 
 | 186 |  | 
 | 187 | extern int call_rtas(const char *, int, int, unsigned long *, ...); | 
 | 188 | extern void cacheable_memzero(void *p, unsigned int nb); | 
 | 189 | extern void *cacheable_memcpy(void *, const void *, unsigned int); | 
 | 190 | extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long); | 
 | 191 | extern void bad_page_fault(struct pt_regs *, unsigned long, int); | 
 | 192 | extern int die(const char *, struct pt_regs *, long); | 
 | 193 | extern void _exception(int, struct pt_regs *, int, unsigned long); | 
| Jon Loeliger | 1d59483 | 2008-01-23 12:42:07 -0600 | [diff] [blame] | 194 | extern void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val); | 
 | 195 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 196 | #ifdef CONFIG_BOOKE_WDT | 
 | 197 | extern u32 booke_wdt_enabled; | 
 | 198 | extern u32 booke_wdt_period; | 
 | 199 | #endif /* CONFIG_BOOKE_WDT */ | 
 | 200 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 201 | struct device_node; | 
 | 202 | extern void note_scsi_host(struct device_node *, void *); | 
 | 203 |  | 
 | 204 | extern struct task_struct *__switch_to(struct task_struct *, | 
 | 205 | 	struct task_struct *); | 
 | 206 | #define switch_to(prev, next, last)	((last) = __switch_to((prev), (next))) | 
 | 207 |  | 
 | 208 | struct thread_struct; | 
 | 209 | extern struct task_struct *_switch(struct thread_struct *prev, | 
 | 210 | 				   struct thread_struct *next); | 
 | 211 |  | 
 | 212 | extern unsigned int rtas_data; | 
| Paul Mackerras | 40ef8cb | 2005-10-10 22:50:37 +1000 | [diff] [blame] | 213 | extern int mem_init_done;	/* set on boot once kmalloc can be called */ | 
| Benjamin Herrenschmidt | d3f6204 | 2009-06-02 21:16:38 +0000 | [diff] [blame] | 214 | extern int init_bootmem_done;	/* set once bootmem is available */ | 
| Becky Bruce | 49a8496 | 2009-05-08 12:19:27 +0000 | [diff] [blame] | 215 | extern phys_addr_t memory_limit; | 
| Paul Mackerras | 49b0985 | 2005-11-10 15:53:40 +1100 | [diff] [blame] | 216 | extern unsigned long klimit; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 217 |  | 
| Stephen Rothwell | 7b2c3c5 | 2007-09-17 14:08:06 +1000 | [diff] [blame] | 218 | extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); | 
| Stephen Rothwell | 5669c3c | 2007-10-02 13:37:53 +1000 | [diff] [blame] | 219 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); | 
| Stephen Rothwell | 7b2c3c5 | 2007-09-17 14:08:06 +1000 | [diff] [blame] | 220 |  | 
| Paul Mackerras | 17a6392 | 2005-10-20 21:10:09 +1000 | [diff] [blame] | 221 | extern int powersave_nap;	/* set if nap mode can be used in idle loop */ | 
 | 222 |  | 
/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __always_inline unsigned long
__xchg_u32_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__xchg_u64_local(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

static __always_inline unsigned long
__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32_local(ptr, x);
#ifdef CONFIG_PPC64
	case 8:
		return __xchg_u64_local(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}
#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })
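
/*
 * Usage sketch (illustrative only): a hypothetical test-and-set style
 * trylock built on xchg().  xchg() returns the previous value, so a
 * result of 0 means this caller performed the 0 -> 1 transition and now
 * owns the (made-up) lock word.
 */
static inline int example_xchg_trylock(volatile unsigned int *lock_word)
{
	return xchg(lock_word, 1) == 0;	/* previous value 0 => acquired */
}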

#define xchg_local(ptr,x)						     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
     		(unsigned long)_x_, sizeof(*(ptr)));			     \
  })

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __always_inline unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
			unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

#ifdef CONFIG_PPC64
static __always_inline unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	LWSYNC_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}

static __always_inline unsigned long
__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
			unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	"\n\
2:"
	: "=&r" (prev), "+m" (*p)
	: "r" (p), "r" (old), "r" (new)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __always_inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

static __always_inline unsigned long
__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32_local(ptr, old, new);
#ifdef CONFIG_PPC64
	case 8:
		return __cmpxchg_u64_local(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
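
/*
 * Usage sketch (illustrative only): the usual cmpxchg() retry loop, here
 * as a hypothetical lock-free "increment but do not exceed limit" helper.
 * cmpxchg() returns the value it found at *counter; the store took effect
 * only when that equals the 'old' value the new value was derived from.
 */
static inline unsigned int example_cmpxchg_inc_below(volatile unsigned int *counter,
						     unsigned int limit)
{
	unsigned int old, new;

	do {
		old = *counter;
		if (old >= limit)
			return old;	/* already at the cap; leave it */
		new = old + 1;
	} while (cmpxchg(counter, old, new) != old);

	return new;
}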

#define cmpxchg_local(ptr, o, n)					 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#ifdef CONFIG_PPC64
/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 * cacheline alignment of buffers.
 */
#define NET_IP_ALIGN	0
#define NET_SKB_PAD	L1_CACHE_BYTES

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif

extern unsigned long arch_align_stack(unsigned long sp);

/* Used in very early kernel initialization. */
extern unsigned long reloc_offset(void);
extern unsigned long add_reloc_offset(unsigned long);
extern void reloc_got2(unsigned long);

#define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void account_system_vtime(struct task_struct *);
#endif

extern struct dentry *powerpc_debugfs_root;

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */