/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef __PPC_SYSTEM_H
#define __PPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>

#include <asm/atomic.h>
#include <asm/hw_irq.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We can use the eieio instruction for wmb, but since it doesn't
 * give any ordering guarantees about loads, we have to use the
 * stronger but slower sync instruction for mb and rmb.
 */
#define mb()  __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("eieio" : : : "memory")
#define read_barrier_depends()  do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)
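
/*
 * A minimal usage sketch of the barriers above: a producer/consumer
 * pair where the payload store must be ordered before the flag store,
 * and the flag load before the payload load.  The example_* names are
 * hypothetical, and the block is wrapped in #if 0 so it is not
 * compiled into the kernel.
 */
#if 0
static int example_data;
static int example_flag;

static inline void example_publish(int v)
{
	example_data = v;	/* write the payload first */
	wmb();			/* order the payload store before the flag store */
	example_flag = 1;	/* then make it visible */
}

static inline int example_consume(void)
{
	while (example_flag == 0)
		barrier();	/* compiler barrier: force example_flag to be re-read */
	rmb();			/* order the flag load before the payload load */
	return example_data;
}
#endif	/* 0 */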

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
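
/*
 * On uniprocessor builds the smp_* variants collapse to barrier(), a
 * compiler-only barrier, so the hardware sync/eieio cost is avoided.
 * Use them when ordering only matters between CPUs; use the plain
 * mb()/rmb()/wmb() forms when ordering against I/O devices must hold
 * even on UP.  The sketch above would use smp_wmb()/smp_rmb() if the
 * data were only ever shared with other CPUs.
 */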

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif
extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void chrp_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
void _nmask_and_or_msr(unsigned long nmask, unsigned long or_val);

#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

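/*
 * switch_to() takes a third argument, `last`, because by the time a
 * switched-out task resumes here, the task it originally switched
 * away from may no longer be the one that ran just before it on this
 * CPU; __switch_to() returns the task we actually came from so the
 * caller has a safe reference to it.
 */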
extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
	struct thread_struct *next);

extern unsigned int rtas_data;

/*
 * Atomically exchange *p with val and return the previous value,
 * using a lwarx/stwcx. reservation loop: lwarx loads the old value
 * and sets a reservation, stwcx. stores the new value only if the
 * reservation is still held, and bne- retries if it was lost.
 * PPC405_ERR77 expands to a workaround for a PPC405 erratum.
 */
static __inline__ unsigned long
xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))
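
/*
 * A minimal usage sketch of xchg()/tas(): a test-and-set spin lock.
 * The example_* names are hypothetical, the barriers are the minimum
 * a real lock would need, and the block is wrapped in #if 0 so it is
 * not compiled; production locks live in <asm/spinlock.h>.
 */
#if 0
static unsigned long example_lock;

static inline void example_lock_acquire(void)
{
	/* tas() is xchg(ptr, 1): spin until the old value was 0 */
	while (tas(&example_lock) != 0)
		barrier();
	mb();	/* order the acquire before the critical section */
}

static inline void example_lock_release(void)
{
	mb();	/* order the critical section before the release */
	example_lock = 0;
}
#endif	/* 0 */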

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return (unsigned long) xchg_u32(ptr, x);
#if 0	/* xchg_u64 doesn't exist on 32-bit PPC */
	case 8:
		return (unsigned long) xchg_u64(ptr, x);
#endif	/* 0 */
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern inline void * xchg_ptr(void * m, void * val)
{
	return (void *) xchg_u32(m, (unsigned long) val);
}

#define __HAVE_ARCH_CMPXCHG	1

/*
 * Atomically store new in *p, but only if *p still equals old, and
 * return the value actually found there; the lwarx/stwcx. reservation
 * loop retries if another CPU touches the word in between.  On SMP a
 * trailing sync makes a successful cmpxchg act as a full barrier.
 */
static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
{
	unsigned int prev;

	__asm__ __volatile__ ("\n\
1:	lwarx	%0,0,%2 \n\
	cmpw	0,%0,%3 \n\
	bne	2f \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2 \n\
	bne-	1b\n"
#ifdef CONFIG_SMP
"	sync\n"
#endif /* CONFIG_SMP */
"2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#if 0	/* we don't have __cmpxchg_u64 on 32-bit PPC */
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif	/* 0 */
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_,		\
				       sizeof(*(ptr)));			\
})
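
/*
 * A minimal usage sketch of cmpxchg(): the usual sample/compute/retry
 * loop, here adding to an unsigned int atomically.  The
 * example_atomic_add name is hypothetical and the block is wrapped in
 * #if 0 so it is not compiled.
 */
#if 0
static inline unsigned int example_atomic_add(volatile unsigned int *p,
					      unsigned int inc)
{
	unsigned int old, new;

	do {
		old = *p;		/* sample the current value */
		new = old + inc;	/* compute the desired value */
		/* the swap fails, and we retry, if *p changed meanwhile */
	} while (cmpxchg(p, old, new) != old);

	return new;
}
#endif	/* 0 */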

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* __PPC_SYSTEM_H */