| Paul Mackerras | 9f04b9e | 2005-10-10 14:19:43 +1000 | [diff] [blame] | 1 | #ifndef _ASM_POWERPC_PROCESSOR_H | 
|  | 2 | #define _ASM_POWERPC_PROCESSOR_H | 
|  | 3 |  | 
|  | 4 | /* | 
|  | 5 | * Copyright (C) 2001 PPC 64 Team, IBM Corp | 
|  | 6 | * | 
|  | 7 | * This program is free software; you can redistribute it and/or | 
|  | 8 | * modify it under the terms of the GNU General Public License | 
|  | 9 | * as published by the Free Software Foundation; either version | 
|  | 10 | * 2 of the License, or (at your option) any later version. | 
|  | 11 | */ | 
|  | 12 |  | 
| Paul Mackerras | 9f04b9e | 2005-10-10 14:19:43 +1000 | [diff] [blame] | 13 | #include <asm/reg.h> | 
|  | 14 |  | 
|  | 15 | #ifndef __ASSEMBLY__ | 
|  | 16 | #include <linux/compiler.h> | 
|  | 17 | #include <asm/ptrace.h> | 
|  | 18 | #include <asm/types.h> | 
| Paul Mackerras | 9f04b9e | 2005-10-10 14:19:43 +1000 | [diff] [blame] | 19 |  | 
/* We do _not_ want to define new machine types at all, those must die
 * in favor of using the device-tree
 * -- BenH.
 */

/* PREP sub-platform types: see residual.h for these */
#define _PREP_Motorola	0x01	/* motorola prep */
#define _PREP_Firm	0x02	/* firmworks prep */
#define _PREP_IBM	0x00	/* ibm prep */
#define _PREP_Bull	0x03	/* bull prep */

/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola	0x04	/* motorola chrp, the cobra */
#define _CHRP_IBM	0x05	/* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos	0x06	/* Genesi/bplan's Pegasos and Pegasos2 */
#define _CHRP_briq	0x07	/* TotalImpact's briQ */
| Paul Mackerras | 9f04b9e | 2005-10-10 14:19:43 +1000 | [diff] [blame] | 36 |  | 
#if defined(__KERNEL__) && defined(CONFIG_PPC32)

/* Which CHRP sub-platform we run on -- presumably one of the _CHRP_*
 * values above; set by platform probe code elsewhere. */
extern int _chrp_type;

#ifdef CONFIG_PPC_PREP

/* what kind of prep workstation we are */
extern int _prep_type;

#endif /* CONFIG_PPC_PREP */

#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
|  | 49 |  | 
/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").  Uses a GCC local label
 * and the address-of-label extension.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})

/* Macros for adjusting thread priority (hardware multi-threading).
 * The "or Rn,Rn,Rn" forms are architectural no-ops whose register
 * number encodes an SMT priority hint on CPUs that support it.
 */
#define HMT_very_low()   asm volatile("or 31,31,31   # very low priority")
#define HMT_low()	 asm volatile("or 1,1,1	     # low priority")
#define HMT_medium_low() asm volatile("or 6,6,6      # medium low priority")
#define HMT_medium()	 asm volatile("or 2,2,2	     # medium priority")
#define HMT_medium_high() asm volatile("or 5,5,5      # medium high priority")
#define HMT_high()	 asm volatile("or 3,3,3	     # high priority")
|  | 63 |  | 
#ifdef __KERNEL__

/* Nonzero when an Open Firmware client interface was found at boot --
 * presumably; set by early boot code elsewhere. */
extern int have_of;

struct task_struct;

/* Set up user register state for a new program image: entry point
 * (fdptr), and initial stack pointer sp. */
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);

/* Prepare to copy thread state - unlazy all lazy status */
extern void prepare_to_copy(struct task_struct *tsk);

/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);

/* Lazy FPU handling on uni-processor: the task whose state is still
 * live in the corresponding register file, if any. */
extern struct task_struct *last_task_used_math;
extern struct task_struct *last_task_used_altivec;
extern struct task_struct *last_task_used_spe;

#ifdef CONFIG_PPC32
#define TASK_SIZE	(CONFIG_TASK_SIZE)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 8 * 3)
#endif
|  | 91 |  | 
#ifdef CONFIG_PPC64
/* 64-bit user address space is 44-bits (16TB user VM) */
#define TASK_SIZE_USER64 (0x0000100000000000UL)

/*
 * 32-bit user address space is 4GB - 1 page
 * (this 1 page is needed so referencing of 0xFFFFFFFF generates EFAULT)
 */
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1*PAGE_SIZE))

/* Per-task address-space limit: tasks with TIF_32BIT set get the
 * 32-bit limit, all others the 64-bit one. */
#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
		TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_SIZE	  TASK_SIZE_OF(current)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4))

#define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \
		TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
#endif
|  | 115 |  | 
/* NOTE(review): this __KERNEL__ guard is nested inside the one opened
 * earlier in the file, so it is redundant -- harmless, but could go. */
#ifdef __KERNEL__
#ifdef __powerpc64__

#define STACK_TOP_USER64 TASK_SIZE_USER64
#define STACK_TOP_USER32 TASK_SIZE_USER32

/* Top of the user stack for the current task: depends on whether it
 * runs in 32-bit compat mode (TIF_32BIT). */
#define STACK_TOP (test_thread_flag(TIF_32BIT) ? \
		   STACK_TOP_USER32 : STACK_TOP_USER64)

#define STACK_TOP_MAX STACK_TOP_USER64

#else /* __powerpc64__ */

#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX	STACK_TOP

#endif /* __powerpc64__ */
#endif /* __KERNEL__ */
|  | 134 |  | 
/* Address-space limit cookie stored in thread_struct.fs and consulted
 * by get_fs()-style access checks. */
typedef struct {
	unsigned long seg;
} mm_segment_t;
|  | 138 |  | 
/* Per-thread architectural state saved/restored across context switch.
 * NOTE(review): field offsets here are almost certainly consumed by
 * assembly via asm-offsets -- do not reorder fields casually; in
 * particular fpr[] and fpscr must stay contiguous (see below). */
struct thread_struct {
	unsigned long	ksp;		/* Kernel stack pointer */
#ifdef CONFIG_PPC64
	unsigned long	ksp_vsid;	/* presumably the VSID covering ksp -- confirm */
#endif
	struct pt_regs	*regs;		/* Pointer to saved register state */
	mm_segment_t	fs;		/* for get_fs() validation */
#ifdef CONFIG_PPC32
	void		*pgdir;		/* root of page-table tree */
#endif
#if defined(CONFIG_4xx) || defined (CONFIG_BOOKE)
	unsigned long	dbcr0;		/* debug control register values */
	unsigned long	dbcr1;
#endif
	double		fpr[32];	/* Complete floating point set */
	struct {			/* fpr ... fpscr must be contiguous */

		unsigned int pad;
		unsigned int val;	/* Floating point status */
	} fpscr;
	int		fpexc_mode;	/* floating-point exception mode */
	unsigned int	align_ctl;	/* alignment handling control */
#ifdef CONFIG_PPC64
	unsigned long	start_tb;	/* Start purr when proc switched in */
	unsigned long	accum_tb;	/* Total accumulated purr for process */
#endif
	unsigned long	dabr;		/* Data address breakpoint register */
#ifdef CONFIG_ALTIVEC
	/* Complete AltiVec register set */
	vector128	vr[32] __attribute__((aligned(16)));
	/* AltiVec status */
	vector128	vscr __attribute__((aligned(16)));
	unsigned long	vrsave;
	int		used_vr;	/* set if process has used altivec */
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	unsigned long	evr[32];	/* upper 32-bits of SPE regs */
	u64		acc;		/* Accumulator */
	unsigned long	spefscr;	/* SPE & eFP status */
	int		used_spe;	/* set if process has used spe */
#endif /* CONFIG_SPE */
};
|  | 181 |  | 
/* Minimum alignment for task_struct -- 16 bytes, matching the AltiVec
 * vector alignment requirement, presumably. */
#define ARCH_MIN_TASKALIGN 16

/* Initial kernel stack pointer: top of the statically allocated init stack */
#define INIT_SP		(sizeof(init_stack) + (unsigned long) &init_stack)


/* thread_struct initializer for the init task */
#ifdef CONFIG_PPC32
#define INIT_THREAD { \
	.ksp = INIT_SP, \
	.fs = KERNEL_DS, \
	.pgdir = swapper_pg_dir, \
	.fpexc_mode = MSR_FE0 | MSR_FE1, \
}
#else
#define INIT_THREAD  { \
	.ksp = INIT_SP, \
	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
	.fs = KERNEL_DS, \
	.fpr = {0}, \
	.fpscr = { .val = 0, }, \
	.fpexc_mode = 0, \
}
#endif
|  | 204 |  | 
/*
 * Return saved PC of a blocked thread. For now, this is the "user" PC
 * (0 if the thread has no saved user register state).
 */
#define thread_saved_pc(tsk)    \
	((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)

unsigned long get_wchan(struct task_struct *p);

/* User-visible PC (nip) and stack pointer (gpr[1]); 0 when the task
 * has no saved user registers. */
#define KSTK_EIP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->nip: 0)
#define KSTK_ESP(tsk)  ((tsk)->thread.regs? (tsk)->thread.regs->gpr[1]: 0)

/* Get/set floating-point exception mode -- these macro names are the
 * generic hooks used by prctl(), presumably; verify in kernel/sys.c. */
#define GET_FPEXC_CTL(tsk, adr) get_fpexc_mode((tsk), (adr))
#define SET_FPEXC_CTL(tsk, val) set_fpexc_mode((tsk), (val))

extern int get_fpexc_mode(struct task_struct *tsk, unsigned long adr);
extern int set_fpexc_mode(struct task_struct *tsk, unsigned int val);

/* Get/set the thread's endian mode */
#define GET_ENDIAN(tsk, adr) get_endian((tsk), (adr))
#define SET_ENDIAN(tsk, val) set_endian((tsk), (val))

extern int get_endian(struct task_struct *tsk, unsigned long adr);
extern int set_endian(struct task_struct *tsk, unsigned int val);

/* Get/set unaligned-access handling control */
#define GET_UNALIGN_CTL(tsk, adr)	get_unalign_ctl((tsk), (adr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
|  | 234 |  | 
| Paul Mackerras | 9f04b9e | 2005-10-10 14:19:43 +1000 | [diff] [blame] | 235 | static inline unsigned int __unpack_fe01(unsigned long msr_bits) | 
|  | 236 | { | 
|  | 237 | return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); | 
|  | 238 | } | 
|  | 239 |  | 
|  | 240 | static inline unsigned long __pack_fe01(unsigned int fpmode) | 
|  | 241 | { | 
|  | 242 | return ((fpmode << 10) & MSR_FE0) | ((fpmode << 8) & MSR_FE1); | 
|  | 243 | } | 
|  | 244 |  | 
/*
 * cpu_relax(): hint that we are busy-waiting.  On PPC64 briefly drop
 * SMT priority (low, then back to medium) so the sibling thread gets
 * more issue slots; otherwise just a compiler barrier.
 */
#ifdef CONFIG_PPC64
#define cpu_relax()	do { HMT_low(); HMT_medium(); barrier(); } while (0)
#else
#define cpu_relax()	barrier()
#endif

/* Check that a certain kernel stack pointer is valid in task_struct p */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes);

/*
 * Prefetch macros.  Advertise to generic code that this arch supplies
 * its own prefetch(), prefetchw() and spin_lock_prefetch().
 */
#define ARCH_HAS_PREFETCH
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
|  | 261 |  | 
/*
 * Prefetch a cache line for reading via "dcbt" (data cache block
 * touch).  A NULL pointer is skipped rather than prefetched; the
 * reason is not recorded here -- presumably to avoid a pointless
 * translation attempt on address 0.
 */
static inline void prefetch(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}
|  | 269 |  | 
/*
 * Prefetch a cache line with intent to modify via "dcbtst" (data
 * cache block touch for store).  NULL is skipped, same as prefetch().
 */
static inline void prefetchw(const void *x)
{
	if (unlikely(!x))
		return;

	__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
|  | 277 |  | 
/* Prefetch-for-write the lock word before attempting to take it */
#define spin_lock_prefetch(x)	prefetchw(x)

#ifdef CONFIG_PPC64
/* 64-bit chooses its mmap layout in arch code -- presumably via an
 * arch_pick_mmap_layout() implementation elsewhere. */
#define HAVE_ARCH_PICK_MMAP_LAYOUT
#endif
|  | 283 |  | 
|  | 284 | #endif /* __KERNEL__ */ | 
|  | 285 | #endif /* __ASSEMBLY__ */ | 
|  | 286 | #endif /* _ASM_POWERPC_PROCESSOR_H */ |