| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1 | /* | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 2 |  *  Derived from "arch/i386/kernel/process.c" | 
 | 3 |  *    Copyright (C) 1995  Linus Torvalds | 
 | 4 |  * | 
 | 5 |  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and | 
 | 6 |  *  Paul Mackerras (paulus@cs.anu.edu.au) | 
 | 7 |  * | 
 | 8 |  *  PowerPC version | 
 | 9 |  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | 
 | 10 |  * | 
 | 11 |  *  This program is free software; you can redistribute it and/or | 
 | 12 |  *  modify it under the terms of the GNU General Public License | 
 | 13 |  *  as published by the Free Software Foundation; either version | 
 | 14 |  *  2 of the License, or (at your option) any later version. | 
 | 15 |  */ | 
 | 16 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 17 | #include <linux/errno.h> | 
 | 18 | #include <linux/sched.h> | 
 | 19 | #include <linux/kernel.h> | 
 | 20 | #include <linux/mm.h> | 
 | 21 | #include <linux/smp.h> | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 22 | #include <linux/stddef.h> | 
 | 23 | #include <linux/unistd.h> | 
 | 24 | #include <linux/ptrace.h> | 
 | 25 | #include <linux/slab.h> | 
 | 26 | #include <linux/user.h> | 
 | 27 | #include <linux/elf.h> | 
 | 28 | #include <linux/init.h> | 
 | 29 | #include <linux/prctl.h> | 
 | 30 | #include <linux/init_task.h> | 
 | 31 | #include <linux/module.h> | 
 | 32 | #include <linux/kallsyms.h> | 
 | 33 | #include <linux/mqueue.h> | 
 | 34 | #include <linux/hardirq.h> | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 35 | #include <linux/utsname.h> | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 36 | #include <linux/ftrace.h> | 
| Martin Schwidefsky | 79741dd | 2008-12-31 15:11:38 +0100 | [diff] [blame] | 37 | #include <linux/kernel_stat.h> | 
| Anton Blanchard | d839088 | 2009-02-22 01:50:03 +0000 | [diff] [blame] | 38 | #include <linux/personality.h> | 
 | 39 | #include <linux/random.h> | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 40 |  | 
 | 41 | #include <asm/pgtable.h> | 
 | 42 | #include <asm/uaccess.h> | 
 | 43 | #include <asm/system.h> | 
 | 44 | #include <asm/io.h> | 
 | 45 | #include <asm/processor.h> | 
 | 46 | #include <asm/mmu.h> | 
 | 47 | #include <asm/prom.h> | 
| Michael Ellerman | 76032de | 2005-11-07 13:12:03 +1100 | [diff] [blame] | 48 | #include <asm/machdep.h> | 
| Paul Mackerras | c6622f6 | 2006-02-24 10:06:59 +1100 | [diff] [blame] | 49 | #include <asm/time.h> | 
| Arnd Bergmann | a7f3184 | 2006-03-23 00:00:08 +0100 | [diff] [blame] | 50 | #include <asm/syscalls.h> | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 51 | #ifdef CONFIG_PPC64 | 
 | 52 | #include <asm/firmware.h> | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 53 | #endif | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 54 | #include <linux/kprobes.h> | 
 | 55 | #include <linux/kdebug.h> | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 56 |  | 
 | 57 | extern unsigned long _get_SP(void); | 
 | 58 |  | 
 | 59 | #ifndef CONFIG_SMP | 
 | 60 | struct task_struct *last_task_used_math = NULL; | 
 | 61 | struct task_struct *last_task_used_altivec = NULL; | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 62 | struct task_struct *last_task_used_vsx = NULL; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 63 | struct task_struct *last_task_used_spe = NULL; | 
 | 64 | #endif | 
 | 65 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 66 | /* | 
 | 67 |  * Make sure the floating-point register state in the | 
 | 68 |  * thread_struct is up to date for task tsk. | 
 | 69 |  */ | 
 | 70 | void flush_fp_to_thread(struct task_struct *tsk) | 
 | 71 | { | 
 | 72 | 	if (tsk->thread.regs) { | 
 | 73 | 		/* | 
 | 74 | 		 * We need to disable preemption here because if we didn't, | 
 | 75 | 		 * another process could get scheduled after the regs->msr | 
 | 76 | 		 * test but before we have finished saving the FP registers | 
 | 77 | 		 * to the thread_struct.  That process could take over the | 
 | 78 | 		 * FPU, and then when we get scheduled again we would store | 
 | 79 | 		 * bogus values for the remaining FP registers. | 
 | 80 | 		 */ | 
 | 81 | 		preempt_disable(); | 
 | 82 | 		if (tsk->thread.regs->msr & MSR_FP) { | 
 | 83 | #ifdef CONFIG_SMP | 
 | 84 | 			/* | 
 | 85 | 			 * This should only ever be called for current or | 
 | 86 | 			 * for a stopped child process.  Since we save away | 
 | 87 | 			 * the FP register state on context switch on SMP, | 
 | 88 | 			 * there is something wrong if a stopped child appears | 
 | 89 | 			 * to still have its FP state in the CPU registers. | 
 | 90 | 			 */ | 
 | 91 | 			BUG_ON(tsk != current); | 
 | 92 | #endif | 
| Kumar Gala | 0ee6c15 | 2007-08-28 21:15:53 -0500 | [diff] [blame] | 93 | 			giveup_fpu(tsk); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 94 | 		} | 
 | 95 | 		preempt_enable(); | 
 | 96 | 	} | 
 | 97 | } | 
 | 98 |  | 
 | 99 | void enable_kernel_fp(void) | 
 | 100 | { | 
 | 101 | 	WARN_ON(preemptible()); | 
 | 102 |  | 
 | 103 | #ifdef CONFIG_SMP | 
 | 104 | 	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) | 
 | 105 | 		giveup_fpu(current); | 
 | 106 | 	else | 
 | 107 | 		giveup_fpu(NULL);	/* just enables FP for kernel */ | 
 | 108 | #else | 
 | 109 | 	giveup_fpu(last_task_used_math); | 
 | 110 | #endif /* CONFIG_SMP */ | 
 | 111 | } | 
 | 112 | EXPORT_SYMBOL(enable_kernel_fp); | 
 | 113 |  | 
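 |  | /* | 
 |  |  * Usage sketch (illustrative, not part of this file): enable_kernel_fp() | 
 |  |  * warns when called preemptibly, so a kernel path that wants to use the | 
 |  |  * FP registers is expected to pin itself to the CPU for the duration. | 
 |  |  * The helper name below is hypothetical. | 
 |  |  */ | 
 |  | static inline void example_do_fp_work(void) | 
 |  | { | 
 |  | 	preempt_disable(); | 
 |  | 	enable_kernel_fp(); | 
 |  | 	/* ... use the FP registers on this CPU ... */ | 
 |  | 	preempt_enable(); | 
 |  | } | 
 |  |  | 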
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 114 | #ifdef CONFIG_ALTIVEC | 
 | 115 | void enable_kernel_altivec(void) | 
 | 116 | { | 
 | 117 | 	WARN_ON(preemptible()); | 
 | 118 |  | 
 | 119 | #ifdef CONFIG_SMP | 
 | 120 | 	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) | 
 | 121 | 		giveup_altivec(current); | 
 | 122 | 	else | 
 | 123 | 		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */ | 
 | 124 | #else | 
 | 125 | 	giveup_altivec(last_task_used_altivec); | 
 | 126 | #endif /* CONFIG_SMP */ | 
 | 127 | } | 
 | 128 | EXPORT_SYMBOL(enable_kernel_altivec); | 
 | 129 |  | 
 | 130 | /* | 
 | 131 |  * Make sure the VMX/Altivec register state in the | 
 | 132 |  * thread_struct is up to date for task tsk. | 
 | 133 |  */ | 
 | 134 | void flush_altivec_to_thread(struct task_struct *tsk) | 
 | 135 | { | 
 | 136 | 	if (tsk->thread.regs) { | 
 | 137 | 		preempt_disable(); | 
 | 138 | 		if (tsk->thread.regs->msr & MSR_VEC) { | 
 | 139 | #ifdef CONFIG_SMP | 
 | 140 | 			BUG_ON(tsk != current); | 
 | 141 | #endif | 
| Kumar Gala | 0ee6c15 | 2007-08-28 21:15:53 -0500 | [diff] [blame] | 142 | 			giveup_altivec(tsk); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 143 | 		} | 
 | 144 | 		preempt_enable(); | 
 | 145 | 	} | 
 | 146 | } | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 147 | #endif /* CONFIG_ALTIVEC */ | 
 | 148 |  | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 149 | #ifdef CONFIG_VSX | 
 | 150 | #if 0 | 
 | 151 | /* not currently used, but some crazy RAID module might want to later */ | 
 | 152 | void enable_kernel_vsx(void) | 
 | 153 | { | 
 | 154 | 	WARN_ON(preemptible()); | 
 | 155 |  | 
 | 156 | #ifdef CONFIG_SMP | 
 | 157 | 	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) | 
 | 158 | 		giveup_vsx(current); | 
 | 159 | 	else | 
 | 160 | 		giveup_vsx(NULL);	/* just enable vsx for kernel - force */ | 
 | 161 | #else | 
 | 162 | 	giveup_vsx(last_task_used_vsx); | 
 | 163 | #endif /* CONFIG_SMP */ | 
 | 164 | } | 
 | 165 | EXPORT_SYMBOL(enable_kernel_vsx); | 
 | 166 | #endif | 
 | 167 |  | 
| Michael Neuling | 7c29217 | 2008-07-11 16:29:12 +1000 | [diff] [blame] | 168 | void giveup_vsx(struct task_struct *tsk) | 
 | 169 | { | 
 | 170 | 	giveup_fpu(tsk); | 
 | 171 | 	giveup_altivec(tsk); | 
 | 172 | 	__giveup_vsx(tsk); | 
 | 173 | } | 
 | 174 |  | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 175 | void flush_vsx_to_thread(struct task_struct *tsk) | 
 | 176 | { | 
 | 177 | 	if (tsk->thread.regs) { | 
 | 178 | 		preempt_disable(); | 
 | 179 | 		if (tsk->thread.regs->msr & MSR_VSX) { | 
 | 180 | #ifdef CONFIG_SMP | 
 | 181 | 			BUG_ON(tsk != current); | 
 | 182 | #endif | 
 | 183 | 			giveup_vsx(tsk); | 
 | 184 | 		} | 
 | 185 | 		preempt_enable(); | 
 | 186 | 	} | 
 | 187 | } | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 188 | #endif /* CONFIG_VSX */ | 
 | 189 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 190 | #ifdef CONFIG_SPE | 
 | 191 |  | 
 | 192 | void enable_kernel_spe(void) | 
 | 193 | { | 
 | 194 | 	WARN_ON(preemptible()); | 
 | 195 |  | 
 | 196 | #ifdef CONFIG_SMP | 
 | 197 | 	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) | 
 | 198 | 		giveup_spe(current); | 
 | 199 | 	else | 
 | 200 | 		giveup_spe(NULL);	/* just enable SPE for kernel - force */ | 
 | 201 | #else | 
 | 202 | 	giveup_spe(last_task_used_spe); | 
 | 203 | #endif /* CONFIG_SMP */ | 
 | 204 | } | 
 | 205 | EXPORT_SYMBOL(enable_kernel_spe); | 
 | 206 |  | 
 | 207 | void flush_spe_to_thread(struct task_struct *tsk) | 
 | 208 | { | 
 | 209 | 	if (tsk->thread.regs) { | 
 | 210 | 		preempt_disable(); | 
 | 211 | 		if (tsk->thread.regs->msr & MSR_SPE) { | 
 | 212 | #ifdef CONFIG_SMP | 
 | 213 | 			BUG_ON(tsk != current); | 
 | 214 | #endif | 
| Kumar Gala | 0ee6c15 | 2007-08-28 21:15:53 -0500 | [diff] [blame] | 215 | 			giveup_spe(tsk); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 216 | 		} | 
 | 217 | 		preempt_enable(); | 
 | 218 | 	} | 
 | 219 | } | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 220 | #endif /* CONFIG_SPE */ | 
 | 221 |  | 
| Paul Mackerras | 5388fb1 | 2006-01-11 22:11:39 +1100 | [diff] [blame] | 222 | #ifndef CONFIG_SMP | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 223 | /* | 
 | 224 |  * If we are doing lazy switching of CPU state (FP, altivec or SPE), | 
 | 225 |  * and the current task has some state, discard it. | 
 | 226 |  */ | 
| Paul Mackerras | 5388fb1 | 2006-01-11 22:11:39 +1100 | [diff] [blame] | 227 | void discard_lazy_cpu_state(void) | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 228 | { | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 229 | 	preempt_disable(); | 
 | 230 | 	if (last_task_used_math == current) | 
 | 231 | 		last_task_used_math = NULL; | 
 | 232 | #ifdef CONFIG_ALTIVEC | 
 | 233 | 	if (last_task_used_altivec == current) | 
 | 234 | 		last_task_used_altivec = NULL; | 
 | 235 | #endif /* CONFIG_ALTIVEC */ | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 236 | #ifdef CONFIG_VSX | 
 | 237 | 	if (last_task_used_vsx == current) | 
 | 238 | 		last_task_used_vsx = NULL; | 
 | 239 | #endif /* CONFIG_VSX */ | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 240 | #ifdef CONFIG_SPE | 
 | 241 | 	if (last_task_used_spe == current) | 
 | 242 | 		last_task_used_spe = NULL; | 
 | 243 | #endif | 
 | 244 | 	preempt_enable(); | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 245 | } | 
| Paul Mackerras | 5388fb1 | 2006-01-11 22:11:39 +1100 | [diff] [blame] | 246 | #endif /* CONFIG_SMP */ | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 247 |  | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 248 | void do_dabr(struct pt_regs *regs, unsigned long address, | 
 | 249 | 		    unsigned long error_code) | 
 | 250 | { | 
 | 251 | 	siginfo_t info; | 
 | 252 |  | 
 | 253 | 	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code, | 
 | 254 | 			11, SIGSEGV) == NOTIFY_STOP) | 
 | 255 | 		return; | 
 | 256 |  | 
 | 257 | 	if (debugger_dabr_match(regs)) | 
 | 258 | 		return; | 
 | 259 |  | 
 | 260 | 	/* Clear the DAC and struct entries.  One shot trigger */ | 
| Kumar Gala | 2325f0a | 2008-07-26 05:27:33 +1000 | [diff] [blame] | 261 | #if defined(CONFIG_BOOKE) | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 262 | 	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W | 
 | 263 | 							| DBCR0_IDM)); | 
 | 264 | #endif | 
 | 265 |  | 
 | 266 | 	/* Clear the DABR */ | 
 | 267 | 	set_dabr(0); | 
 | 268 |  | 
 | 269 | 	/* Deliver the signal to userspace */ | 
 | 270 | 	info.si_signo = SIGTRAP; | 
 | 271 | 	info.si_errno = 0; | 
 | 272 | 	info.si_code = TRAP_HWBKPT; | 
 | 273 | 	info.si_addr = (void __user *)address; | 
 | 274 | 	force_sig_info(SIGTRAP, &info, current); | 
 | 275 | } | 
 | 276 |  | 
| Michael Ellerman | a2ceff5 | 2008-03-28 19:11:48 +1100 | [diff] [blame] | 277 | static DEFINE_PER_CPU(unsigned long, current_dabr); | 
 | 278 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 279 | int set_dabr(unsigned long dabr) | 
 | 280 | { | 
| Michael Ellerman | a2ceff5 | 2008-03-28 19:11:48 +1100 | [diff] [blame] | 281 | 	__get_cpu_var(current_dabr) = dabr; | 
 | 282 |  | 
| Michael Ellerman | cab0af9 | 2005-11-03 15:30:49 +1100 | [diff] [blame] | 283 | 	if (ppc_md.set_dabr) | 
 | 284 | 		return ppc_md.set_dabr(dabr); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 285 |  | 
| Benjamin Herrenschmidt | 791cc50 | 2007-06-04 15:15:48 +1000 | [diff] [blame] | 286 | 	/* XXX should we have a CPU_FTR_HAS_DABR ? */ | 
| Benjamin Herrenschmidt | c6c9eac | 2009-09-08 14:16:58 +0000 | [diff] [blame] | 287 | #if defined(CONFIG_BOOKE) | 
 | 288 | 	mtspr(SPRN_DAC1, dabr); | 
 | 289 | #elif defined(CONFIG_PPC_BOOK3S) | 
| Michael Ellerman | cab0af9 | 2005-11-03 15:30:49 +1100 | [diff] [blame] | 290 | 	mtspr(SPRN_DABR, dabr); | 
| Benjamin Herrenschmidt | 791cc50 | 2007-06-04 15:15:48 +1000 | [diff] [blame] | 291 | #endif | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 292 |  | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 293 |  | 
| Michael Ellerman | cab0af9 | 2005-11-03 15:30:49 +1100 | [diff] [blame] | 294 | 	return 0; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 295 | } | 
 | 296 |  | 
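 |  | /* | 
 |  |  * Illustrative sketch (an assumption about callers, mirroring the ptrace | 
 |  |  * path): the dabr value packs the doubleword-aligned address to watch | 
 |  |  * together with the enable bits from <asm/reg.h>.  The helper below is | 
 |  |  * hypothetical and only shows the encoding. | 
 |  |  */ | 
 |  | static inline int example_arm_write_watchpoint(unsigned long addr) | 
 |  | { | 
 |  | 	return set_dabr((addr & ~0x7UL) | DABR_TRANSLATION | DABR_DATA_WRITE); | 
 |  | } | 
 |  |  | 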
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 297 | #ifdef CONFIG_PPC64 | 
 | 298 | DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 299 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 300 |  | 
 | 301 | struct task_struct *__switch_to(struct task_struct *prev, | 
 | 302 | 	struct task_struct *new) | 
 | 303 | { | 
 | 304 | 	struct thread_struct *new_thread, *old_thread; | 
 | 305 | 	unsigned long flags; | 
 | 306 | 	struct task_struct *last; | 
 | 307 |  | 
 | 308 | #ifdef CONFIG_SMP | 
 | 309 | 	/* avoid complexity of lazy save/restore of fpu | 
 | 310 | 	 * by just saving it every time we switch out if | 
 | 311 | 	 * this task used the fpu during the last quantum. | 
 | 312 | 	 * | 
 | 313 | 	 * If it tries to use the fpu again, it'll trap and | 
 | 314 | 	 * reload its fp regs.  So we don't have to do a restore | 
 | 315 | 	 * every switch, just a save. | 
 | 316 | 	 *  -- Cort | 
 | 317 | 	 */ | 
 | 318 | 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP)) | 
 | 319 | 		giveup_fpu(prev); | 
 | 320 | #ifdef CONFIG_ALTIVEC | 
 | 321 | 	/* | 
 | 322 | 	 * If the previous thread used altivec in the last quantum | 
 | 323 | 	 * (thus changing altivec regs) then save them. | 
 | 324 | 	 * We used to check the VRSAVE register but not all apps | 
 | 325 | 	 * set it, so we don't rely on it now (and in fact we need | 
 | 326 | 	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus | 
 | 327 | 	 * | 
 | 328 | 	 * On SMP we always save/restore altivec regs just to avoid the | 
 | 329 | 	 * complexity of changing processors. | 
 | 330 | 	 *  -- Cort | 
 | 331 | 	 */ | 
 | 332 | 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)) | 
 | 333 | 		giveup_altivec(prev); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 334 | #endif /* CONFIG_ALTIVEC */ | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 335 | #ifdef CONFIG_VSX | 
 | 336 | 	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX)) | 
| Michael Neuling | 7c29217 | 2008-07-11 16:29:12 +1000 | [diff] [blame] | 337 | 	/* VMX and FPU registers are already saved here */ | 
 | 338 | 		__giveup_vsx(prev); | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 339 | #endif /* CONFIG_VSX */ | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 340 | #ifdef CONFIG_SPE | 
 | 341 | 	/* | 
 | 342 | 	 * If the previous thread used spe in the last quantum | 
 | 343 | 	 * (thus changing spe regs) then save them. | 
 | 344 | 	 * | 
 | 345 | 	 * On SMP we always save/restore spe regs just to avoid the | 
 | 346 | 	 * complexity of changing processors. | 
 | 347 | 	 */ | 
 | 348 | 	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))) | 
 | 349 | 		giveup_spe(prev); | 
| Paul Mackerras | c0c0d99 | 2005-10-01 13:49:08 +1000 | [diff] [blame] | 350 | #endif /* CONFIG_SPE */ | 
 | 351 |  | 
 | 352 | #else  /* CONFIG_SMP */ | 
 | 353 | #ifdef CONFIG_ALTIVEC | 
 | 354 | 	/* Avoid the trap.  On smp this never happens since | 
 | 355 | 	 * we don't set last_task_used_altivec -- Cort | 
 | 356 | 	 */ | 
 | 357 | 	if (new->thread.regs && last_task_used_altivec == new) | 
 | 358 | 		new->thread.regs->msr |= MSR_VEC; | 
 | 359 | #endif /* CONFIG_ALTIVEC */ | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 360 | #ifdef CONFIG_VSX | 
 | 361 | 	if (new->thread.regs && last_task_used_vsx == new) | 
 | 362 | 		new->thread.regs->msr |= MSR_VSX; | 
 | 363 | #endif /* CONFIG_VSX */ | 
| Paul Mackerras | c0c0d99 | 2005-10-01 13:49:08 +1000 | [diff] [blame] | 364 | #ifdef CONFIG_SPE | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 365 | 	/* Avoid the trap.  On smp this never happens since | 
 | 366 | 	 * we don't set last_task_used_spe | 
 | 367 | 	 */ | 
 | 368 | 	if (new->thread.regs && last_task_used_spe == new) | 
 | 369 | 		new->thread.regs->msr |= MSR_SPE; | 
 | 370 | #endif /* CONFIG_SPE */ | 
| Paul Mackerras | c0c0d99 | 2005-10-01 13:49:08 +1000 | [diff] [blame] | 371 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 372 | #endif /* CONFIG_SMP */ | 
 | 373 |  | 
| Kumar Gala | 2325f0a | 2008-07-26 05:27:33 +1000 | [diff] [blame] | 374 | #if defined(CONFIG_BOOKE) | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 375 | 	/* If new thread DAC (HW breakpoint) is the same then leave it */ | 
 | 376 | 	if (new->thread.dabr) | 
 | 377 | 		set_dabr(new->thread.dabr); | 
| Benjamin Herrenschmidt | c6c9eac | 2009-09-08 14:16:58 +0000 | [diff] [blame] | 378 | #else | 
 | 379 | 	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr)) | 
 | 380 | 		set_dabr(new->thread.dabr); | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 381 | #endif | 
 | 382 |  | 
| Benjamin Herrenschmidt | c6c9eac | 2009-09-08 14:16:58 +0000 | [diff] [blame] | 383 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 384 | 	new_thread = &new->thread; | 
 | 385 | 	old_thread = &current->thread; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 386 |  | 
 | 387 | #ifdef CONFIG_PPC64 | 
 | 388 | 	/* | 
 | 389 | 	 * Collect processor utilization data per process | 
 | 390 | 	 */ | 
 | 391 | 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) { | 
 | 392 | 		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); | 
 | 393 | 		long unsigned start_tb, current_tb; | 
 | 394 | 		start_tb = old_thread->start_tb; | 
 | 395 | 		cu->current_tb = current_tb = mfspr(SPRN_PURR); | 
 | 396 | 		old_thread->accum_tb += (current_tb - start_tb); | 
 | 397 | 		new_thread->start_tb = current_tb; | 
 | 398 | 	} | 
 | 399 | #endif | 
 | 400 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 401 | 	local_irq_save(flags); | 
| Paul Mackerras | c6622f6 | 2006-02-24 10:06:59 +1100 | [diff] [blame] | 402 |  | 
 | 403 | 	account_system_vtime(current); | 
| Tony Breeds | 81a3843 | 2007-12-04 16:51:44 +1100 | [diff] [blame] | 404 | 	account_process_vtime(current); | 
| Paul Mackerras | c6622f6 | 2006-02-24 10:06:59 +1100 | [diff] [blame] | 405 | 	calculate_steal_time(); | 
 | 406 |  | 
| Anton Blanchard | 44387e9 | 2008-03-17 15:27:09 +1100 | [diff] [blame] | 407 | 	/* | 
 | 408 | 	 * We can't take a PMU exception inside _switch() since there is a | 
 | 409 | 	 * window where the kernel stack SLB and the kernel stack are out | 
 | 410 | 	 * of sync. Hard disable here. | 
 | 411 | 	 */ | 
 | 412 | 	hard_irq_disable(); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 413 | 	last = _switch(old_thread, new_thread); | 
 | 414 |  | 
 | 415 | 	local_irq_restore(flags); | 
 | 416 |  | 
 | 417 | 	return last; | 
 | 418 | } | 
 | 419 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 420 | static int instructions_to_print = 16; | 
 | 421 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 422 | static void show_instructions(struct pt_regs *regs) | 
 | 423 | { | 
 | 424 | 	int i; | 
 | 425 | 	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 * | 
 | 426 | 			sizeof(int)); | 
 | 427 |  | 
 | 428 | 	printk("Instruction dump:"); | 
 | 429 |  | 
 | 430 | 	for (i = 0; i < instructions_to_print; i++) { | 
 | 431 | 		int instr; | 
 | 432 |  | 
 | 433 | 		if (!(i % 8)) | 
 | 434 | 			printk("\n"); | 
 | 435 |  | 
| Scott Wood | 0de2d82 | 2007-09-28 04:38:55 +1000 | [diff] [blame] | 436 | #if !defined(CONFIG_BOOKE) | 
 | 437 | 		/* If executing with the IMMU off, adjust pc rather | 
 | 438 | 		 * than print XXXXXXXX. | 
 | 439 | 		 */ | 
 | 440 | 		if (!(regs->msr & MSR_IR)) | 
 | 441 | 			pc = (unsigned long)phys_to_virt(pc); | 
 | 442 | #endif | 
 | 443 |  | 
| Stephen Rothwell | af30837 | 2006-03-23 17:38:10 +1100 | [diff] [blame] | 444 | 		/* We use __get_user here *only* to avoid an OOPS on a | 
 | 445 | 		 * bad address because the pc *should* only be a | 
 | 446 | 		 * kernel address. | 
 | 447 | 		 */ | 
| Anton Blanchard | 00ae36d | 2006-10-13 12:17:16 +1000 | [diff] [blame] | 448 | 		if (!__kernel_text_address(pc) || | 
 | 449 | 		     __get_user(instr, (unsigned int __user *)pc)) { | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 450 | 			printk("XXXXXXXX "); | 
 | 451 | 		} else { | 
 | 452 | 			if (regs->nip == pc) | 
 | 453 | 				printk("<%08x> ", instr); | 
 | 454 | 			else | 
 | 455 | 				printk("%08x ", instr); | 
 | 456 | 		} | 
 | 457 |  | 
 | 458 | 		pc += sizeof(int); | 
 | 459 | 	} | 
 | 460 |  | 
 | 461 | 	printk("\n"); | 
 | 462 | } | 
 | 463 |  | 
 | 464 | static struct regbit { | 
 | 465 | 	unsigned long bit; | 
 | 466 | 	const char *name; | 
 | 467 | } msr_bits[] = { | 
 | 468 | 	{MSR_EE,	"EE"}, | 
 | 469 | 	{MSR_PR,	"PR"}, | 
 | 470 | 	{MSR_FP,	"FP"}, | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 471 | 	{MSR_VEC,	"VEC"}, | 
 | 472 | 	{MSR_VSX,	"VSX"}, | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 473 | 	{MSR_ME,	"ME"}, | 
| Kumar Gala | 1b98326 | 2008-11-19 04:39:53 +0000 | [diff] [blame] | 474 | 	{MSR_CE,	"CE"}, | 
 | 475 | 	{MSR_DE,	"DE"}, | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 476 | 	{MSR_IR,	"IR"}, | 
 | 477 | 	{MSR_DR,	"DR"}, | 
 | 478 | 	{0,		NULL} | 
 | 479 | }; | 
 | 480 |  | 
 | 481 | static void printbits(unsigned long val, struct regbit *bits) | 
 | 482 | { | 
 | 483 | 	const char *sep = ""; | 
 | 484 |  | 
 | 485 | 	printk("<"); | 
 | 486 | 	for (; bits->bit; ++bits) | 
 | 487 | 		if (val & bits->bit) { | 
 | 488 | 			printk("%s%s", sep, bits->name); | 
 | 489 | 			sep = ","; | 
 | 490 | 		} | 
 | 491 | 	printk(">"); | 
 | 492 | } | 
 | 493 |  | 
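 |  | /* | 
 |  |  * Example: for a typical user-mode MSR, printbits(regs->msr, msr_bits) | 
 |  |  * prints something like <EE,PR,FP,ME,IR,DR> on the "MSR:" line below. | 
 |  |  */ | 
 |  |  | 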
 | 494 | #ifdef CONFIG_PPC64 | 
| anton@samba.org | f6f7dde | 2007-03-20 20:38:19 -0500 | [diff] [blame] | 495 | #define REG		"%016lx" | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 496 | #define REGS_PER_LINE	4 | 
 | 497 | #define LAST_VOLATILE	13 | 
 | 498 | #else | 
| anton@samba.org | f6f7dde | 2007-03-20 20:38:19 -0500 | [diff] [blame] | 499 | #define REG		"%08lx" | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 500 | #define REGS_PER_LINE	8 | 
 | 501 | #define LAST_VOLATILE	12 | 
 | 502 | #endif | 
 | 503 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 504 | void show_regs(struct pt_regs * regs) | 
 | 505 | { | 
 | 506 | 	int i, trap; | 
 | 507 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 508 | 	printk("NIP: "REG" LR: "REG" CTR: "REG"\n", | 
 | 509 | 	       regs->nip, regs->link, regs->ctr); | 
 | 510 | 	printk("REGS: %p TRAP: %04lx   %s  (%s)\n", | 
| Serge E. Hallyn | 96b644b | 2006-10-02 02:18:13 -0700 | [diff] [blame] | 511 | 	       regs, regs->trap, print_tainted(), init_utsname()->release); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 512 | 	printk("MSR: "REG" ", regs->msr); | 
 | 513 | 	printbits(regs->msr, msr_bits); | 
| anton@samba.org | f6f7dde | 2007-03-20 20:38:19 -0500 | [diff] [blame] | 514 | 	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 515 | 	trap = TRAP(regs); | 
 | 516 | 	if (trap == 0x300 || trap == 0x600) | 
| Kumar Gala | 1417078 | 2007-07-26 00:46:15 -0500 | [diff] [blame] | 517 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 
 | 518 | 		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 
 | 519 | #else | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 520 | 		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 
| Kumar Gala | 1417078 | 2007-07-26 00:46:15 -0500 | [diff] [blame] | 521 | #endif | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 522 | 	printk("TASK = %p[%d] '%s' THREAD: %p", | 
| Alexey Dobriyan | 19c5870 | 2007-10-18 23:40:41 -0700 | [diff] [blame] | 523 | 	       current, task_pid_nr(current), current->comm, task_thread_info(current)); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 524 |  | 
 | 525 | #ifdef CONFIG_SMP | 
| Hugh Dickins | 79ccd1b | 2008-02-09 05:25:13 +1100 | [diff] [blame] | 526 | 	printk(" CPU: %d", raw_smp_processor_id()); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 527 | #endif /* CONFIG_SMP */ | 
 | 528 |  | 
 | 529 | 	for (i = 0;  i < 32;  i++) { | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 530 | 		if ((i % REGS_PER_LINE) == 0) | 
| Kumar Gala | a236719 | 2009-06-18 22:29:55 +0000 | [diff] [blame] | 531 | 			printk("\nGPR%02d: ", i); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 532 | 		printk(REG " ", regs->gpr[i]); | 
 | 533 | 		if (i == LAST_VOLATILE && !FULL_REGS(regs)) | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 534 | 			break; | 
 | 535 | 	} | 
 | 536 | 	printk("\n"); | 
 | 537 | #ifdef CONFIG_KALLSYMS | 
 | 538 | 	/* | 
 | 539 | 	 * Lookup NIP late so we have the best chance of getting the | 
 | 540 | 	 * above info out without failing | 
 | 541 | 	 */ | 
| Benjamin Herrenschmidt | 058c78f | 2008-07-07 13:44:31 +1000 | [diff] [blame] | 542 | 	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip); | 
 | 543 | 	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 544 | #endif | 
 | 545 | 	show_stack(current, (unsigned long *) regs->gpr[1]); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 546 | 	if (!user_mode(regs)) | 
 | 547 | 		show_instructions(regs); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 548 | } | 
 | 549 |  | 
 | 550 | void exit_thread(void) | 
 | 551 | { | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 552 | 	discard_lazy_cpu_state(); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 553 | } | 
 | 554 |  | 
 | 555 | void flush_thread(void) | 
 | 556 | { | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 557 | 	discard_lazy_cpu_state(); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 558 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 559 | 	if (current->thread.dabr) { | 
 | 560 | 		current->thread.dabr = 0; | 
 | 561 | 		set_dabr(0); | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 562 |  | 
| Kumar Gala | 2325f0a | 2008-07-26 05:27:33 +1000 | [diff] [blame] | 563 | #if defined(CONFIG_BOOKE) | 
| Luis Machado | d6a61bf | 2008-07-24 02:10:41 +1000 | [diff] [blame] | 564 | 		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W); | 
 | 565 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 566 | 	} | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 567 | } | 
 | 568 |  | 
 | 569 | void | 
 | 570 | release_thread(struct task_struct *t) | 
 | 571 | { | 
 | 572 | } | 
 | 573 |  | 
 | 574 | /* | 
 | 575 |  * This gets called before we allocate a new thread and copy | 
 | 576 |  * the current task into it. | 
 | 577 |  */ | 
 | 578 | void prepare_to_copy(struct task_struct *tsk) | 
 | 579 | { | 
 | 580 | 	flush_fp_to_thread(current); | 
 | 581 | 	flush_altivec_to_thread(current); | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 582 | 	flush_vsx_to_thread(current); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 583 | 	flush_spe_to_thread(current); | 
 | 584 | } | 
 | 585 |  | 
 | 586 | /* | 
 | 587 |  * Copy a thread.. | 
 | 588 |  */ | 
| Alexey Dobriyan | 6f2c55b | 2009-04-02 16:56:59 -0700 | [diff] [blame] | 589 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 590 | 		unsigned long unused, struct task_struct *p, | 
 | 591 | 		struct pt_regs *regs) | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 592 | { | 
 | 593 | 	struct pt_regs *childregs, *kregs; | 
 | 594 | 	extern void ret_from_fork(void); | 
| Al Viro | 0cec6fd | 2006-01-12 01:06:02 -0800 | [diff] [blame] | 595 | 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 596 |  | 
 | 597 | 	CHECK_FULL_REGS(regs); | 
 | 598 | 	/* Copy registers */ | 
 | 599 | 	sp -= sizeof(struct pt_regs); | 
 | 600 | 	childregs = (struct pt_regs *) sp; | 
 | 601 | 	*childregs = *regs; | 
 | 602 | 	if ((childregs->msr & MSR_PR) == 0) { | 
 | 603 | 		/* for kernel thread, set `current' and stackptr in new task */ | 
 | 604 | 		childregs->gpr[1] = sp + sizeof(struct pt_regs); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 605 | #ifdef CONFIG_PPC32 | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 606 | 		childregs->gpr[2] = (unsigned long) p; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 607 | #else | 
| Al Viro | b5e2fc1 | 2006-01-12 01:06:01 -0800 | [diff] [blame] | 608 | 		clear_tsk_thread_flag(p, TIF_32BIT); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 609 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 610 | 		p->thread.regs = NULL;	/* no user register state */ | 
 | 611 | 	} else { | 
 | 612 | 		childregs->gpr[1] = usp; | 
 | 613 | 		p->thread.regs = childregs; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 614 | 		if (clone_flags & CLONE_SETTLS) { | 
 | 615 | #ifdef CONFIG_PPC64 | 
 | 616 | 			if (!test_thread_flag(TIF_32BIT)) | 
 | 617 | 				childregs->gpr[13] = childregs->gpr[6]; | 
 | 618 | 			else | 
 | 619 | #endif | 
 | 620 | 				childregs->gpr[2] = childregs->gpr[6]; | 
 | 621 | 		} | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 622 | 	} | 
 | 623 | 	childregs->gpr[3] = 0;  /* Result from fork() */ | 
 | 624 | 	sp -= STACK_FRAME_OVERHEAD; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 625 |  | 
 | 626 | 	/* | 
 | 627 | 	 * The way this works is that at some point in the future | 
 | 628 | 	 * some task will call _switch to switch to the new task. | 
 | 629 | 	 * That will pop off the stack frame created below and start | 
 | 630 | 	 * the new task running at ret_from_fork.  The new task will | 
 | 631 | 	 * do some house keeping and then return from the fork or clone | 
 | 632 | 	 * system call, using the stack frame created above. | 
 | 633 | 	 */ | 
 | 634 | 	sp -= sizeof(struct pt_regs); | 
 | 635 | 	kregs = (struct pt_regs *) sp; | 
 | 636 | 	sp -= STACK_FRAME_OVERHEAD; | 
 | 637 | 	p->thread.ksp = sp; | 
| Kumar Gala | 8521882 | 2008-04-28 16:21:22 +1000 | [diff] [blame] | 638 | 	p->thread.ksp_limit = (unsigned long)task_stack_page(p) + | 
 | 639 | 				_ALIGN_UP(sizeof(struct thread_info), 16); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 640 |  | 
| Benjamin Herrenschmidt | 9449168 | 2009-06-02 21:17:45 +0000 | [diff] [blame] | 641 | #ifdef CONFIG_PPC_STD_MMU_64 | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 642 | 	if (cpu_has_feature(CPU_FTR_SLB)) { | 
| Paul Mackerras | 1189be6 | 2007-10-11 20:37:10 +1000 | [diff] [blame] | 643 | 		unsigned long sp_vsid; | 
| Benjamin Herrenschmidt | 3c726f8 | 2005-11-07 11:06:55 +1100 | [diff] [blame] | 644 | 		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 645 |  | 
| Paul Mackerras | 1189be6 | 2007-10-11 20:37:10 +1000 | [diff] [blame] | 646 | 		if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) | 
 | 647 | 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) | 
 | 648 | 				<< SLB_VSID_SHIFT_1T; | 
 | 649 | 		else | 
 | 650 | 			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) | 
 | 651 | 				<< SLB_VSID_SHIFT; | 
| Benjamin Herrenschmidt | 3c726f8 | 2005-11-07 11:06:55 +1100 | [diff] [blame] | 652 | 		sp_vsid |= SLB_VSID_KERNEL | llp; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 653 | 		p->thread.ksp_vsid = sp_vsid; | 
 | 654 | 	} | 
| Benjamin Herrenschmidt | 747bea9 | 2009-07-23 23:15:27 +0000 | [diff] [blame] | 655 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 656 |  | 
 | 657 | 	/* | 
 | 658 | 	 * The PPC64 ABI makes use of a TOC to contain function  | 
 | 659 |  * pointers.  The function (ret_from_fork) is actually a pointer | 
 | 660 | 	 * to the TOC entry.  The first entry is a pointer to the actual | 
 | 661 | 	 * function. | 
 | 662 |  	 */ | 
| Benjamin Herrenschmidt | 747bea9 | 2009-07-23 23:15:27 +0000 | [diff] [blame] | 663 | #ifdef CONFIG_PPC64 | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 664 | 	kregs->nip = *((unsigned long *)ret_from_fork); | 
 | 665 | #else | 
 | 666 | 	kregs->nip = (unsigned long)ret_from_fork; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 667 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 668 |  | 
 | 669 | 	return 0; | 
 | 670 | } | 
 | 671 |  | 
 | 672 | /* | 
 | 673 |  * Set up a thread for executing a new program | 
 | 674 |  */ | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 675 | void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 676 | { | 
| Michael Ellerman | 90eac72 | 2005-10-21 16:01:33 +1000 | [diff] [blame] | 677 | #ifdef CONFIG_PPC64 | 
 | 678 | 	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */ | 
 | 679 | #endif | 
 | 680 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 681 | 	set_fs(USER_DS); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 682 |  | 
 | 683 | 	/* | 
 | 684 | 	 * If we exec out of a kernel thread then thread.regs will not be | 
 | 685 | 	 * set.  Do it now. | 
 | 686 | 	 */ | 
 | 687 | 	if (!current->thread.regs) { | 
| Al Viro | 0cec6fd | 2006-01-12 01:06:02 -0800 | [diff] [blame] | 688 | 		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; | 
 | 689 | 		current->thread.regs = regs - 1; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 690 | 	} | 
 | 691 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 692 | 	memset(regs->gpr, 0, sizeof(regs->gpr)); | 
 | 693 | 	regs->ctr = 0; | 
 | 694 | 	regs->link = 0; | 
 | 695 | 	regs->xer = 0; | 
 | 696 | 	regs->ccr = 0; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 697 | 	regs->gpr[1] = sp; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 698 |  | 
| Roland McGrath | 474f819 | 2007-09-24 16:52:44 -0700 | [diff] [blame] | 699 | 	/* | 
 | 700 | 	 * We have just cleared all the nonvolatile GPRs, so make | 
 | 701 | 	 * FULL_REGS(regs) return true.  This is necessary to allow | 
 | 702 | 	 * ptrace to examine the thread immediately after exec. | 
 | 703 | 	 */ | 
 | 704 | 	regs->trap &= ~1UL; | 
 | 705 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 706 | #ifdef CONFIG_PPC32 | 
 | 707 | 	regs->mq = 0; | 
 | 708 | 	regs->nip = start; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 709 | 	regs->msr = MSR_USER; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 710 | #else | 
| Stephen Rothwell | d4bf9a7 | 2005-10-13 13:40:54 +1000 | [diff] [blame] | 711 | 	if (!test_thread_flag(TIF_32BIT)) { | 
| Michael Ellerman | 90eac72 | 2005-10-21 16:01:33 +1000 | [diff] [blame] | 712 | 		unsigned long entry, toc; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 713 |  | 
 | 714 | 		/* start is a relocated pointer to the function descriptor for | 
 | 715 | 		 * the elf _start routine.  The first entry in the function | 
 | 716 | 		 * descriptor is the entry address of _start and the second | 
 | 717 | 		 * entry is the TOC value we need to use. | 
 | 718 | 		 */ | 
 | 719 | 		__get_user(entry, (unsigned long __user *)start); | 
 | 720 | 		__get_user(toc, (unsigned long __user *)start+1); | 
 | 721 |  | 
 | 722 | 		/* Check whether the e_entry function descriptor entries | 
 | 723 | 		 * need to be relocated before we can use them. | 
 | 724 | 		 */ | 
 | 725 | 		if (load_addr != 0) { | 
 | 726 | 			entry += load_addr; | 
 | 727 | 			toc   += load_addr; | 
 | 728 | 		} | 
 | 729 | 		regs->nip = entry; | 
 | 730 | 		regs->gpr[2] = toc; | 
 | 731 | 		regs->msr = MSR_USER64; | 
| Stephen Rothwell | d4bf9a7 | 2005-10-13 13:40:54 +1000 | [diff] [blame] | 732 | 	} else { | 
 | 733 | 		regs->nip = start; | 
 | 734 | 		regs->gpr[2] = 0; | 
 | 735 | 		regs->msr = MSR_USER32; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 736 | 	} | 
 | 737 | #endif | 
 | 738 |  | 
| Paul Mackerras | 48abec0 | 2005-11-30 13:20:54 +1100 | [diff] [blame] | 739 | 	discard_lazy_cpu_state(); | 
| Michael Neuling | ce48b21 | 2008-06-25 14:07:18 +1000 | [diff] [blame] | 740 | #ifdef CONFIG_VSX | 
 | 741 | 	current->thread.used_vsr = 0; | 
 | 742 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 743 | 	memset(current->thread.fpr, 0, sizeof(current->thread.fpr)); | 
| David Gibson | 25c8a78 | 2005-10-27 16:27:25 +1000 | [diff] [blame] | 744 | 	current->thread.fpscr.val = 0; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 745 | #ifdef CONFIG_ALTIVEC | 
 | 746 | 	memset(current->thread.vr, 0, sizeof(current->thread.vr)); | 
 | 747 | 	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr)); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 748 | 	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */ | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 749 | 	current->thread.vrsave = 0; | 
 | 750 | 	current->thread.used_vr = 0; | 
 | 751 | #endif /* CONFIG_ALTIVEC */ | 
 | 752 | #ifdef CONFIG_SPE | 
 | 753 | 	memset(current->thread.evr, 0, sizeof(current->thread.evr)); | 
 | 754 | 	current->thread.acc = 0; | 
 | 755 | 	current->thread.spefscr = 0; | 
 | 756 | 	current->thread.used_spe = 0; | 
 | 757 | #endif /* CONFIG_SPE */ | 
 | 758 | } | 
 | 759 |  | 
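 |  | /* | 
 |  |  * For reference, a sketch (not a structure used by this file) of the | 
 |  |  * 64-bit ELFv1 function descriptor that start_thread() dereferences | 
 |  |  * above when it loads the entry point and TOC value for _start: | 
 |  |  */ | 
 |  | struct example_func_desc {		/* illustrative name */ | 
 |  | 	unsigned long entry;		/* address of the first instruction */ | 
 |  | 	unsigned long toc;		/* TOC pointer to load into r2 */ | 
 |  | 	unsigned long env;		/* environment pointer, unused by C */ | 
 |  | }; | 
 |  |  | 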
 | 760 | #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | 
 | 761 | 		| PR_FP_EXC_RES | PR_FP_EXC_INV) | 
 | 762 |  | 
 | 763 | int set_fpexc_mode(struct task_struct *tsk, unsigned int val) | 
 | 764 | { | 
 | 765 | 	struct pt_regs *regs = tsk->thread.regs; | 
 | 766 |  | 
 | 767 | 	/* This is a bit hairy.  If we are an SPE enabled processor | 
 | 768 | 	 * (have embedded fp) we store the IEEE exception enable flags in | 
 | 769 | 	 * fpexc_mode.  fpexc_mode is also used for setting FP exception | 
 | 770 | 	 * mode (async, precise, disabled) for 'Classic' FP. */ | 
 | 771 | 	if (val & PR_FP_EXC_SW_ENABLE) { | 
 | 772 | #ifdef CONFIG_SPE | 
| Kumar Gala | 5e14d21 | 2007-09-13 01:44:20 -0500 | [diff] [blame] | 773 | 		if (cpu_has_feature(CPU_FTR_SPE)) { | 
 | 774 | 			tsk->thread.fpexc_mode = val & | 
 | 775 | 				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); | 
 | 776 | 			return 0; | 
 | 777 | 		} else { | 
 | 778 | 			return -EINVAL; | 
 | 779 | 		} | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 780 | #else | 
 | 781 | 		return -EINVAL; | 
 | 782 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 783 | 	} | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 784 |  | 
 | 785 | 	/* on a CONFIG_SPE build this does not hurt us.  The bits that | 
 | 786 | 	 * __pack_fe01 use do not overlap with bits used for | 
 | 787 | 	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits | 
 | 788 | 	 * on CONFIG_SPE implementations are reserved so writing to | 
 | 789 | 	 * them does not change anything */ | 
 | 790 | 	if (val > PR_FP_EXC_PRECISE) | 
 | 791 | 		return -EINVAL; | 
 | 792 | 	tsk->thread.fpexc_mode = __pack_fe01(val); | 
 | 793 | 	if (regs != NULL && (regs->msr & MSR_FP) != 0) | 
 | 794 | 		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) | 
 | 795 | 			| tsk->thread.fpexc_mode; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 796 | 	return 0; | 
 | 797 | } | 
 | 798 |  | 
 | 799 | int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) | 
 | 800 | { | 
 | 801 | 	unsigned int val; | 
 | 802 |  | 
 | 803 | 	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) | 
 | 804 | #ifdef CONFIG_SPE | 
| Kumar Gala | 5e14d21 | 2007-09-13 01:44:20 -0500 | [diff] [blame] | 805 | 		if (cpu_has_feature(CPU_FTR_SPE)) | 
 | 806 | 			val = tsk->thread.fpexc_mode; | 
 | 807 | 		else | 
 | 808 | 			return -EINVAL; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 809 | #else | 
 | 810 | 		return -EINVAL; | 
 | 811 | #endif | 
 | 812 | 	else | 
 | 813 | 		val = __unpack_fe01(tsk->thread.fpexc_mode); | 
 | 814 | 	return put_user(val, (unsigned int __user *) adr); | 
 | 815 | } | 
 | 816 |  | 
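 |  | /* | 
 |  |  * Userspace reaches set_fpexc_mode()/get_fpexc_mode() through prctl(). | 
 |  |  * A minimal sketch of a caller (illustrative, error handling omitted): | 
 |  |  * | 
 |  |  *	#include <sys/prctl.h> | 
 |  |  * | 
 |  |  *	unsigned int mode; | 
 |  |  *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE); | 
 |  |  *	prctl(PR_GET_FPEXC, &mode); | 
 |  |  */ | 
 |  |  | 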
| Paul Mackerras | fab5db9 | 2006-06-07 16:14:40 +1000 | [diff] [blame] | 817 | int set_endian(struct task_struct *tsk, unsigned int val) | 
 | 818 | { | 
 | 819 | 	struct pt_regs *regs = tsk->thread.regs; | 
 | 820 |  | 
 | 821 | 	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) || | 
 | 822 | 	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE))) | 
 | 823 | 		return -EINVAL; | 
 | 824 |  | 
 | 825 | 	if (regs == NULL) | 
 | 826 | 		return -EINVAL; | 
 | 827 |  | 
 | 828 | 	if (val == PR_ENDIAN_BIG) | 
 | 829 | 		regs->msr &= ~MSR_LE; | 
 | 830 | 	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE) | 
 | 831 | 		regs->msr |= MSR_LE; | 
 | 832 | 	else | 
 | 833 | 		return -EINVAL; | 
 | 834 |  | 
 | 835 | 	return 0; | 
 | 836 | } | 
 | 837 |  | 
 | 838 | int get_endian(struct task_struct *tsk, unsigned long adr) | 
 | 839 | { | 
 | 840 | 	struct pt_regs *regs = tsk->thread.regs; | 
 | 841 | 	unsigned int val; | 
 | 842 |  | 
 | 843 | 	if (!cpu_has_feature(CPU_FTR_PPC_LE) && | 
 | 844 | 	    !cpu_has_feature(CPU_FTR_REAL_LE)) | 
 | 845 | 		return -EINVAL; | 
 | 846 |  | 
 | 847 | 	if (regs == NULL) | 
 | 848 | 		return -EINVAL; | 
 | 849 |  | 
 | 850 | 	if (regs->msr & MSR_LE) { | 
 | 851 | 		if (cpu_has_feature(CPU_FTR_REAL_LE)) | 
 | 852 | 			val = PR_ENDIAN_LITTLE; | 
 | 853 | 		else | 
 | 854 | 			val = PR_ENDIAN_PPC_LITTLE; | 
 | 855 | 	} else | 
 | 856 | 		val = PR_ENDIAN_BIG; | 
 | 857 |  | 
 | 858 | 	return put_user(val, (unsigned int __user *)adr); | 
 | 859 | } | 
 | 860 |  | 
| Paul Mackerras | e9370ae | 2006-06-07 16:15:39 +1000 | [diff] [blame] | 861 | int set_unalign_ctl(struct task_struct *tsk, unsigned int val) | 
 | 862 | { | 
 | 863 | 	tsk->thread.align_ctl = val; | 
 | 864 | 	return 0; | 
 | 865 | } | 
 | 866 |  | 
 | 867 | int get_unalign_ctl(struct task_struct *tsk, unsigned long adr) | 
 | 868 | { | 
 | 869 | 	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr); | 
 | 870 | } | 
 | 871 |  | 
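 |  | /* | 
 |  |  * Likewise, set_endian()/get_endian() and the alignment controls above | 
 |  |  * are driven from userspace via prctl(); a hedged sketch: | 
 |  |  * | 
 |  |  *	prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG); | 
 |  |  *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT); | 
 |  |  */ | 
 |  |  | 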
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 872 | #define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff)) | 
 | 873 |  | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 874 | int sys_clone(unsigned long clone_flags, unsigned long usp, | 
 | 875 | 	      int __user *parent_tidp, void __user *child_threadptr, | 
 | 876 | 	      int __user *child_tidp, int p6, | 
 | 877 | 	      struct pt_regs *regs) | 
 | 878 | { | 
 | 879 | 	CHECK_FULL_REGS(regs); | 
 | 880 | 	if (usp == 0) | 
 | 881 | 		usp = regs->gpr[1];	/* stack pointer for child */ | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 882 | #ifdef CONFIG_PPC64 | 
 | 883 | 	if (test_thread_flag(TIF_32BIT)) { | 
 | 884 | 		parent_tidp = TRUNC_PTR(parent_tidp); | 
 | 885 | 		child_tidp = TRUNC_PTR(child_tidp); | 
 | 886 | 	} | 
 | 887 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 888 |  	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp); | 
 | 889 | } | 
 | 890 |  | 
 | 891 | int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3, | 
 | 892 | 	     unsigned long p4, unsigned long p5, unsigned long p6, | 
 | 893 | 	     struct pt_regs *regs) | 
 | 894 | { | 
 | 895 | 	CHECK_FULL_REGS(regs); | 
 | 896 | 	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL); | 
 | 897 | } | 
 | 898 |  | 
 | 899 | int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3, | 
 | 900 | 	      unsigned long p4, unsigned long p5, unsigned long p6, | 
 | 901 | 	      struct pt_regs *regs) | 
 | 902 | { | 
 | 903 | 	CHECK_FULL_REGS(regs); | 
 | 904 | 	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1], | 
 | 905 | 			regs, 0, NULL, NULL); | 
 | 906 | } | 
 | 907 |  | 
 | 908 | int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2, | 
 | 909 | 	       unsigned long a3, unsigned long a4, unsigned long a5, | 
 | 910 | 	       struct pt_regs *regs) | 
 | 911 | { | 
 | 912 | 	int error; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 913 | 	char *filename; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 914 |  | 
 | 915 | 	filename = getname((char __user *) a0); | 
 | 916 | 	error = PTR_ERR(filename); | 
 | 917 | 	if (IS_ERR(filename)) | 
 | 918 | 		goto out; | 
 | 919 | 	flush_fp_to_thread(current); | 
 | 920 | 	flush_altivec_to_thread(current); | 
 | 921 | 	flush_spe_to_thread(current); | 
| Paul Mackerras | 20c8c21 | 2005-09-28 20:28:14 +1000 | [diff] [blame] | 922 | 	error = do_execve(filename, (char __user * __user *) a1, | 
 | 923 | 			  (char __user * __user *) a2, regs); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 924 | 	putname(filename); | 
 | 925 | out: | 
 | 926 | 	return error; | 
 | 927 | } | 
 | 928 |  | 
| Paul Mackerras | bb72c48 | 2007-02-19 11:42:42 +1100 | [diff] [blame] | 929 | #ifdef CONFIG_IRQSTACKS | 
 | 930 | static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, | 
 | 931 | 				  unsigned long nbytes) | 
 | 932 | { | 
 | 933 | 	unsigned long stack_page; | 
 | 934 | 	unsigned long cpu = task_cpu(p); | 
 | 935 |  | 
 | 936 | 	/* | 
 | 937 | 	 * Avoid crashing if the stack has overflowed and corrupted | 
 | 938 | 	 * task_cpu(p), which is in the thread_info struct. | 
 | 939 | 	 */ | 
 | 940 | 	if (cpu < NR_CPUS && cpu_possible(cpu)) { | 
 | 941 | 		stack_page = (unsigned long) hardirq_ctx[cpu]; | 
 | 942 | 		if (sp >= stack_page + sizeof(struct thread_struct) | 
 | 943 | 		    && sp <= stack_page + THREAD_SIZE - nbytes) | 
 | 944 | 			return 1; | 
 | 945 |  | 
 | 946 | 		stack_page = (unsigned long) softirq_ctx[cpu]; | 
 | 947 | 		if (sp >= stack_page + sizeof(struct thread_struct) | 
 | 948 | 		    && sp <= stack_page + THREAD_SIZE - nbytes) | 
 | 949 | 			return 1; | 
 | 950 | 	} | 
 | 951 | 	return 0; | 
 | 952 | } | 
 | 953 |  | 
 | 954 | #else | 
 | 955 | #define valid_irq_stack(sp, p, nb)	0 | 
 | 956 | #endif /* CONFIG_IRQSTACKS */ | 
 | 957 |  | 
| Anton Blanchard | 2f25194 | 2006-03-27 11:46:18 +1100 | [diff] [blame] | 958 | int validate_sp(unsigned long sp, struct task_struct *p, | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 959 | 		       unsigned long nbytes) | 
 | 960 | { | 
| Al Viro | 0cec6fd | 2006-01-12 01:06:02 -0800 | [diff] [blame] | 961 | 	unsigned long stack_page = (unsigned long)task_stack_page(p); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 962 |  | 
 | 963 | 	if (sp >= stack_page + sizeof(struct thread_struct) | 
 | 964 | 	    && sp <= stack_page + THREAD_SIZE - nbytes) | 
 | 965 | 		return 1; | 
 | 966 |  | 
| Paul Mackerras | bb72c48 | 2007-02-19 11:42:42 +1100 | [diff] [blame] | 967 | 	return valid_irq_stack(sp, p, nbytes); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 968 | } | 
 | 969 |  | 
| Anton Blanchard | 2f25194 | 2006-03-27 11:46:18 +1100 | [diff] [blame] | 970 | EXPORT_SYMBOL(validate_sp); | 
 | 971 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 972 | unsigned long get_wchan(struct task_struct *p) | 
 | 973 | { | 
 | 974 | 	unsigned long ip, sp; | 
 | 975 | 	int count = 0; | 
 | 976 |  | 
 | 977 | 	if (!p || p == current || p->state == TASK_RUNNING) | 
 | 978 | 		return 0; | 
 | 979 |  | 
 | 980 | 	sp = p->thread.ksp; | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 981 | 	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 982 | 		return 0; | 
 | 983 |  | 
 | 984 | 	do { | 
 | 985 | 		sp = *(unsigned long *)sp; | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 986 | 		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 987 | 			return 0; | 
 | 988 | 		if (count > 0) { | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 989 | 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 990 | 			if (!in_sched_functions(ip)) | 
 | 991 | 				return ip; | 
 | 992 | 		} | 
 | 993 | 	} while (count++ < 16); | 
 | 994 | 	return 0; | 
 | 995 | } | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 996 |  | 
| Johannes Berg | c4d04be | 2008-11-20 03:24:07 +0000 | [diff] [blame] | 997 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 998 |  | 
 | 999 | void show_stack(struct task_struct *tsk, unsigned long *stack) | 
 | 1000 | { | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1001 | 	unsigned long sp, ip, lr, newsp; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1002 | 	int count = 0; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1003 | 	int firstframe = 1; | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1004 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
 | 1005 | 	int curr_frame = current->curr_ret_stack; | 
 | 1006 | 	extern void return_to_handler(void); | 
| Steven Rostedt | 9135c3c | 2009-09-15 08:20:15 -0700 | [diff] [blame] | 1007 | 	unsigned long rth = (unsigned long)return_to_handler; | 
 | 1008 | 	unsigned long mrth = -1; | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1009 | #ifdef CONFIG_PPC64 | 
| Steven Rostedt | 9135c3c | 2009-09-15 08:20:15 -0700 | [diff] [blame] | 1010 | 	extern void mod_return_to_handler(void); | 
 | 1011 | 	rth = *(unsigned long *)rth; | 
 | 1012 | 	mrth = (unsigned long)mod_return_to_handler; | 
 | 1013 | 	mrth = *(unsigned long *)mrth; | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1014 | #endif | 
 | 1015 | #endif | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1016 |  | 
 | 1017 | 	sp = (unsigned long) stack; | 
 | 1018 | 	if (tsk == NULL) | 
 | 1019 | 		tsk = current; | 
 | 1020 | 	if (sp == 0) { | 
 | 1021 | 		if (tsk == current) | 
 | 1022 | 			asm("mr %0,1" : "=r" (sp)); | 
 | 1023 | 		else | 
 | 1024 | 			sp = tsk->thread.ksp; | 
 | 1025 | 	} | 
 | 1026 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1027 | 	lr = 0; | 
 | 1028 | 	printk("Call Trace:\n"); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1029 | 	do { | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1030 | 		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1031 | 			return; | 
 | 1032 |  | 
 | 1033 | 		stack = (unsigned long *) sp; | 
 | 1034 | 		newsp = stack[0]; | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1035 | 		ip = stack[STACK_FRAME_LR_SAVE]; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1036 | 		if (!firstframe || ip != lr) { | 
| Benjamin Herrenschmidt | 058c78f | 2008-07-07 13:44:31 +1000 | [diff] [blame] | 1037 | 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1038 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
| Steven Rostedt | 9135c3c | 2009-09-15 08:20:15 -0700 | [diff] [blame] | 1039 | 			if ((ip == rth || ip == mrth) && curr_frame >= 0) { | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1040 | 				printk(" (%pS)", | 
 | 1041 | 				       (void *)current->ret_stack[curr_frame].ret); | 
 | 1042 | 				curr_frame--; | 
 | 1043 | 			} | 
 | 1044 | #endif | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1045 | 			if (firstframe) | 
 | 1046 | 				printk(" (unreliable)"); | 
 | 1047 | 			printk("\n"); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1048 | 		} | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1049 | 		firstframe = 0; | 
 | 1050 |  | 
 | 1051 | 		/* | 
 | 1052 | 		 * See if this is an exception frame. | 
 | 1053 | 		 * We look for the "regshere" marker in the current frame. | 
 | 1054 | 		 */ | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1055 | 		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) | 
 | 1056 | 		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1057 | 			struct pt_regs *regs = (struct pt_regs *) | 
 | 1058 | 				(sp + STACK_FRAME_OVERHEAD); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1059 | 			lr = regs->link; | 
| Benjamin Herrenschmidt | 058c78f | 2008-07-07 13:44:31 +1000 | [diff] [blame] | 1060 | 			printk("--- Exception: %lx at %pS\n    LR = %pS\n", | 
 | 1061 | 			       regs->trap, (void *)regs->nip, (void *)lr); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1062 | 			firstframe = 1; | 
 | 1063 | 		} | 
 | 1064 |  | 
 | 1065 | 		sp = newsp; | 
 | 1066 | 	} while (count++ < kstack_depth_to_print); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1067 | } | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1068 |  | 
 | 1069 | void dump_stack(void) | 
 | 1070 | { | 
 | 1071 | 	show_stack(current, NULL); | 
 | 1072 | } | 
 | 1073 | EXPORT_SYMBOL(dump_stack); | 
| Anton Blanchard | cb2c9b2 | 2006-02-13 14:48:35 +1100 | [diff] [blame] | 1074 |  | 
 | 1075 | #ifdef CONFIG_PPC64 | 
 | 1076 | void ppc64_runlatch_on(void) | 
 | 1077 | { | 
 | 1078 | 	unsigned long ctrl; | 
 | 1079 |  | 
 | 1080 | 	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { | 
 | 1081 | 		HMT_medium(); | 
 | 1082 |  | 
 | 1083 | 		ctrl = mfspr(SPRN_CTRLF); | 
 | 1084 | 		ctrl |= CTRL_RUNLATCH; | 
 | 1085 | 		mtspr(SPRN_CTRLT, ctrl); | 
 | 1086 |  | 
 | 1087 | 		set_thread_flag(TIF_RUNLATCH); | 
 | 1088 | 	} | 
 | 1089 | } | 
 | 1090 |  | 
 | 1091 | void ppc64_runlatch_off(void) | 
 | 1092 | { | 
 | 1093 | 	unsigned long ctrl; | 
 | 1094 |  | 
 | 1095 | 	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) { | 
 | 1096 | 		HMT_medium(); | 
 | 1097 |  | 
 | 1098 | 		clear_thread_flag(TIF_RUNLATCH); | 
 | 1099 |  | 
 | 1100 | 		ctrl = mfspr(SPRN_CTRLF); | 
 | 1101 | 		ctrl &= ~CTRL_RUNLATCH; | 
 | 1102 | 		mtspr(SPRN_CTRLT, ctrl); | 
 | 1103 | 	} | 
 | 1104 | } | 
 | 1105 | #endif | 
| Benjamin Herrenschmidt | f6a6168 | 2008-04-18 16:56:17 +1000 | [diff] [blame] | 1106 |  | 
 | 1107 | #if THREAD_SHIFT < PAGE_SHIFT | 
 | 1108 |  | 
 | 1109 | static struct kmem_cache *thread_info_cache; | 
 | 1110 |  | 
 | 1111 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | 
 | 1112 | { | 
 | 1113 | 	struct thread_info *ti; | 
 | 1114 |  | 
 | 1115 | 	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | 
 | 1116 | 	if (unlikely(ti == NULL)) | 
 | 1117 | 		return NULL; | 
 | 1118 | #ifdef CONFIG_DEBUG_STACK_USAGE | 
 | 1119 | 	memset(ti, 0, THREAD_SIZE); | 
 | 1120 | #endif | 
 | 1121 | 	return ti; | 
 | 1122 | } | 
 | 1123 |  | 
 | 1124 | void free_thread_info(struct thread_info *ti) | 
 | 1125 | { | 
 | 1126 | 	kmem_cache_free(thread_info_cache, ti); | 
 | 1127 | } | 
 | 1128 |  | 
 | 1129 | void thread_info_cache_init(void) | 
 | 1130 | { | 
 | 1131 | 	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | 
 | 1132 | 					      THREAD_SIZE, 0, NULL); | 
 | 1133 | 	BUG_ON(thread_info_cache == NULL); | 
 | 1134 | } | 
 | 1135 |  | 
 | 1136 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | 
| Anton Blanchard | d839088 | 2009-02-22 01:50:03 +0000 | [diff] [blame] | 1137 |  | 
 | 1138 | unsigned long arch_align_stack(unsigned long sp) | 
 | 1139 | { | 
 | 1140 | 	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 
 | 1141 | 		sp -= get_random_int() & ~PAGE_MASK; | 
 | 1142 | 	return sp & ~0xf; | 
 | 1143 | } | 
| Anton Blanchard | 912f9ee | 2009-02-22 01:50:04 +0000 | [diff] [blame] | 1144 |  | 
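 |  | /* | 
 |  |  * Worked example for arch_align_stack() above (assuming 4KB pages): the | 
 |  |  * random offset is get_random_int() & ~PAGE_MASK, i.e. 0..4095 bytes | 
 |  |  * subtracted from the initial user stack pointer, after which sp & ~0xf | 
 |  |  * rounds down to the 16-byte alignment the ABI expects. | 
 |  |  */ | 
 |  |  | 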
 | 1145 | static inline unsigned long brk_rnd(void) | 
 | 1146 | { | 
 | 1147 | 	unsigned long rnd = 0; | 
 | 1148 |  | 
 | 1149 | 	/* 8MB for 32bit, 1GB for 64bit */ | 
 | 1150 | 	if (is_32bit_task()) | 
 | 1151 | 		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); | 
 | 1152 | 	else | 
 | 1153 | 		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); | 
 | 1154 |  | 
 | 1155 | 	return rnd << PAGE_SHIFT; | 
 | 1156 | } | 
 | 1157 |  | 
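 |  | /* | 
 |  |  * Worked example for brk_rnd() above (assuming PAGE_SHIFT == 12): a | 
 |  |  * 32-bit task draws rnd from [0, 1 << 11) pages, i.e. a heap offset of | 
 |  |  * just under 8MB once shifted left by PAGE_SHIFT; a 64-bit task draws | 
 |  |  * from [0, 1 << 18) pages, i.e. just under 1GB. | 
 |  |  */ | 
 |  |  | 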
 | 1158 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 
 | 1159 | { | 
| Anton Blanchard | 8bbde7a | 2009-09-21 16:52:35 +0000 | [diff] [blame] | 1160 | 	unsigned long base = mm->brk; | 
 | 1161 | 	unsigned long ret; | 
 | 1162 |  | 
| Kumar Gala | ce7a35c | 2009-10-16 07:05:17 +0000 | [diff] [blame] | 1163 | #ifdef CONFIG_PPC_STD_MMU_64 | 
| Anton Blanchard | 8bbde7a | 2009-09-21 16:52:35 +0000 | [diff] [blame] | 1164 | 	/* | 
 | 1165 | 	 * If we are using 1TB segments and we are allowed to randomise | 
 | 1166 | 	 * the heap, we can put it above 1TB so it is backed by a 1TB | 
 | 1167 | 	 * segment. Otherwise the heap will be in the bottom 1TB | 
 | 1168 | 	 * which always uses 256MB segments and this may result in a | 
 | 1169 | 	 * performance penalty. | 
 | 1170 | 	 */ | 
 | 1171 | 	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) | 
 | 1172 | 		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); | 
 | 1173 | #endif | 
 | 1174 |  | 
 | 1175 | 	ret = PAGE_ALIGN(base + brk_rnd()); | 
| Anton Blanchard | 912f9ee | 2009-02-22 01:50:04 +0000 | [diff] [blame] | 1176 |  | 
 | 1177 | 	if (ret < mm->brk) | 
 | 1178 | 		return mm->brk; | 
 | 1179 |  | 
 | 1180 | 	return ret; | 
 | 1181 | } | 
| Anton Blanchard | 501cb16 | 2009-02-22 01:50:07 +0000 | [diff] [blame] | 1182 |  | 
 | 1183 | unsigned long randomize_et_dyn(unsigned long base) | 
 | 1184 | { | 
 | 1185 | 	unsigned long ret = PAGE_ALIGN(base + brk_rnd()); | 
 | 1186 |  | 
 | 1187 | 	if (ret < base) | 
 | 1188 | 		return base; | 
 | 1189 |  | 
 | 1190 | 	return ret; | 
 | 1191 | } |