/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
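
/*
 * Illustrative sketch, not part of this file: the typical caller is
 * ptrace-style code that wants a consistent copy of a stopped child's
 * FP state.  It flushes first, then reads from the thread_struct rather
 * than from the live registers (the function below is hypothetical):
 *
 *	static void example_read_fp_state(struct task_struct *child,
 *					  void *buf)
 *	{
 *		flush_fp_to_thread(child);
 *		memcpy(buf, child->thread.fpr, sizeof(child->thread.fpr));
 *	}
 */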

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
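
/*
 * Illustrative sketch, not part of this file: a kernel user of the FPU
 * must hold off preemption around the FP section, since enable_kernel_fp()
 * asserts a non-preemptible context and the register state it grabs is
 * per-CPU.  A hypothetical caller:
 *
 *	static void example_use_fpu_in_kernel(void)
 *	{
 *		preempt_disable();
 *		enable_kernel_fp();
 *		...use FP instructions here...
 *		preempt_enable();
 *	}
 */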

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

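/*
 * VSX state overlaps the classic FP and VMX register files (the FP
 * registers overlay vsr0-vsr31 and the VMX registers overlay
 * vsr32-vsr63), so giving up VSX means giving up all three.
 */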
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |	\
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers from the values stored in the
 * new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	if (thread->dabr) {
		thread->dabr = 0;
		set_dabr(0);
	}
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	mtspr(SPRN_DAC1, dabr);
#elif defined(CONFIG_PPC_BOOK3S)
	mtspr(SPRN_DABR, dabr);
#endif

	return 0;
}

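/*
 * Illustrative sketch, not part of this file: a DABR value is a
 * doubleword-aligned data address with low-order control bits OR'd in.
 * Assuming the DABR_* bit definitions from asm/reg.h, a hypothetical
 * caller setting a read/write watchpoint on a virtual address:
 *
 *	static int example_set_watchpoint(unsigned long addr)
 *	{
 *		return set_dabr((addr & ~7UL) | DABR_TRANSLATION |
 *				DABR_DATA_READ | DABR_DATA_WRITE);
 *	}
 */
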
#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
	{MSR_ME,	"ME"},
	{MSR_CE,	"CE"},
	{MSR_DE,	"DE"},
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
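
/*
 * Illustrative sketch, not part of this file: for a typical user-mode
 * MSR value, printbits(regs->msr, msr_bits) would emit something like
 *
 *	<EE,PR,FP,ME,IR,DR>
 *
 * i.e. external interrupts enabled, problem (user) state, FP enabled,
 * machine check enabled, and instruction/data address translation on.
 */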

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

	set_debug_reg_defaults(&current->thread);
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;  /* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
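	/*
	 * Sketch of the child's kernel stack as built by this function
	 * (addresses grow downward; derived from the code above and below):
	 *
	 *	task_stack_page(p) + THREAD_SIZE          <- stack top
	 *	  struct pt_regs        (childregs, copied from regs)
	 *	  STACK_FRAME_OVERHEAD
	 *	  struct pt_regs        (kregs, NIP = ret_from_fork)
	 *	  STACK_FRAME_OVERHEAD
	 *	p->thread.ksp                             <- saved SP
	 */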
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_fork) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
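
/*
 * Illustrative sketch, not part of this file: set_fpexc_mode() backs the
 * generic prctl() interface, so a userspace program would request precise
 * FP exception mode with something like:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE) != 0)
 *		perror("PR_SET_FPEXC");
 */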

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
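
/*
 * Illustrative sketch, not part of this file: the endian hooks are
 * likewise reached via prctl() from userspace:
 *
 *	#include <sys/prctl.h>
 *
 *	int endian;
 *	if (prctl(PR_GET_ENDIAN, (unsigned long)&endian) == 0 &&
 *	    endian == PR_ENDIAN_BIG)
 *		prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *
 * PR_SET_ENDIAN fails with -EINVAL on CPUs lacking the matching
 * CPU_FTR_*_LE feature, as checked in set_endian() above.
 */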

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	putname(filename);
out:
	return error;
}

| Paul Mackerras | bb72c48 | 2007-02-19 11:42:42 +1100 | [diff] [blame] | 1005 | #ifdef CONFIG_IRQSTACKS | 
|  | 1006 | static inline int valid_irq_stack(unsigned long sp, struct task_struct *p, | 
|  | 1007 | unsigned long nbytes) | 
|  | 1008 | { | 
|  | 1009 | unsigned long stack_page; | 
|  | 1010 | unsigned long cpu = task_cpu(p); | 
|  | 1011 |  | 
|  | 1012 | /* | 
|  | 1013 | * Avoid crashing if the stack has overflowed and corrupted | 
|  | 1014 | * task_cpu(p), which is in the thread_info struct. | 
|  | 1015 | */ | 
|  | 1016 | if (cpu < NR_CPUS && cpu_possible(cpu)) { | 
|  | 1017 | stack_page = (unsigned long) hardirq_ctx[cpu]; | 
|  | 1018 | if (sp >= stack_page + sizeof(struct thread_struct) | 
|  | 1019 | && sp <= stack_page + THREAD_SIZE - nbytes) | 
|  | 1020 | return 1; | 
|  | 1021 |  | 
|  | 1022 | stack_page = (unsigned long) softirq_ctx[cpu]; | 
|  | 1023 | if (sp >= stack_page + sizeof(struct thread_struct) | 
|  | 1024 | && sp <= stack_page + THREAD_SIZE - nbytes) | 
|  | 1025 | return 1; | 
|  | 1026 | } | 
|  | 1027 | return 0; | 
|  | 1028 | } | 
|  | 1029 |  | 
|  | 1030 | #else | 
|  | 1031 | #define valid_irq_stack(sp, p, nb)	0 | 
|  | 1032 | #endif /* CONFIG_IRQSTACKS */ | 
|  | 1033 |  | 
| Anton Blanchard | 2f25194 | 2006-03-27 11:46:18 +1100 | [diff] [blame] | 1034 | int validate_sp(unsigned long sp, struct task_struct *p, | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1035 | unsigned long nbytes) | 
|  | 1036 | { | 
| Al Viro | 0cec6fd | 2006-01-12 01:06:02 -0800 | [diff] [blame] | 1037 | unsigned long stack_page = (unsigned long)task_stack_page(p); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1038 |  | 
|  | 1039 | if (sp >= stack_page + sizeof(struct thread_struct) | 
|  | 1040 | && sp <= stack_page + THREAD_SIZE - nbytes) | 
|  | 1041 | return 1; | 
|  | 1042 |  | 
| Paul Mackerras | bb72c48 | 2007-02-19 11:42:42 +1100 | [diff] [blame] | 1043 | return valid_irq_stack(sp, p, nbytes); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1044 | } | 
|  | 1045 |  | 
| Anton Blanchard | 2f25194 | 2006-03-27 11:46:18 +1100 | [diff] [blame] | 1046 | EXPORT_SYMBOL(validate_sp); | 
|  | 1047 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1048 | unsigned long get_wchan(struct task_struct *p) | 
|  | 1049 | { | 
|  | 1050 | unsigned long ip, sp; | 
|  | 1051 | int count = 0; | 
|  | 1052 |  | 
|  | 1053 | if (!p || p == current || p->state == TASK_RUNNING) | 
|  | 1054 | return 0; | 
|  | 1055 |  | 
|  | 1056 | sp = p->thread.ksp; | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1057 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1058 | return 0; | 
|  | 1059 |  | 
|  | 1060 | do { | 
|  | 1061 | sp = *(unsigned long *)sp; | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1062 | if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1063 | return 0; | 
|  | 1064 | if (count > 0) { | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1065 | ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1066 | if (!in_sched_functions(ip)) | 
|  | 1067 | return ip; | 
|  | 1068 | } | 
|  | 1069 | } while (count++ < 16); | 
|  | 1070 | return 0; | 
|  | 1071 | } | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1072 |  | 
| Johannes Berg | c4d04be | 2008-11-20 03:24:07 +0000 | [diff] [blame] | 1073 | static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1074 |  | 
|  | 1075 | void show_stack(struct task_struct *tsk, unsigned long *stack) | 
|  | 1076 | { | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1077 | unsigned long sp, ip, lr, newsp; | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1078 | int count = 0; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1079 | int firstframe = 1; | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1080 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
|  | 1081 | int curr_frame = current->curr_ret_stack; | 
|  | 1082 | extern void return_to_handler(void); | 
| Steven Rostedt | 9135c3c | 2009-09-15 08:20:15 -0700 | [diff] [blame] | 1083 | unsigned long rth = (unsigned long)return_to_handler; | 
|  | 1084 | unsigned long mrth = -1; | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1085 | #ifdef CONFIG_PPC64 | 
| Steven Rostedt | 9135c3c | 2009-09-15 08:20:15 -0700 | [diff] [blame] | 1086 | extern void mod_return_to_handler(void); | 
|  | 1087 | rth = *(unsigned long *)rth; | 
|  | 1088 | mrth = (unsigned long)mod_return_to_handler; | 
|  | 1089 | mrth = *(unsigned long *)mrth; | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1090 | #endif | 
|  | 1091 | #endif | 
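|  |  | /* | 
|  |  |  * On 64-bit, function symbols name function descriptors (ELFv1 ABI), | 
|  |  |  * so the dereferences above fetch the actual entry-point addresses | 
|  |  |  * that appear as return addresses on the stack. | 
|  |  |  */ | 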
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1092 |  | 
|  | 1093 | sp = (unsigned long) stack; | 
|  | 1094 | if (tsk == NULL) | 
|  | 1095 | tsk = current; | 
|  | 1096 | if (sp == 0) { | 
|  | 1097 | if (tsk == current) | 
|  | 1098 | asm("mr %0,1" : "=r" (sp));	/* r1 is the stack pointer */ | 
|  | 1099 | else | 
|  | 1100 | sp = tsk->thread.ksp; | 
|  | 1101 | } | 
|  | 1102 |  | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1103 | lr = 0; | 
|  | 1104 | printk("Call Trace:\n"); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1105 | do { | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1106 | if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1107 | return; | 
|  | 1108 |  | 
|  | 1109 | stack = (unsigned long *) sp; | 
|  | 1110 | newsp = stack[0];	/* back chain to the caller's frame */ | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1111 | ip = stack[STACK_FRAME_LR_SAVE]; | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1112 | if (!firstframe || ip != lr) { | 
| Benjamin Herrenschmidt | 058c78f | 2008-07-07 13:44:31 +1000 | [diff] [blame] | 1113 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1114 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 
| Steven Rostedt | 9135c3c | 2009-09-15 08:20:15 -0700 | [diff] [blame] | 1115 | if ((ip == rth || ip == mrth) && curr_frame >= 0) { | 
| Steven Rostedt | 6794c78 | 2009-02-09 21:10:27 -0800 | [diff] [blame] | 1116 | printk(" (%pS)", | 
|  | 1117 | (void *)current->ret_stack[curr_frame].ret); | 
|  | 1118 | curr_frame--; | 
|  | 1119 | } | 
|  | 1120 | #endif | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1121 | if (firstframe) | 
|  | 1122 | printk(" (unreliable)"); | 
|  | 1123 | printk("\n"); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1124 | } | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1125 | firstframe = 0; | 
|  | 1126 |  | 
|  | 1127 | /* | 
|  | 1128 | * See if this is an exception frame. | 
|  | 1129 | * We look for the "regshere" marker in the current frame. | 
|  | 1130 | */ | 
| Benjamin Herrenschmidt | ec2b36b | 2008-04-17 14:34:59 +1000 | [diff] [blame] | 1131 | if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) | 
|  | 1132 | && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1133 | struct pt_regs *regs = (struct pt_regs *) | 
|  | 1134 | (sp + STACK_FRAME_OVERHEAD); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1135 | lr = regs->link; | 
| Benjamin Herrenschmidt | 058c78f | 2008-07-07 13:44:31 +1000 | [diff] [blame] | 1136 | printk("--- Exception: %lx at %pS\n    LR = %pS\n", | 
|  | 1137 | regs->trap, (void *)regs->nip, (void *)lr); | 
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1138 | firstframe = 1; | 
|  | 1139 | } | 
|  | 1140 |  | 
|  | 1141 | sp = newsp; | 
|  | 1142 | } while (count++ < kstack_depth_to_print); | 
| Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 1143 | } | 
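|  |  | /* | 
|  |  |  * Stack frame layout assumed above (word offsets): | 
|  |  |  * | 
|  |  |  *	stack[0]			back chain to the caller's frame | 
|  |  |  *	stack[STACK_FRAME_LR_SAVE]	caller's saved LR (return address) | 
|  |  |  *	stack[STACK_FRAME_MARKER]	"regshere" marker on exception | 
|  |  |  *					frames, which carry a struct pt_regs | 
|  |  |  *					at sp + STACK_FRAME_OVERHEAD | 
|  |  |  */ | 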
| Paul Mackerras | 06d67d5 | 2005-10-10 22:29:05 +1000 | [diff] [blame] | 1144 |  | 
|  | 1145 | void dump_stack(void) | 
|  | 1146 | { | 
|  | 1147 | show_stack(current, NULL); | 
|  | 1148 | } | 
|  | 1149 | EXPORT_SYMBOL(dump_stack); | 
| Anton Blanchard | cb2c9b2 | 2006-02-13 14:48:35 +1100 | [diff] [blame] | 1150 |  | 
|  | 1151 | #ifdef CONFIG_PPC64 | 
|  | 1152 | void ppc64_runlatch_on(void) | 
|  | 1153 | { | 
|  | 1154 | unsigned long ctrl; | 
|  | 1155 |  | 
|  | 1156 | if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { | 
|  | 1157 | HMT_medium(); | 
|  | 1158 |  | 
|  | 1159 | ctrl = mfspr(SPRN_CTRLF); | 
|  | 1160 | ctrl |= CTRL_RUNLATCH; | 
|  | 1161 | mtspr(SPRN_CTRLT, ctrl); | 
|  | 1162 |  | 
|  | 1163 | set_thread_flag(TIF_RUNLATCH); | 
|  | 1164 | } | 
|  | 1165 | } | 
|  | 1166 |  | 
|  | 1167 | void ppc64_runlatch_off(void) | 
|  | 1168 | { | 
|  | 1169 | unsigned long ctrl; | 
|  | 1170 |  | 
|  | 1171 | if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) { | 
|  | 1172 | HMT_medium(); | 
|  | 1173 |  | 
|  | 1174 | clear_thread_flag(TIF_RUNLATCH); | 
|  | 1175 |  | 
|  | 1176 | ctrl = mfspr(SPRN_CTRLF); | 
|  | 1177 | ctrl &= ~CTRL_RUNLATCH; | 
|  | 1178 | mtspr(SPRN_CTRLT, ctrl); | 
|  | 1179 | } | 
|  | 1180 | } | 
|  | 1181 | #endif | 
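|  |  | /* | 
|  |  |  * Note: the run latch is a bit in the CTRL special-purpose register | 
|  |  |  * that advertises whether this CPU is doing useful work (used e.g. | 
|  |  |  * for shared processor accounting).  CTRL is read via SPRN_CTRLF and | 
|  |  |  * written via SPRN_CTRLT; TIF_RUNLATCH caches the current state so | 
|  |  |  * the SPR is only touched on an actual transition. | 
|  |  |  */ | 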
| Benjamin Herrenschmidt | f6a6168 | 2008-04-18 16:56:17 +1000 | [diff] [blame] | 1182 |  | 
|  | 1183 | #if THREAD_SHIFT < PAGE_SHIFT | 
|  | 1184 |  | 
|  | 1185 | static struct kmem_cache *thread_info_cache; | 
|  | 1186 |  | 
|  | 1187 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | 
|  | 1188 | { | 
|  | 1189 | struct thread_info *ti; | 
|  | 1190 |  | 
|  | 1191 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | 
|  | 1192 | if (unlikely(ti == NULL)) | 
|  | 1193 | return NULL; | 
|  | 1194 | #ifdef CONFIG_DEBUG_STACK_USAGE | 
|  | 1195 | memset(ti, 0, THREAD_SIZE); | 
|  | 1196 | #endif | 
|  | 1197 | return ti; | 
|  | 1198 | } | 
|  | 1199 |  | 
|  | 1200 | void free_thread_info(struct thread_info *ti) | 
|  | 1201 | { | 
|  | 1202 | kmem_cache_free(thread_info_cache, ti); | 
|  | 1203 | } | 
|  | 1204 |  | 
|  | 1205 | void thread_info_cache_init(void) | 
|  | 1206 | { | 
|  | 1207 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | 
|  | 1208 | THREAD_SIZE, 0, NULL); | 
|  | 1209 | BUG_ON(thread_info_cache == NULL); | 
|  | 1210 | } | 
|  | 1211 |  | 
|  | 1212 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | 
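|  |  | /* | 
|  |  |  * Note: when THREAD_SHIFT < PAGE_SHIFT a kernel stack is smaller than | 
|  |  |  * a page, so handing out whole pages would waste memory; the slab | 
|  |  |  * cache above, aligned to THREAD_SIZE, packs several stacks into each | 
|  |  |  * page instead. | 
|  |  |  */ | 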
| Anton Blanchard | d839088 | 2009-02-22 01:50:03 +0000 | [diff] [blame] | 1213 |  | 
|  | 1214 | unsigned long arch_align_stack(unsigned long sp) | 
|  | 1215 | { | 
|  | 1216 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | 
|  | 1217 | sp -= get_random_int() & ~PAGE_MASK; | 
|  | 1218 | return sp & ~0xf; | 
|  | 1219 | } | 
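|  |  | /* | 
|  |  |  * Example (assuming 4K pages): ~PAGE_MASK is 0xfff, so up to 4095 | 
|  |  |  * bytes are subtracted from the initial user stack pointer, which is | 
|  |  |  * then rounded down to the 16-byte alignment the ABI expects | 
|  |  |  * (sp & ~0xf). | 
|  |  |  */ | 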
| Anton Blanchard | 912f9ee | 2009-02-22 01:50:04 +0000 | [diff] [blame] | 1220 |  | 
|  | 1221 | static inline unsigned long brk_rnd(void) | 
|  | 1222 | { | 
|  | 1223 | unsigned long rnd = 0; | 
|  | 1224 |  | 
|  | 1225 | /* 8MB for 32bit, 1GB for 64bit */ | 
|  | 1226 | if (is_32bit_task()) | 
|  | 1227 | rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); | 
|  | 1228 | else | 
|  | 1229 | rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); | 
|  | 1230 |  | 
|  | 1231 | return rnd << PAGE_SHIFT; | 
|  | 1232 | } | 
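|  |  | /* | 
|  |  |  * Worked example (assuming 4K pages, PAGE_SHIFT = 12): for a 32-bit | 
|  |  |  * task, get_random_int() % (1 << (23 - 12)) picks one of 2048 pages, | 
|  |  |  * giving up to 8MB (2^23 bytes) of heap offset after the shift by | 
|  |  |  * PAGE_SHIFT; the 64-bit case gives up to 1GB (2^30 bytes) the same | 
|  |  |  * way. | 
|  |  |  */ | 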
|  | 1233 |  | 
|  | 1234 | unsigned long arch_randomize_brk(struct mm_struct *mm) | 
|  | 1235 | { | 
| Anton Blanchard | 8bbde7a | 2009-09-21 16:52:35 +0000 | [diff] [blame] | 1236 | unsigned long base = mm->brk; | 
|  | 1237 | unsigned long ret; | 
|  | 1238 |  | 
| Kumar Gala | ce7a35c | 2009-10-16 07:05:17 +0000 | [diff] [blame] | 1239 | #ifdef CONFIG_PPC_STD_MMU_64 | 
| Anton Blanchard | 8bbde7a | 2009-09-21 16:52:35 +0000 | [diff] [blame] | 1240 | /* | 
|  | 1241 | * If we are using 1TB segments and we are allowed to randomise | 
|  | 1242 | * the heap, we can put it above 1TB so it is backed by a 1TB | 
|  | 1243 | * segment. Otherwise the heap will be in the bottom 1TB | 
|  | 1244 | * which always uses 256MB segments and this may result in a | 
|  | 1245 | * performance penalty. | 
|  | 1246 | */ | 
|  | 1247 | if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) | 
|  | 1248 | base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); | 
|  | 1249 | #endif | 
|  | 1250 |  | 
|  | 1251 | ret = PAGE_ALIGN(base + brk_rnd()); | 
| Anton Blanchard | 912f9ee | 2009-02-22 01:50:04 +0000 | [diff] [blame] | 1252 |  | 
|  | 1253 | if (ret < mm->brk) | 
|  | 1254 | return mm->brk; | 
|  | 1255 |  | 
|  | 1256 | return ret; | 
|  | 1257 | } | 
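|  |  | /* | 
|  |  |  * Note: the randomized brk is page-aligned and clamped so it never | 
|  |  |  * falls below the existing mm->brk; randomization only ever moves the | 
|  |  |  * heap upward. | 
|  |  |  */ | 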
| Anton Blanchard | 501cb16 | 2009-02-22 01:50:07 +0000 | [diff] [blame] | 1258 |  | 
|  | 1259 | unsigned long randomize_et_dyn(unsigned long base) | 
|  | 1260 | { | 
|  | 1261 | unsigned long ret = PAGE_ALIGN(base + brk_rnd()); | 
|  | 1262 |  | 
|  | 1263 | if (ret < base) | 
|  | 1264 | return base; | 
|  | 1265 |  | 
|  | 1266 | return ret; | 
|  | 1267 | } |
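|  |  | /* | 
|  |  |  * Note: randomize_et_dyn() applies the same upward-only, page-aligned | 
|  |  |  * offset to the base load address of ET_DYN (position-independent) | 
|  |  |  * executables, reusing brk_rnd() as its entropy source. | 
|  |  |  */ | 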