/*
 *  linux/arch/m68k/kernel/process.c
 *
 *  Copyright (C) 1995  Hamish Macdonald
 *
 *  68060 fixes by Jesper Skov
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <linux/rcupdate.h>

#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

/*
 * Return saved PC from a blocked thread
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
	/* Check whether the thread is blocked in resume() */
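	/*
	 * If it is, sw->a6 is the frame pointer saved by resume(); with the
	 * m68k "link %a6" frame layout the caller's return address sits one
	 * longword above the saved frame pointer, so a6[1] yields the PC of
	 * the code that called into the scheduler.
	 */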
	if (in_sched_functions(sw->retpc))
		return ((unsigned long *)sw->a6)[1];
	else
		return sw->retpc;
}

/*
 * The idle loop on an m68k.
 */
static void default_idle(void)
{
	if (!need_resched())
#if defined(MACH_ATARI_ONLY)
		/* block out HSYNC on the atari (falcon) */
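		/*
		 * "stop #n" loads n into the status register and halts the
		 * CPU until an interrupt arrives.  0x2200 keeps the interrupt
		 * priority mask at 2, so the Falcon's level-2 HSYNC autovector
		 * interrupt cannot wake us; 0x2000 below leaves all interrupt
		 * levels enabled.
		 */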
		__asm__("stop #0x2200" : : : "cc");
#else
		__asm__("stop #0x2000" : : : "cc");
#endif
}

void (*idle)(void) = default_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
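		/*
		 * Tell RCU this CPU is entering idle so grace periods do not
		 * have to wait on it while it sits in the stop instruction;
		 * rcu_idle_exit() below undoes this before rescheduling.
		 */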
		rcu_idle_enter();
		while (!need_resched())
			idle();
		rcu_idle_exit();
		schedule_preempt_disabled();
	}
}

void machine_restart(char * __unused)
{
	if (mach_reset)
		mach_reset();
	for (;;);
}

void machine_halt(void)
{
	if (mach_halt)
		mach_halt();
	for (;;);
}

void machine_power_off(void)
{
	if (mach_power_off)
		mach_power_off();
	for (;;);
}

void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
	       regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
	printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
	       regs->orig_d0, regs->d0, regs->a2, regs->a1);
	printk("A0: %08lx D5: %08lx D4: %08lx\n",
	       regs->a0, regs->d5, regs->d4);
	printk("D3: %08lx D2: %08lx D1: %08lx\n",
	       regs->d3, regs->d2, regs->d1);
	if (!(regs->sr & PS_S))
		printk("USP: %08lx\n", rdusp());
}

void flush_thread(void)
{
	current->thread.fs = __USER_DS;
#ifdef CONFIG_FPU
	if (!FPU_IS_EMU) {
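		/*
		 * frestore of a null state frame (one whose first longword is
		 * zero) resets the FPU to its power-up state.
		 */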
		unsigned long zero = 0;
		asm volatile("frestore %0": :"m" (zero));
	}
#endif
}

/*
 * m68k_fork(): by the time we get here, the non-volatile
 * registers have also been saved on the stack.  We do some
 * ugly pointer stuff here (see also copy_thread).
 */

asmlinkage int m68k_fork(struct pt_regs *regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
#else
	return -EINVAL;
#endif
}

asmlinkage int m68k_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
		       NULL, NULL);
}

asmlinkage int m68k_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	/* syscall2 puts clone_flags in d1 and usp in d2 */
	clone_flags = regs->d1;
	newsp = regs->d2;
	parent_tidptr = (int __user *)regs->d3;
	child_tidptr = (int __user *)regs->d4;
	if (!newsp)
		newsp = rdusp();
	return do_fork(clone_flags, newsp, regs, 0,
		       parent_tidptr, child_tidptr);
}

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg,
		struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct switch_stack *childstack;

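	/*
	 * Lay out the child's kernel stack: a struct pt_regs sits at the very
	 * top of the stack area (THREAD_SIZE above task_stack_page()), with
	 * the switch_stack that resume() restores immediately below it.
	 */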
	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	childstack = ((struct switch_stack *) childregs) - 1;

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childstack;
	p->thread.esp0 = (unsigned long)childregs;

	/*
	 * Must save the current SFC/DFC value, NOT the value when
	 * the parent was last descheduled - RGH 10-08-96
	 */
	p->thread.fs = get_fs().seg;

	if (unlikely(!regs)) {
		/* kernel thread */
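		/*
		 * A NULL regs pointer means we are creating a kernel thread:
		 * ret_from_kernel_thread expects the payload function in a3
		 * and its argument in d7, so plant them in the switch_stack
		 * that resume() will restore for the new thread.
		 */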
		memset(childstack, 0,
			sizeof(struct switch_stack) + sizeof(struct pt_regs));
		childregs->sr = PS_S;
		childstack->a3 = usp; /* function */
		childstack->d7 = arg;
		childstack->retpc = (unsigned long)ret_from_kernel_thread;
		p->thread.usp = 0;
		return 0;
	}
	*childregs = *regs;
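	/* d0 carries the syscall return value: the child sees 0 from fork() */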
	childregs->d0 = 0;

	*childstack = ((struct switch_stack *) regs)[-1];
	childstack->retpc = (unsigned long)ret_from_fork;

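	/* the clone() TLS argument arrives in d5 on m68k */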
	if (clone_flags & CLONE_SETTLS)
		task_thread_info(p)->tp_value = regs->d5;

#ifdef CONFIG_FPU
	if (!FPU_IS_EMU) {
		/* Copy the current fpu state */
		asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");

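		/*
		 * A non-null fsave frame (first byte, or third byte on the
		 * 68060, nonzero) means the FPU holds live state, so also
		 * copy the data and control registers; the frestore below
		 * resumes the parent's FPU in case fsave caught it
		 * mid-instruction.
		 */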
		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
			if (CPU_IS_COLDFIRE) {
				asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
					      "fmovel %/fpiar,%1\n\t"
					      "fmovel %/fpcr,%2\n\t"
					      "fmovel %/fpsr,%3"
					      :
					      : "m" (p->thread.fp[0]),
						"m" (p->thread.fpcntl[0]),
						"m" (p->thread.fpcntl[1]),
						"m" (p->thread.fpcntl[2])
					      : "memory");
			} else {
				asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
					      "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
					      :
					      : "m" (p->thread.fp[0]),
						"m" (p->thread.fpcntl[0])
					      : "memory");
			}
		}

		/* Restore the state in case the fpu was busy */
		asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
	}
#endif /* CONFIG_FPU */

	return 0;
}

/* Fill in the fpu structure for a core dump. */
#ifdef CONFIG_FPU
int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
	char fpustate[216];

	if (FPU_IS_EMU) {
		int i;

		memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
		memcpy(fpu->fpregs, current->thread.fp, 96);
		/* Convert internal fpu reg representation
		 * into long double format
		 */
		for (i = 0; i < 24; i += 3)
			fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
					 ((fpu->fpregs[i] & 0x0000ffff) << 16);
		return 1;
	}

	/* First dump the fpu context to avoid protocol violation. */
	asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
	if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
		return 0;

	if (CPU_IS_COLDFIRE) {
		asm volatile ("fmovel %/fpiar,%0\n\t"
			      "fmovel %/fpcr,%1\n\t"
			      "fmovel %/fpsr,%2\n\t"
			      "fmovemd %/fp0-%/fp7,%3"
			      :
			      : "m" (fpu->fpcntl[0]),
				"m" (fpu->fpcntl[1]),
				"m" (fpu->fpcntl[2]),
				"m" (fpu->fpregs[0])
			      : "memory");
	} else {
		asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
			      :
			      : "m" (fpu->fpcntl[0])
			      : "memory");
		asm volatile ("fmovemx %/fp0-%/fp7,%0"
			      :
			      : "m" (fpu->fpregs[0])
			      : "memory");
	}

	return 1;
}
EXPORT_SYMBOL(dump_fpu);
#endif /* CONFIG_FPU */

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	fp = ((struct switch_stack *)p->thread.ksp)->a6;
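	/*
	 * Walk the chain of saved frame pointers: each frame holds the
	 * previous a6 at offset 0 and its return address at offset 4.  Stop
	 * at the first PC outside the scheduler, after 16 frames, or once the
	 * frame pointer leaves the stack area (8184 presumably being
	 * THREAD_SIZE - 8).
	 */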
	do {
		if (fp < stack_page+sizeof(struct thread_info) ||
		    fp >= 8184+stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *) fp;
	} while (count++ < 16);
	return 0;
}