/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
 *
 * linux/arch/sh/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/slab.h>
#include <linux/a.out.h>
#include <linux/ptrace.h>
#include <linux/platform.h>
#include <linux/kallsyms.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/elf.h>
#if defined(CONFIG_SH_HS7751RVOIP)
#include <asm/hs7751rvoip/hs7751rvoip.h>
#elif defined(CONFIG_SH_RTS7751R2D)
#include <asm/rts7751r2d/rts7751r2d.h>
#endif

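/*
 * While hlt_counter is non-zero (see disable_hlt()/enable_hlt() below),
 * the idle loop spins with cpu_relax() instead of issuing cpu_sleep(),
 * so the CPU is never put into its low-power sleep state.
 */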
static int hlt_counter=0;

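/*
 * Number of tasks that currently have a user break controller (UBC)
 * breakpoint programmed (thread.ubc_pc != 0); __switch_to() only
 * touches the UBC registers while this is non-zero.
 */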
int ubc_usercnt = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

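/*
 * The idle loop: sleep the CPU with cpu_sleep() until work arrives,
 * unless sleeping has been disabled via disable_hlt(), in which case
 * we busy-wait with cpu_relax().
 */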
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (hlt_counter) {
			while (!need_resched())
				cpu_relax();
		} else {
			while (!need_resched())
				cpu_sleep();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}

void machine_halt(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
	while (1)
		cpu_sleep();
}

void machine_power_off(void)
{
#if defined(CONFIG_SH_HS7751RVOIP)
	unsigned short value;

	value = ctrl_inw(PA_OUTPORTR);
	ctrl_outw((value & 0xffdf), PA_OUTPORTR);
#elif defined(CONFIG_SH_RTS7751R2D)
	ctrl_outw(0x0001, PA_POWOFF);
#endif
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC  : %08lx SP  : %08lx SR  : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	printk("TEA : %08x    ", ctrl_inl(MMU_TEA));
#else
	printk("                  ");
#endif
	printk("%s\n", print_tainted());

	printk("R0  : %08lx R1  : %08lx R2  : %08lx R3  : %08lx\n",
	       regs->regs[0], regs->regs[1],
	       regs->regs[2], regs->regs[3]);
	printk("R4  : %08lx R5  : %08lx R6  : %08lx R7  : %08lx\n",
	       regs->regs[4], regs->regs[5],
	       regs->regs[6], regs->regs[7]);
	printk("R8  : %08lx R9  : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8], regs->regs[9],
	       regs->regs[10], regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12], regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR  : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];

		show_task((unsigned long *)sp);
	}
}

/*
 * Create a kernel thread.
 *
 * kernel_thread_helper() is the assembly trampoline the new thread
 * starts in: it calls the thread function (passed in r5, argument in
 * r4) and then hands the return value to do_exit().
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr @r5\n\t"
	" nop\n\t"
	"mov.l 1f, r1\n\t"
	"jsr @r1\n\t"
	" mov r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");

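/*
 * Build a register frame that starts the child in kernel_thread_helper()
 * with fn in r5 and arg in r4, running in privileged mode (SR.MD, bit 30
 * of SR), then let do_fork() create the thread.  CLONE_VM and
 * CLONE_UNTRACED are always OR'd into the caller's flags.
 */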
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Must not be called with SR.BL=1 (interrupts blocked), or the CPU resets! */
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;

	regs.pc = (unsigned long) kernel_thread_helper;
	regs.sr = (1 << 30);	/* SR.MD: privileged mode */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	if (current->thread.ubc_pc) {
		current->thread.ubc_pc = 0;
		ubc_usercnt -= 1;
	}
}

void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	struct pt_regs *regs = (struct pt_regs *)
				((unsigned long)tsk->thread_info
				 + THREAD_SIZE - sizeof(struct pt_regs)
				 - sizeof(unsigned long));

	/* Forget lazy FPU state */
	clear_fpu(tsk, regs);
	clear_used_math();
#endif
}

void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}

/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	ptregs = *(struct pt_regs *)
		((unsigned long)tsk->thread_info + THREAD_SIZE
		 - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
		 - sizeof(struct pt_dspregs)
#endif
		 - sizeof(unsigned long));
	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

int
dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		struct pt_regs *regs = (struct pt_regs *)
					((unsigned long)tsk->thread_info
					 + THREAD_SIZE - sizeof(struct pt_regs)
					 - sizeof(unsigned long));
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}

asmlinkage void ret_from_fork(void);

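/*
 * Set up the child's kernel stack: copy the parent's register frame to
 * the top of the child's stack and point thread.pc at ret_from_fork, so
 * the first time the child is scheduled it resumes from the fork with
 * r0 = 0 as its return value.
 */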
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	childregs = ((struct pt_regs *)
		(THREAD_SIZE + (unsigned long) p->thread_info)
#ifdef CONFIG_SH_DSP
		- sizeof(struct pt_dspregs)
#endif
		- sizeof(unsigned long)) - 1;
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
	} else {
		/* Kernel thread: stack starts at the top of the thread area */
		childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
	}
	if (clone_flags & CLONE_SETTLS) {
		/* The TLS pointer was passed in r0; it becomes the child's GBR */
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	p->thread.ubc_pc = 0;

	return 0;
}

/*
 * Tracing by user break controller: arm UBC channel A to raise a break
 * on an instruction fetch at `pc', qualified by `asid' on CPUs that
 * support ASID matching.
 */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
	ctrl_outl(pc, UBC_BARA);

	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);

	ctrl_outl(0, UBC_BAMRA);

	if (cpu_data->type == CPU_SH7729) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	struct pt_regs *regs = (struct pt_regs *)
				((unsigned long)prev->thread_info
				 + THREAD_SIZE - sizeof(struct pt_regs)
				 - sizeof(unsigned long));
	unlazy_fpu(prev, regs);
#endif

#ifdef CONFIG_PREEMPT
	{
		unsigned long flags;
		struct pt_regs *regs;

		local_irq_save(flags);
		regs = (struct pt_regs *)
			((unsigned long)prev->thread_info
			 + THREAD_SIZE - sizeof(struct pt_regs)
#ifdef CONFIG_SH_DSP
			 - sizeof(struct pt_dspregs)
#endif
			 - sizeof(unsigned long));
		/*
		 * r15 >= 0xc0000000 in user mode marks a gUSA-style atomic
		 * critical region; if the previous task was preempted inside
		 * one, roll its PC back to the region's restart point.
		 */
		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point */
				regs->pc = regs->regs[0] + offset;
		}
		local_irq_restore(flags);
	}
#endif

	/*
	 * Restore the kernel mode register
	 * k7 (r7_bank1)
	 */
	asm volatile("ldc %0, r7_bank"
		     : /* no output */
		     : "r" (next->thread_info));

#ifdef CONFIG_MMU
	/* If no tasks are using the UBC, we're done */
	if (ubc_usercnt == 0)
		/* If no tasks are using the UBC, we're done */;
	else if (next->thread.ubc_pc && next->mm) {
		ubc_set_tracing(next->mm->context & MMU_CONTEXT_ASID_MASK,
				next->thread.ubc_pc);
	} else {
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
	}
#endif

	return prev;
}

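/*
 * The syscall wrappers below take r4..r7 plus a struct pt_regs by value;
 * the entry code arranges for that argument to overlay the register
 * frame saved on the kernel stack at syscall entry, so &regs refers to
 * the caller's saved user context and regs.regs[15] is its user stack
 * pointer.
 */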
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}

asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 unsigned long parent_tidptr,
			 unsigned long child_tidptr,
			 struct pt_regs regs)
{
	if (!newsp)
		newsp = regs.regs[15];
	return do_fork(clone_flags, newsp, &regs, 0,
		       (int __user *)parent_tidptr, (int __user *)child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
		       0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r7,
			  struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}

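/*
 * Return the PC at which a sleeping task is waiting.  If the task is
 * blocked inside the scheduler itself, follow the saved stack frame to
 * report the caller of schedule() instead.
 */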
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
		return (unsigned long)((unsigned long *)schedule_frame)[1];
	}
	return pc;
}

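/*
 * A user break controller breakpoint (set up by ubc_set_tracing()) has
 * fired: disarm both UBC channels, drop the per-task breakpoint and
 * deliver SIGTRAP.
 */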
asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing. */
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}

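/*
 * Software breakpoint: back the PC up over the 2-byte breakpoint
 * instruction (all SH instructions are 16 bits) so the reported address
 * is that of the breakpoint itself, then deliver SIGTRAP.
 */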
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}