blob: 84f4e97e30745ad4d8e9b5d9e032dae5b775542b [file] [log] [blame]
David Howellsb920de12008-02-08 04:19:31 -08001/* MN10300 Process handling code
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/module.h>
12#include <linux/errno.h>
13#include <linux/sched.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/smp.h>
David Howellsb920de12008-02-08 04:19:31 -080017#include <linux/stddef.h>
18#include <linux/unistd.h>
19#include <linux/ptrace.h>
David Howellsb920de12008-02-08 04:19:31 -080020#include <linux/user.h>
David Howellsb920de12008-02-08 04:19:31 -080021#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/reboot.h>
24#include <linux/percpu.h>
25#include <linux/err.h>
26#include <linux/fs.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090027#include <linux/slab.h>
Frederic Weisbecker5b0753a2012-08-22 17:27:34 +020028#include <linux/rcupdate.h>
David Howellsb920de12008-02-08 04:19:31 -080029#include <asm/uaccess.h>
30#include <asm/pgtable.h>
David Howellsb920de12008-02-08 04:19:31 -080031#include <asm/io.h>
32#include <asm/processor.h>
33#include <asm/mmu_context.h>
34#include <asm/fpu.h>
35#include <asm/reset-regs.h>
36#include <asm/gdb-stub.h>
37#include "internal.h"
38
39/*
David Howellsb920de12008-02-08 04:19:31 -080040 * return saved PC of a blocked thread.
41 */
42unsigned long thread_saved_pc(struct task_struct *tsk)
43{
44 return ((unsigned long *) tsk->thread.sp)[3];
45}
46
47/*
48 * power off function, if any
49 */
/* hook for platform code to register a routine that cuts power to the unit;
 * NULL when no such routine has been provided */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
52
Akira Takeuchi368dd5a2010-10-27 17:28:55 +010053#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
David Howellsb920de12008-02-08 04:19:31 -080054/*
55 * we use this if we don't have any better idle routine
56 */
static void default_idle(void)
{
	/* disable interrupts BEFORE re-testing need_resched() so that a
	 * wakeup interrupt cannot slip in between the test and the halt
	 * and be lost; safe_halt() is expected to re-enable interrupts
	 * and halt atomically (standard safe_halt contract —
	 * NOTE(review): confirm against the mn10300 implementation) */
	local_irq_disable();
	if (!need_resched())
		safe_halt();
	else
		local_irq_enable();
}
65
Akira Takeuchi368dd5a2010-10-27 17:28:55 +010066#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
67/*
68 * On SMP it's slightly faster (but much more power-consuming!)
69 * to poll the ->work.need_resched flag instead of waiting for the
70 * cross-CPU IPI to arrive. Use this option with caution.
71 */
static inline void poll_idle(void)
{
	int oldval;

	local_irq_enable();

	/*
	 * Deal with another CPU just having chosen a thread to
	 * run here:
	 */
	oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

	if (!oldval) {
		/* advertise that this CPU is polling need_resched() so the
		 * scheduler may skip sending it a reschedule IPI */
		set_thread_flag(TIF_POLLING_NRFLAG);
		while (!need_resched())
			cpu_relax();
		clear_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		/* a reschedule was already pending; re-assert the flag we
		 * just cleared so the idle loop exits and schedules */
		set_need_resched();
	}
}
93#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
94
David Howellsb920de12008-02-08 04:19:31 -080095/*
96 * the idle thread
97 * - there's no useful work to be done, so just try to conserve power and have
98 * a low exit latency (ie sit in a loop waiting for somebody to say that
99 * they'd like to reschedule)
100 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	for (;;) {
		rcu_idle_enter();
		while (!need_resched()) {
			/* BUG FIX: the previous code declared a local
			 * function pointer "idle" without initialising it
			 * and then tested "if (!idle)" — reading an
			 * uninitialised automatic variable is undefined
			 * behaviour.  Select the idle routine directly
			 * under the same configuration conditions instead. */
			smp_rmb();
#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
			poll_idle();
#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
			default_idle();
#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
		}
		rcu_idle_exit();

		schedule_preempt_disabled();
	}
}
124
/* no per-process segment state exists on this architecture, so there is
 * nothing to release when an mm goes away */
void release_segments(struct mm_struct *mm)
{
}
128
/* restart the machine; the reboot command string @cmd is ignored */
void machine_restart(char *cmd)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	/* tell an attached debugger that the kernel is going down */
	gdbstub_exit(0);
#endif

	/* prefer a board-specific hard reset when the unit provides one,
	 * otherwise fall back to resetting the processor itself */
#ifdef mn10300_unit_hard_reset
	mn10300_unit_hard_reset();
#else
	mn10300_proc_hard_reset();
#endif
}
141
/* halt the machine: nothing to do beyond notifying an attached debugger;
 * the CPU is simply left spinning by the caller */
void machine_halt(void)
{
#ifdef CONFIG_KERNEL_DEBUGGER
	gdbstub_exit(0);
#endif
}
148
149void machine_power_off(void)
150{
David Howells044264b2011-03-18 16:54:31 +0000151#ifdef CONFIG_KERNEL_DEBUGGER
David Howellsb920de12008-02-08 04:19:31 -0800152 gdbstub_exit(0);
153#endif
154}
155
/* dump register state for debugging — not implemented on this architecture */
void show_regs(struct pt_regs *regs)
{
}
159
160/*
David Howellsb920de12008-02-08 04:19:31 -0800161 * free current thread data structures etc..
162 */
void exit_thread(void)
{
	/* drop any FPU state still owned by the exiting task */
	exit_fpu();
}
167
/* called on exec: reset per-thread state for the new program image */
void flush_thread(void)
{
	flush_fpu();
}
172
/* no architecture-private task resources to free when a task is reaped */
void release_thread(struct task_struct *dead_task)
{
}
176
177/*
178 * we do not have to muck with descriptors here, that is
179 * done in switch_mm() as needed.
180 */
void copy_segments(struct task_struct *p, struct mm_struct *new_mm)
{
	/* intentionally empty: see the note above — segment handling is
	 * performed by switch_mm() as needed */
}
184
185/*
Suresh Siddha55ccf3f2012-05-16 15:03:51 -0700186 * this gets called so that we can store lazy state into memory and copy the
187 * current task into the new thread.
David Howellsb920de12008-02-08 04:19:31 -0800188 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* flush any lazily-held FPU state back into src's task_struct
	 * first, so the structure copy below captures a coherent image */
	unlazy_fpu(src);
	*dst = *src;
	return 0;	/* cannot fail on this architecture */
}
195
196/*
197 * set up the kernel stack for a new thread and copy arch-specific thread
198 * control information
199 */
int copy_thread(unsigned long clone_flags,
		unsigned long c_usp, unsigned long ustk_size,
		struct task_struct *p)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *c_regs;
	unsigned long c_ksp;

	/* begin at the top of the child's kernel stack */
	c_ksp = (unsigned long) task_stack_page(p) + THREAD_SIZE;

	/* allocate the userspace exception frame and set it up */
	c_ksp -= sizeof(struct pt_regs);
	c_regs = (struct pt_regs *) c_ksp;
	c_ksp -= 12; /* allocate function call ABI slack */

	/* set things up so the scheduler can start the new task */
	p->thread.uregs = c_regs;
	ti->frame = c_regs;
	p->thread.a3 = (unsigned long) c_regs;
	p->thread.sp = c_ksp;
	p->thread.wchan = p->thread.pc;
	p->thread.usp = c_usp;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: c_usp carries the function to call and
		 * ustk_size its argument rather than a user stack/size;
		 * it will start in ret_from_kernel_thread with interrupts
		 * enabled (EPSW_IE | EPSW_IM_7) */
		memset(c_regs, 0, sizeof(struct pt_regs));
		c_regs->a0 = c_usp; /* function */
		c_regs->d0 = ustk_size; /* argument */
		local_save_flags(c_regs->epsw);
		c_regs->epsw |= EPSW_IE | EPSW_IM_7;
		p->thread.pc = (unsigned long) ret_from_kernel_thread;
		return 0;
	}
	/* user thread: start from a copy of the parent's register frame,
	 * overriding the user SP if the caller supplied one */
	*c_regs = *current_pt_regs();
	if (c_usp)
		c_regs->sp = c_usp;
	c_regs->epsw &= ~EPSW_FE; /* my FPU */

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		c_regs->e2 = current_frame()->d3;

	p->thread.pc = (unsigned long) ret_from_fork;

	return 0;
}
245
David Howellsb920de12008-02-08 04:19:31 -0800246unsigned long get_wchan(struct task_struct *p)
247{
248 return p->thread.wchan;
249}