/*
 * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/ptrace.h"
#include "linux/proc_fs.h"
#include "linux/file.h"
#include "linux/errno.h"
#include "linux/init.h"
#include "asm/uaccess.h"
#include "asm/atomic.h"
#include "kern_util.h"
#include "as-layout.h"
#include "skas.h"
#include "os.h"
#include "user_util.h"
#include "tlb.h"
#include "kern.h"
#include "mode.h"
#include "registers.h"

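/*
 * Context-switch from one UML task to another in SKAS mode.  The actual
 * switch happens in switch_threads; switch_timers toggles the timer
 * setup when the idle thread (pid 0) is on either side of the switch.
 */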
void switch_to_skas(void *prev, void *next)
{
	struct task_struct *from, *to;

	from = prev;
	to = next;

	/* XXX need to check runqueues[cpu].idle */
	if (current->pid == 0)
		switch_timers(0);

	switch_threads(&from->thread.mode.skas.switch_buf,
		       &to->thread.mode.skas.switch_buf);

	arch_switch_to_skas(current->thread.prev_sched, current);

	if (current->pid == 0)
		switch_timers(1);
}

extern void schedule_tail(struct task_struct *prev);

/*
 * This is called magically: its address is stuffed into a jmp_buf and
 * longjmp-ed to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits.
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else
		do_exit(0);
}

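/* Nothing to release in SKAS mode; this hook exists for the mode interface. */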
void release_thread_skas(struct task_struct *task)
{
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();
	if (current->thread.prev_sched == NULL)
		panic("fork_handler: prev_sched is NULL");

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to_skas isn't needed.  We might want to rely on
	 * that to improve performance. -bb
	 */
	arch_switch_to_skas(current->thread.prev_sched, current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

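/*
 * Set up the thread state of a new task.  A fork child gets a copy of
 * the parent's registers, a zero syscall return value, and optionally a
 * new stack pointer, and resumes in fork_handler; a kernel thread gets
 * fresh registers and starts in new_thread_handler.
 */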
int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
		     unsigned long stack_top, struct task_struct *p,
		     struct pt_regs *regs)
{
	void (*handler)(void);

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs.skas, &regs->regs.skas,
		       sizeof(p->thread.regs.regs.skas));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.skas.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.skas.regs) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		init_thread_registers(&p->thread.regs.regs);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
		   handler);
	return 0;
}

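/*
 * Create a new host address space by opening /proc/mm (supplied by the
 * host's skas3 patch).  The returned file descriptor identifies the
 * address space; the stub pages are mapped into it when this mode needs
 * a stub.
 */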
int new_mm(unsigned long stack)
{
	int fd;

	fd = os_open_file("/proc/mm", of_cloexec(of_write(OPENFLAGS())), 0);
	if (fd < 0)
		return fd;

	if (skas_needs_stub)
		map_stub_pages(fd, CONFIG_STUB_CODE, CONFIG_STUB_DATA, stack);

	return fd;
}

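/* Record this CPU's host pid and enter the idle loop. */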
void init_idle_skas(void)
{
	cpu_tasks[current_thread->cpu].pid = os_getpid();
	default_idle();
}

extern void start_kernel(void);

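/*
 * Body of the initial kernel thread: record CPU 0's host pid, mark the
 * CPU online under SMP, and run the generic start_kernel.
 */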
static int start_kernel_proc(void *unused)
{
	int pid;

	block_signals();
	pid = os_getpid();

	cpu_tasks[0].pid = pid;
	cpu_tasks[0].task = current;
#ifdef CONFIG_SMP
	cpu_online_map = cpumask_of_cpu(0);
#endif
	start_kernel();
	return 0;
}

extern int userspace_pid[];

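/*
 * Boot the kernel in SKAS mode: start the userspace process if /proc/mm
 * is available, then run start_kernel_proc as the idle thread on
 * init_task's stack.
 */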
int start_uml_skas(void)
{
	if (proc_mm)
		userspace_pid[0] = start_userspace(0);

	init_new_thread_signals();

	init_task.thread.request.u.thread.proc = start_kernel_proc;
	init_task.thread.request.u.thread.arg = NULL;
	return start_idle_thread(task_stack_page(&init_task),
				 &init_task.thread.mode.skas.switch_buf);
}

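/*
 * Host pid that executes userspace for this task.  With one userspace
 * process per CPU this is userspace_pid[0]; the per-cpu lookup is still
 * missing, hence the #warning below.
 */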
int external_pid_skas(struct task_struct *task)
{
#warning Need to look up userspace_pid by cpu
	return userspace_pid[0];
}

int thread_pid_skas(struct task_struct *task)
{
#warning Need to look up userspace_pid by cpu
	return userspace_pid[0];
}

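/*
 * Kill the host processes backing UML tasks.  With /proc/mm a single
 * ptraced child runs all of userspace, so killing userspace_pid[0]
 * suffices; otherwise each mm has its own host process to kill.
 */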
void kill_off_processes_skas(void)
{
	if (proc_mm)
#warning need to loop over userspace_pids in kill_off_processes_skas
		os_kill_ptraced_process(userspace_pid[0], 1);
	else {
		struct task_struct *p;
		int pid, me;

		me = os_getpid();
		for_each_process(p) {
			if (p->mm == NULL)
				continue;

			pid = p->mm->context.skas.id.u.pid;
			os_kill_ptraced_process(pid, 1);
		}
	}
}

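/*
 * Stub stack page of the current address space, or 0 for kernel threads
 * (which have no mm).
 */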
unsigned long current_stub_stack(void)
{
	if (current->mm == NULL)
		return 0;

	return current->mm->context.skas.id.stack;
}