/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/mm.h"
#include "linux/slab.h"
#include "linux/utsname.h"
#include "linux/fs.h"
#include "linux/utime.h"
#include "linux/smp_lock.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/capability.h"
#include "linux/vmalloc.h"
#include "linux/spinlock.h"
#include "linux/proc_fs.h"
#include "linux/ptrace.h"
#include "linux/random.h"
#include "asm/unistd.h"
#include "asm/mman.h"
#include "asm/segment.h"
#include "asm/stat.h"
#include "asm/pgtable.h"
#include "asm/processor.h"
#include "asm/tlbflush.h"
#include "asm/uaccess.h"
#include "asm/user.h"
#include "user_util.h"
#include "kern_util.h"
#include "kern.h"
#include "signal_kern.h"
#include "signal_user.h"
#include "init.h"
#include "irq_user.h"
#include "mem_user.h"
#include "time_user.h"
#include "tlb.h"
#include "frame_kern.h"
#include "sigcontext.h"
#include "2_5compat.h"
#include "os.h"
#include "mode.h"
#include "mode_kern.h"
#include "choose-mode.h"

/* This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

struct task_struct *get_task(int pid, int require)
{
        struct task_struct *ret;

        read_lock(&tasklist_lock);
        ret = find_task_by_pid(pid);
        read_unlock(&tasklist_lock);

        if(require && (ret == NULL)) panic("get_task couldn't find a task\n");
        return(ret);
}

int external_pid(void *t)
{
        struct task_struct *task = t ? t : current;

        return(CHOOSE_MODE_PROC(external_pid_tt, external_pid_skas, task));
}

int pid_to_processor_id(int pid)
{
        int i;

        for(i = 0; i < ncpus; i++){
                if(cpu_tasks[i].pid == pid) return(i);
        }
        return(-1);
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        int flags = GFP_KERNEL;

        if(atomic) flags |= GFP_ATOMIC;
        page = __get_free_pages(flags, order);
        if(page == 0)
                return(0);
        stack_protections(page);
        return(page);
}

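/* Create a kernel thread: the function and its argument are stashed in the
 * current thread's request structure and the thread itself is created with
 * do_fork(CLONE_VM | CLONE_UNTRACED | flags).
 */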
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        int pid;

        current->thread.request.u.thread.proc = fn;
        current->thread.request.u.thread.arg = arg;
        pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0, NULL, 0, NULL,
                      NULL);
        if(pid < 0)
                panic("do_fork failed in kernel_thread, errno = %d", pid);
        return(pid);
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
               struct task_struct *tsk)
{
        int cpu = smp_processor_id();

        if (prev != next)
                cpu_clear(cpu, prev->cpu_vm_mask);
        cpu_set(cpu, next->cpu_vm_mask);
}

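/* Record the task, and the pid returned by external_pid() for it, as the one
 * now running on this CPU in the cpu_tasks array.
 */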
void set_current(void *t)
{
        struct task_struct *task = t;

        cpu_tasks[task->thread_info->cpu] = ((struct cpu_task)
                { external_pid(task), task });
}

void *_switch_to(void *prev, void *next, void *last)
{
        return(CHOOSE_MODE(switch_to_tt(prev, next),
                           switch_to_skas(prev, next)));
}

void interrupt_end(void)
{
        if(need_resched()) schedule();
        if(test_tsk_thread_flag(current, TIF_SIGPENDING)) do_signal();
}

void release_thread(struct task_struct *task)
{
        CHOOSE_MODE(release_thread_tt(task), release_thread_skas(task));
}

void exit_thread(void)
{
        CHOOSE_MODE(exit_thread_tt(), exit_thread_skas());
        unprotect_stack((unsigned long) current_thread);
}

void *get_current(void)
{
        return(current);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long stack_top, struct task_struct *p,
                struct pt_regs *regs)
{
        p->thread = (struct thread_struct) INIT_THREAD;
        return(CHOOSE_MODE_PROC(copy_thread_tt, copy_thread_skas, nr,
                                clone_flags, sp, stack_top, p, regs));
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        CHOOSE_MODE_PROC(initial_thread_cb_tt, initial_thread_cb_skas, proc,
                         arg);
        kmalloc_ok = save_kmalloc_ok;
}

unsigned long stack_sp(unsigned long page)
{
        return(page + PAGE_SIZE - sizeof(void *));
}

int current_pid(void)
{
        return(current->pid);
}

void default_idle(void)
{
        uml_idle_timer();

        atomic_inc(&init_mm.mm_count);
        current->mm = &init_mm;
        current->active_mm = &init_mm;

        while(1){
                /* endless idle loop with no priority at all */
                SET_PRI(current);

                /*
                 * although we are an idle CPU, we do not want to
                 * get into the scheduler unnecessarily.
                 */
                if(need_resched())
                        schedule();

                idle_sleep(10);
        }
}

void cpu_idle(void)
{
        CHOOSE_MODE(init_idle_tt(), init_idle_skas());
}

int page_size(void)
{
        return(PAGE_SIZE);
}

unsigned long page_mask(void)
{
        return(PAGE_MASK);
}

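/* Walk the page tables of the given task to translate a virtual address into
 * a physical one, optionally returning the pte as well.  Returns
 * ERR_PTR(-EINVAL) if any level of the walk is not present.
 */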
void *um_virt_to_phys(struct task_struct *task, unsigned long addr,
                      pte_t *pte_out)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if(task->mm == NULL)
                return(ERR_PTR(-EINVAL));
        pgd = pgd_offset(task->mm, addr);
        if(!pgd_present(*pgd))
                return(ERR_PTR(-EINVAL));

        pud = pud_offset(pgd, addr);
        if(!pud_present(*pud))
                return(ERR_PTR(-EINVAL));

        pmd = pmd_offset(pud, addr);
        if(!pmd_present(*pmd))
                return(ERR_PTR(-EINVAL));

        pte = pte_offset_kernel(pmd, addr);
        if(!pte_present(*pte))
                return(ERR_PTR(-EINVAL));

        if(pte_out != NULL)
                *pte_out = *pte;
        return((void *) (pte_val(*pte) & PAGE_MASK) + (addr & ~PAGE_MASK));
}

char *current_cmd(void)
{
#if defined(CONFIG_SMP) || defined(CONFIG_HIGHMEM)
        return("(Unknown)");
#else
        void *addr = um_virt_to_phys(current, current->mm->arg_start, NULL);
        return IS_ERR(addr) ? "(Unknown)" : __va((unsigned long) addr);
#endif
}

void force_sigbus(void)
{
        printk(KERN_ERR "Killing pid %d because of a lack of memory\n",
               current->pid);
        lock_kernel();
        sigaddset(&current->pending.signal, SIGBUS);
        recalc_sigpending();
        current->flags |= PF_SIGNALED;
        do_exit(SIGBUS | 0x80);
}

void dump_thread(struct pt_regs *regs, struct user *u)
{
}

void enable_hlt(void)
{
        panic("enable_hlt");
}

EXPORT_SYMBOL(enable_hlt);

void disable_hlt(void)
{
        panic("disable_hlt");
}

EXPORT_SYMBOL(disable_hlt);

void *um_kmalloc(int size)
{
        return(kmalloc(size, GFP_KERNEL));
}

void *um_kmalloc_atomic(int size)
{
        return(kmalloc(size, GFP_ATOMIC));
}

void *um_vmalloc(int size)
{
        return(vmalloc(size));
}

unsigned long get_fault_addr(void)
{
        return((unsigned long) current->thread.fault_addr);
}

EXPORT_SYMBOL(get_fault_addr);

void not_implemented(void)
{
        printk(KERN_DEBUG "Something isn't implemented in here\n");
}

EXPORT_SYMBOL(not_implemented);

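/* Nonzero when the given stack pointer does not lie on the current kernel
 * stack, i.e. we were running in a userspace context.
 */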
int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return(stack != (unsigned long) current_thread);
}

extern void remove_umid_dir(void);

__uml_exitcall(remove_umid_dir);

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

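/* Run the UML-specific exitcalls, in reverse order of registration. */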
void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(char *string)
{
        char *new;

        new = kmalloc(strlen(string) + 1, GFP_KERNEL);
        if(new == NULL) return(NULL);
        strcpy(new, string);
        return(new);
}

void *get_init_task(void)
{
        return(&init_thread_union.thread_info.task);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
        return(copy_to_user(to, from, size));
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return(copy_from_user(to, from, size));
}

int clear_user_proc(void __user *buf, int size)
{
        return(clear_user(buf, size));
}

int strlen_user_proc(char __user *str)
{
        return(strlen_user(str));
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
        int cpu = current_thread->cpu;
        IPI_handler(cpu);
        if(cpu != 0)
                return(1);
#endif
        return(0);
}

int um_in_interrupt(void)
{
        return(in_interrupt());
}

int cpu(void)
{
        return(current_thread->cpu);
}

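/* Level of sysemu in use (0-2), never raised above what the host supports
 * (sysemu_supported is determined elsewhere); adjustable at runtime through
 * /proc/sysemu below.
 */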
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int proc_read_sysemu(char *buf, char **start, off_t offset, int size,
                            int *eof, void *data)
{
        if (snprintf(buf, size, "%d\n", get_using_sysemu()) < size) /* No overflow */
                *eof = 1;

        return strlen(buf);
}

static int proc_write_sysemu(struct file *file, const char *buf,
                             unsigned long count, void *data)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        return count; /* We use the first char, but pretend to write everything */
}

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;
        if (!sysemu_supported)
                return 0;

        ent = create_proc_entry("sysemu", 0600, &proc_root);

        if (ent == NULL)
        {
                printk("Failed to register /proc/sysemu\n");
                return(0);
        }

        ent->read_proc = proc_read_sysemu;
        ent->write_proc = proc_write_sysemu;

        return 0;
}

late_initcall(make_proc_sysemu);

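/* Returns 0 if the task is not being single-stepped (PT_DTRACE clear), 1 if
 * it is single-stepping through a syscall, and 2 otherwise.
 */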
int singlestepping(void *t)
{
        struct task_struct *task = t ? t : current;

        if (!(task->ptrace & PT_DTRACE))
                return(0);

        if (task->thread.singlestep_syscall)
                return(1);

        return 2;
}

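/* Randomize the stack pointer by up to 8kB when address space randomization
 * is enabled, and align it to 16 bytes.
 */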
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}


/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */