/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	atomic_inc(&hlt_counter);
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	atomic_dec(&hlt_counter);
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
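	/*
	 * If HLT is allowed, clear TIF_POLLING_NRFLAG before halting so
	 * the scheduler knows it must send a resched IPI to wake us;
	 * while the flag is set (polling mode), remote CPUs may skip
	 * the IPI and just set need_resched.
	 */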
	local_irq_enable();

	if (!atomic_read(&hlt_counter)) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		while (!need_resched()) {
			local_irq_disable();
			if (!need_resched())
				safe_halt();
			else
				local_irq_enable();
		}
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		while (!need_resched())
			cpu_relax();
	}
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	local_irq_enable();

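	/*
	 * Spin until _TIF_NEED_RESCHED shows up in this thread's flags.
	 * "rep; nop" is the PAUSE instruction, which tells the CPU this
	 * is a spin-wait loop (saving power and helping SMT siblings).
	 */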
	asm volatile(
		"2:"
		"testl %0,%1;"
		"rep; nop;"
		"je 2b;"
		: :
		"i" (_TIF_NEED_RESCHED),
		"m" (current_thread_info()->flags));
}

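/*
 * cpu_idle_wait - after pm_idle has been changed, wait until every
 * online CPU has observed the change and gone through the idle loop at
 * least once, so the old idle routine is no longer in use anywhere.
 */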
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) &&
			    !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
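	/*
	 * idle_task_exit() drops the mm this idle thread was lazily
	 * borrowing; wbinvd() writes back and invalidates the caches
	 * before this CPU stops taking part in coherency traffic.
	 */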
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

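			/*
			 * Pairs with the wmb() in cpu_idle_wait(): ack the
			 * handshake above, then re-read pm_idle so that a
			 * concurrent change of the idle routine is seen.
			 */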
			rmb();
			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

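	/*
	 * MONITOR arms a hardware watch on the thread-flags word; only
	 * stores that happen after arming wake MWAIT, so the smp_mb()
	 * plus the re-check of need_resched() catches a wakeup that
	 * arrived before the monitor was armed.
	 */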
	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (need_resched())
			break;
		__mwait(0, 0);
	}
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	static int printed;
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		/*
		 * Skip if setup has overridden idle.
		 * If one CPU supports mwait, all CPUs support mwait.
		 */
		if (!pm_idle) {
			if (!printed) {
				printk("using mwait in idle threads.\n");
				printed = 1;
			}
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup (char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
	printk_address(regs->rip);
	printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
		regs->eflags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->rax, regs->rbx, regs->rcx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->rdx, regs->rsi, regs->rdi);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->rbp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

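	/*
	 * On x86-64 the FS/GS selectors say little by themselves; the
	 * real bases live in MSRs.  MSR_KERNEL_GS_BASE holds the base
	 * that SWAPGS will swap in on return to user mode ("shadow" GS).
	 */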
	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	asm("movq %%cr0, %0": "=r" (cr0));
	asm("movq %%cr2, %0": "=r" (cr2));
	asm("movq %%cr3, %0": "=r" (cr3));
	asm("movq %%cr4, %0": "=r" (cr4));

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}

void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(&regs->rsp);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(me);

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_info *t = current_thread_info();

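	/*
	 * An exec that switches ABI sets _TIF_ABI_PENDING; consuming it
	 * here toggles TIF_IA32 so the new image runs in the right mode.
	 */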
	if (t->flags & _TIF_ABI_PENDING)
		t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

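/*
 * Encode a 32-bit TLS base as a GDT entry in the task's tls_array;
 * do_arch_prctl() below uses this to handle bases that fit in 32 bits
 * via the GDT instead of the FS/GS base MSRs.
 */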
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct n_desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	desc->a = LDT_entry_a(&ud);
	desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	struct desc_struct *desc = (void *)t->thread.tls_array;
	desc += tls;
	return desc->base0 |
		(((u32)desc->base1) << 16) |
		(((u32)desc->base2) << 24);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
		unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	int err;
	struct pt_regs * childregs;
	struct task_struct *me = current;

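	/*
	 * The child's register frame sits at the top of its kernel
	 * stack (thread_info + THREAD_SIZE), one pt_regs below the
	 * end; start from a copy of the parent's registers, with
	 * rax = 0 so the child sees fork()/clone() return 0.
	 */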
	childregs = ((struct pt_regs *)
		(THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
	*childregs = *regs;

	childregs->rax = 0;
	childregs->rsp = rsp;
	if (rsp == ~0UL)
		childregs->rsp = (unsigned long)childregs;

	p->thread.rsp = (unsigned long) childregs;
	p->thread.rsp0 = (unsigned long) (childregs+1);
	p->thread.userrsp = me->thread.userrsp;

	set_ti_thread_flag(p->thread_info, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
	asm("mov %%es,%0" : "=m" (p->thread.es));
	asm("mov %%ds,%0" : "=m" (p->thread.ds));

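	/*
	 * If the parent has an I/O permission bitmap, the child gets
	 * its own copy; it must not share the parent's buffer.
	 */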
	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = ia32_child_tls(p, childregs);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	unlazy_fpu(prev_p);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	tss->rsp0 = next->rsp0;

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	asm volatile("mov %%es,%0" : "=m" (prev->es));
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_TLS(next, cpu);

	/*
	 * Switch FS and GS.
	 */
	{
		unsigned fsindex;
		asm volatile("movl %%fs,%0" : "=r" (fsindex));
		/* segment register != 0 always requires a reload.
		   also reload when it has changed.
		   when prev process used 64bit base always reload
		   to avoid an information leak. */
		if (unlikely(fsindex | next->fsindex | prev->fs)) {
			loadsegment(fs, next->fsindex);
			/* check if the user used a selector != 0
			 * if yes clear 64bit base, since overloaded base
			 * is always mapped to the Null selector
			 */
			if (fsindex)
				prev->fs = 0;
		}
		/* when next process has a 64bit base use it */
		if (next->fs)
			wrmsrl(MSR_FS_BASE, next->fs);
		prev->fsindex = fsindex;
	}
	{
		unsigned gsindex;
		asm volatile("movl %%gs,%0" : "=r" (gsindex));
		if (unlikely(gsindex | next->gsindex | prev->gs)) {
			load_gs_index(next->gsindex);
			if (gsindex)
				prev->gs = 0;
		}
		if (next->gs)
			wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
		prev->gsindex = gsindex;
	}

	/*
	 * Switch the PDA context.
	 */
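	/* oldrsp is the user RSP saved at syscall entry; kernelstack is
	   what the SYSCALL entry path will load as the kernel stack. */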
	prev->userrsp = read_pda(oldrsp);
	write_pda(oldrsp, next->userrsp);
	write_pda(pcurrent, next_p);
	write_pda(kernelstack,
		  (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg7)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	/*
	 * Handle the IO bitmap
	 */
	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
		if (next->io_bitmap_ptr)
			/*
			 * Copy the relevant range of the IO bitmap.
			 * Normally this is 128 bytes or less:
			 */
			memcpy(tss->io_bitmap, next->io_bitmap_ptr,
				max(prev->io_bitmap_max, next->io_bitmap_max));
		else {
			/*
			 * Clear any possible leftover bits:
			 */
			memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
		}
	}

	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs regs)
{
	long error;
	char * filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, &regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64-bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->rsp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
		    NULL, NULL);
}

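/*
 * get_wchan - walk the saved frame pointers of a sleeping task to find
 * the first return address outside the scheduler, i.e. the place where
 * the task is blocked.  Gives up after 16 frames or if a frame pointer
 * leaves the task's kernel stack.
 */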
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, rip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)p->thread_info;
	if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.rsp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack+THREAD_SIZE)
			return 0;
		rip = *(u64 *)(fp+8);
		if (!in_sched_functions(rip))
			return rip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				asm volatile("movl %0,%%fs" :: "r" (0));
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs *pp, ptregs;

	pp = (struct pt_regs *)(tsk->thread.rsp0);
	--pp;

	ptregs = *pp;
	ptregs.cs &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

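/*
 * Randomize the initial stack top by up to 8KB when address-space
 * randomization is enabled, and keep it 16-byte aligned as the ABI
 * requires.
 */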
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}