/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        return ((unsigned long *)tsk->thread.sp)[3];
}
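
/*
 * Layout note: the "3" above is a sketch-level assumption about the
 * kernel stack frame left behind by the switch_to() asm in
 * asm/system.h (which pushes flags and %ebp before saving %esp); the
 * word at offset 3 above the saved stack pointer is then a return
 * address in the scheduler path.  It yields a best-effort PC for a
 * blocked task, not an architectural guarantee, and silently breaks
 * if the switch_to() layout changes.
 */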

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>

static void cpu_exit_clear(void)
{
        int cpu = raw_smp_processor_id();

        idle_task_exit();

        cpu_uninit();
        irq_ctx_exit(cpu);

        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);

        numa_remove_cpu(cpu);
}

/* We don't actually take the CPU down; we just spin without interrupts. */
static inline void play_dead(void)
{
        /* This must be done before the dead CPU is acked */
        cpu_exit_clear();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        /*
         * With physical CPU hotplug, we should halt the cpu
         */
        local_irq_disable();
        /* mask all interrupts, flush any and all caches, and halt */
        wbinvd_halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread.  There's no useful work to be done, so just try
 * to conserve power and have a low exit latency (i.e. sit in a loop
 * waiting for somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
        int cpu = smp_processor_id();

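        /*
         * TS_POLLING tells the scheduler that this idle task polls
         * need_resched(): a remote wakeup can simply set the flag
         * and skip sending a reschedule IPI to this CPU.
         */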
        current_thread_info()->status |= TS_POLLING;

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
                while (!need_resched()) {

                        check_pgt_cache();
                        rmb();

                        if (rcu_pending(cpu))
                                rcu_check_callbacks(cpu, 0);

                        if (cpu_is_offline(cpu))
                                play_dead();

                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
                        pm_idle();
                        start_critical_timings();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

void __show_registers(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned long sp;
        unsigned short ss, gs;

        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
                savesegment(gs, gs);
        } else {
                sp = (unsigned long) (&regs->sp);
                savesegment(ss, ss);
                savesegment(gs, gs);
        }

        printk("\n");
        printk("Pid: %d, comm: %s %s (%s %.*s)\n",
                        task_pid_nr(current), current->comm,
                        print_tainted(), init_utsname()->release,
                        (int)strcspn(init_utsname()->version, " "),
                        init_utsname()->version);

        printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
                        (u16)regs->cs, regs->ip, regs->flags,
                        smp_processor_id());
        print_symbol("EIP is at %s\n", regs->ip);

        printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
                regs->ax, regs->bx, regs->cx, regs->dx);
        printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
                regs->si, regs->di, regs->bp, sp);
        printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
               (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4_safe();
        printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
                        cr0, cr2, cr3, cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
                        d0, d1, d2, d3);

        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk("DR6: %08lx DR7: %08lx\n",
                        d6, d7);
}

void show_regs(struct pt_regs *regs)
{
        __show_registers(regs, 1);
        show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.bx = (unsigned long) fn;
        regs.dx = (unsigned long) arg;

        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
        regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

        /* Ok, create the new process.. */
        return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
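
/*
 * Illustrative use (hypothetical caller: kernel_thread() and the clone
 * flags are real, the worker function and its argument are made up):
 *
 *	static int my_worker(void *data)
 *	{
 *		... do work on data ...
 *		return 0;
 *	}
 *
 *	kernel_thread(my_worker, &my_data, CLONE_FS | CLONE_FILES);
 *
 * The child starts in kernel_thread_helper(), which calls fn(arg)
 * from the %bx/%dx values set up above.
 */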

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
        /* The process may have allocated an io port bitmap... nuke it. */
        if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
                struct task_struct *tsk = current;
                struct thread_struct *t = &tsk->thread;
                int cpu = get_cpu();
                struct tss_struct *tss = &per_cpu(init_tss, cpu);

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
                t->io_bitmap_max = 0;
                tss->io_bitmap_owner = NULL;
                tss->io_bitmap_max = 0;
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                put_cpu();
        }
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        clear_tsk_thread_flag(tsk, TIF_DEBUG);
        /*
         * Forget coprocessor state..
         */
        tsk->fpu_counter = 0;
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        BUG_ON(dead_task->mm);
        release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        unsigned long unused,
        struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        struct task_struct *tsk;
        int err;

        childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->ax = 0;
        childregs->sp = sp;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);

        p->thread.ip = (unsigned long) ret_from_fork;

        savesegment(gs, p->thread.gs);

        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        err = 0;

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS)
                err = do_set_thread_area(p, -1,
                        (struct user_desc __user *)childregs->si, 0);

        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        __asm__("movl %0, %%gs" :: "r"(0));
        regs->fs = 0;
        set_fs(USER_DS);
        regs->ds = __USER_DS;
        regs->es = __USER_DS;
        regs->ss = __USER_DS;
        regs->cs = __USER_CS;
        regs->ip = new_ip;
        regs->sp = new_sp;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);
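
/*
 * start_thread() is called by the binfmt loaders once a new image has
 * been set up; e.g. the ELF loader finishes with (roughly, see
 * fs/binfmt_elf.c):
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * i.e. it hands over the entry point and the initial user stack
 * pointer of the freshly exec'ed program.
 */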

static void hard_disable_TSC(void)
{
        write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}
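
/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctls.
 * A sketch of the userspace side (plain C; nothing is assumed beyond
 * the documented prctl interface):
 *
 *	#include <sys/prctl.h>
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0);
 *	// any rdtsc executed by this task now raises SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE, 0, 0, 0);
 *
 * The mode is per-thread; __switch_to_xtra() below keeps CR4.TSD in
 * sync with TIF_NOTSC across context switches.
 */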

static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                 struct tss_struct *tss)
{
        struct thread_struct *prev, *next;
        unsigned long debugctl;

        prev = &prev_p->thread;
        next = &next_p->thread;

        debugctl = prev->debugctlmsr;
        if (next->ds_area_msr != prev->ds_area_msr) {
                /* we clear debugctl to make sure DS
                 * is not in use when we change it */
                debugctl = 0;
                update_debugctlmsr(0);
                wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
        }

        if (next->debugctlmsr != debugctl)
                update_debugctlmsr(next->debugctlmsr);

        if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
                set_debugreg(next->debugreg0, 0);
                set_debugreg(next->debugreg1, 1);
                set_debugreg(next->debugreg2, 2);
                set_debugreg(next->debugreg3, 3);
                /* no 4 and 5 */
                set_debugreg(next->debugreg6, 6);
                set_debugreg(next->debugreg7, 7);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

#ifdef X86_BTS
        if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

        if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
                ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif

        if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Disable the bitmap via an invalid offset. We still cache
                 * the previous bitmap owner and the IO bitmap contents:
                 */
                tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
                return;
        }

        if (likely(next == tss->io_bitmap_owner)) {
                /*
                 * The previous owner of the bitmap (and hence the bitmap
                 * content) matches the next task; we don't have to do
                 * anything but set a valid offset in the TSS:
                 */
                tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
                return;
        }
        /*
         * Lazy TSS I/O bitmap copy.  We set an invalid offset here and
         * let the task get a GPF in case an I/O instruction is performed.
         * The GPF handler verifies that the faulting task has a valid
         * I/O bitmap and, if so, does the real copy and restarts the
         * instruction.  This saves us redundant copies when the currently
         * switched task does not perform any I/O during its timeslice.
         */
        tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
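
/*
 * The matching fault-side fix-up for the lazy case above lives in the
 * #GP handler (do_general_protection() in traps_32.c): on a fault with
 * io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY it copies the task's
 * bitmap into the TSS, sets a valid offset and restarts the faulting
 * I/O instruction.
 */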

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process.  Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching.  The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular).  With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a red herring - this code is not noticeably faster.
 * However, there _is_ some room for improvement here, so the
 * performance issues may eventually be a valid point.  More
 * important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

        __unlazy_fpu(prev_p);

        /* we're going to use this soon, after a few expensive things */
        if (next_p->fpu_counter > 5)
                prefetch(next->xstate);

        /*
         * Reload esp0.
         */
        load_sp0(tss, next);

        /*
         * Save away %gs. No need to save %fs, as it was saved on the
         * stack on entry.  No need to save %es and %ds, as those are
         * always kernel segments while inside the kernel.  Doing this
         * before setting the new TLS descriptors avoids the situation
         * where we temporarily have non-reloadable segments in %fs
         * and %gs.  This could be an issue if the NMI handler ever
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
        savesegment(gs, prev->gs);

        /*
         * Load the per-thread Thread-Local Storage descriptor.
         */
        load_TLS(next, cpu);

        /*
         * Restore IOPL if needed.  In normal use, the flags restore
         * in the switch assembly will handle this.  But if the kernel
         * is running virtualized at a non-zero CPL, the popf will
         * not restore flags, so it must be done in a separate step.
         */
        if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
                set_iopl_mask(next->iopl);

        /*
         * Now maybe handle debug registers and/or IO bitmaps
         */
        if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
                     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_leave_lazy_cpu_mode();

        /*
         * If the task has used the FPU during the last 5 timeslices,
         * just do a full restore of the math state immediately to
         * avoid the trap; the chances of needing the FPU soon are
         * obviously high now.
         *
         * The tsk_used_math() check prevents calling
         * math_state_restore(), which can sleep in the case of
         * !tsk_used_math().
         */
        if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
                math_state_restore();

        /*
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
                loadsegment(gs, next->gs);

        x86_write_percpu(current_task, next_p);

        return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
        return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;

        clone_flags = regs.bx;
        newsp = regs.cx;
        parent_tidptr = (int __user *)regs.dx;
        child_tidptr = (int __user *)regs.di;
        if (!newsp)
                newsp = regs.sp;
        return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}
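
/*
 * The register usage above mirrors the i386 clone() syscall ABI:
 * %ebx = clone_flags, %ecx = new stack pointer, %edx = parent tid
 * pointer, %edi = child tid pointer; %esi (not read here) carries the
 * struct user_desc pointer that copy_thread() consumes via
 * childregs->si when CLONE_SETTLS is set.
 */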

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
        int error;
        char *filename;

        filename = getname((char __user *) regs.bx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
                          (char __user * __user *) regs.cx,
                          (char __user * __user *) regs.dx,
                          &regs);
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
        }
        putname(filename);
out:
        return error;
}

#define top_esp                (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp                (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long bp, sp, ip;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack_page = (unsigned long)task_stack_page(p);
        sp = p->thread.sp;
        if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
                return 0;
        /* include/asm-i386/system.h:switch_to() pushes bp last. */
        bp = *(unsigned long *) sp;
        do {
                if (bp < stack_page || bp > top_ebp+stack_page)
                        return 0;
                ip = *(unsigned long *) (bp+4);
                if (!in_sched_functions(ip))
                        return ip;
                bp = *(unsigned long *) bp;
        } while (count++ < 16);
        return 0;
}
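
/*
 * get_wchan() is what /proc/<pid>/wchan reports: for a sleeping task
 * it walks at most 16 saved frame pointers and returns the first
 * return address outside the scheduler.  Illustrative use from
 * userspace (the output depends on where the task happens to sleep):
 *
 *	$ cat /proc/1/wchan
 *	do_select
 */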

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
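
/*
 * Worked example: with randomization active, an incoming sp of
 * 0xbffff4d0 is lowered by 0-8191 bytes and then rounded down to a
 * 16-byte boundary, i.e. it ends up anywhere in
 * [0xbfffd4d0, 0xbffff4d0] with the low four bits clear.
 */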

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long range_end = mm->brk + 0x02000000;
        return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}