/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

asmlinkage extern void ret_from_fork(void);

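/*
 * Per-CPU state: old_rsp holds the current task's user stack pointer
 * (set up in start_thread_common() and kept in sync with thread.usersp
 * in __switch_to()), and is_idle flags whether this CPU is currently
 * inside the idle loop.
 */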
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

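/*
 * Clear this CPU's is_idle flag and fire the IDLE_END notifiers;
 * does nothing if the CPU was not marked idle, so it is safe to
 * call more than once per idle period.
 */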
static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

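/* On non-SMP kernels a CPU can never go offline, so this stub must never run. */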
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

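/*
 * A task being released should no longer own an LDT; if one is still
 * attached to its mm at this point, report the leak and BUG.
 */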
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

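/*
 * Install a 32-bit (4GB-limit, byte-granular-paged) TLS descriptor for
 * @addr in the given slot of the task's TLS array.
 */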
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

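/*
 * Set up the kernel stack frame and thread state for a task created by
 * fork/clone: the child gets a copy of the parent's registers with %rax
 * forced to 0, plus its own segment, I/O-bitmap and TLS state.
 */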
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

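/*
 * Reset segments, flags, stack and instruction pointer for a freshly
 * exec'ed image, and drop the old FPU/extended state.
 */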
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported too.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);


	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	__unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit childs are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}

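/*
 * Walk the saved frame pointers of a sleeping task to find the first
 * return address outside the scheduler (the "wait channel").
 */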
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

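/*
 * arch_prctl() backend: get or set the FS/GS base for @task. Bases that
 * fit in 32 bits go through a GDT TLS slot; larger bases are written to
 * the FS/GS base MSRs.
 */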
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}

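/*
 * User-visible stack pointer (e.g. for /proc): pt_regs->sp for compat
 * (IA32) tasks, thread.usersp for native 64-bit tasks.
 */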
unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}