/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>

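/*
 * Common trampoline for freshly created tasks, defined in entry_64.S:
 * a new child resumes execution through here after its first
 * __switch_to().
 */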
asmlinkage extern void ret_from_fork(void);

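/*
 * old_rsp caches the user stack pointer across syscall entry (the
 * syscall fast path does not save it in pt_regs); is_idle flags
 * whether this CPU is currently inside the idle loop, for the idle
 * notifiers below.
 */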
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
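
/*
 * A minimal usage sketch (hypothetical module code, not part of this
 * file): a subsystem that wants to see idle transitions registers a
 * notifier block and receives IDLE_START/IDLE_END events on this CPU:
 *
 *      static int my_idle_notify(struct notifier_block *nb,
 *                                unsigned long val, void *data)
 *      {
 *              pr_info("cpu %d: %s\n", smp_processor_id(),
 *                      val == IDLE_START ? "idle start" : "idle end");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_idle_nb = {
 *              .notifier_call = my_idle_notify,
 *      };
 *
 *      idle_notifier_register(&my_idle_nb);
 */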

void enter_idle(void)
{
        percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be done, so just try to
 * conserve power and have a low exit latency (i.e. sit in a loop
 * waiting for somebody to say that they'd like to reschedule).
 */
void cpu_idle(void)
{
        current_thread_info()->status |= TS_POLLING;

        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. CPU0 already has it initialized but no harm in
         * doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
                while (!need_resched()) {

                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_irq_disable();
                        enter_idle();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
                        if (cpuidle_idle_call())
                                pm_idle();
                        start_critical_timings();

                        /*
                         * In many cases the interrupt that ended idle
                         * has already called exit_idle. But some idle
                         * loops can be woken up without interrupt.
                         */
                        __exit_idle();
                }

                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/* Also prints some state that isn't saved in the pt_regs. */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        show_regs_common();
        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip, 1);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
               regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();

        printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

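/*
 * Called when a task is being reaped; on x86-64 the only per-thread
 * architecture state that could leak here is the LDT, which should
 * already have been torn down along with the mm.
 */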
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
                                dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}

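/*
 * The two helpers above back the "small base" path of do_arch_prctl()
 * below: an FS/GS base that fits in 32 bits is installed as a GDT TLS
 * descriptor (FS_TLS/GS_TLS) instead of being written to the base
 * MSRs, because reloading a selector is cheaper than a wrmsr.
 */
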
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

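/*
 * Set up the kernel stack, pt_regs frame and segment state for a new
 * child task; called from copy_process(). "sp" is the new user stack
 * pointer for clone(), and "regs" is the parent's register frame.
 */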
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;

        childregs->ax = 0;
        if (user_mode(regs))
                childregs->sp = sp;
        else
                childregs->sp = (unsigned long)childregs;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);
        p->thread.usersp = me->thread.usersp;

        set_tsk_thread_flag(p, TIF_FORK);

        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);

        err = -ENOMEM;
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
                        IO_BITMAP_BYTES);
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}

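/*
 * Reset segment state and point the user register frame at the new
 * program's entry point and stack: the exec-time setup shared by the
 * 64-bit and compat start_thread() variants below.
 */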
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip = new_ip;
        regs->sp = new_sp;
        percpu_write(old_rsp, new_sp);
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

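/* Compat variant, used when exec'ing a 32-bit binary under IA32 emulation. */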
#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here. Set the probe on schedule instead.
 * The function graph tracer is not supported here either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
        bool preload_fpu;

        /*
         * If the task has used the FPU in the last five timeslices,
         * do a full restore of the math state immediately to avoid
         * the trap; the chances of needing the FPU again soon are
         * obviously high now.
         */
        preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

        /* we're going to use this soon, after a few expensive things */
        if (preload_fpu)
                prefetch(next->fpu.state);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        load_sp0(tss, next);

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, fsindex);
        savesegment(gs, gsindex);

        load_TLS(next, cpu);

        /* Must be after DS reload */
        __unlazy_fpu(prev_p);

        /* Make sure cpu is ready for new context */
        if (preload_fpu)
                clts();

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch FS and GS.
         *
         * A segment register != 0 always requires a reload. Also
         * reload when it has changed. When the previous process used
         * a 64-bit base, always reload to avoid an information leak.
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);
                /*
                 * Check if the user used a selector != 0; if yes
                 * clear the 64-bit base, since an overloaded base is
                 * always mapped to the NULL selector.
                 */
                if (fsindex)
                        prev->fs = 0;
        }
        /* when the next process has a 64-bit base, use it */
        if (next->fs)
                wrmsrl(MSR_FS_BASE, next->fs);
        prev->fsindex = fsindex;

        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);
                if (gsindex)
                        prev->gs = 0;
        }
        if (next->gs)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;

        /*
         * Switch the PDA and FPU contexts.
         */
        prev->usersp = percpu_read(old_rsp);
        percpu_write(old_rsp, next->usersp);
        percpu_write(current_task, next_p);

        percpu_write(kernel_stack,
                     (unsigned long)task_stack_page(next_p) +
                     THREAD_SIZE - KERNEL_STACK_OFFSET);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Preload the FPU context, now that we've determined that the
         * task is likely to be using it.
         */
        if (preload_fpu)
                __math_state_restore();

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /*
         * TBD: this overwrites the user's setup. It should use two
         * bits; but 64-bit processes have always behaved this way,
         * so it's not too bad. The main problem is just that 32-bit
         * children are affected again.
         */
        current->personality &= ~READ_IMPLIES_EXEC;
}

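/* Exec-time setup for 32-bit (compat) binaries. */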
void set_personality_ia32(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_IA32);
        current->personality |= force_personality32;

        /* Mark the associated mm as containing 32-bit tasks. */
        if (current->mm)
                current->mm->context.ia32_compat = 1;

        /* Prepare the first "return" to user space */
        current_thread_info()->status |= TS_COMPAT;
}

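/*
 * Return the address a blocked task is sleeping at (its "wait
 * channel") by walking the saved frame-pointer chain on its kernel
 * stack, skipping scheduler internals; gives up after 16 frames.
 */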
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
                if (fp < (unsigned long)stack ||
                    fp >= (unsigned long)stack+THREAD_SIZE)
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}

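/*
 * Back end of arch_prctl(2): get and set the FS/GS base registers.
 * A rough user-space sketch of the call (hypothetical snippet; glibc
 * provides no wrapper for this syscall, so it goes through
 * syscall(2); error handling omitted):
 *
 *      #include <asm/prctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      unsigned long base;
 *      syscall(SYS_arch_prctl, ARCH_GET_FS, &base);
 */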
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

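/*
 * Report the user stack pointer of a task. For 64-bit tasks the
 * syscall fast path keeps the user RSP in the per-CPU old_rsp slot
 * (mirrored in thread.usersp) rather than in pt_regs, so read it
 * from there; compat tasks still have it in their register frame.
 */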
unsigned long KSTK_ESP(struct task_struct *task)
{
        return (test_tsk_thread_flag(task, TIF_IA32)) ?
                        (task_pt_regs(task)->sp) : ((task)->thread.usersp);
}