/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage extern void ret_from_fork(void);

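/*
 * old_rsp caches the user-space stack pointer of the task currently
 * running on this CPU (see start_thread_common() and __switch_to()
 * below); is_idle flags that this CPU is inside the idle loop, for the
 * idle notifier machinery that follows.
 */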
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
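
/*
 * A minimal usage sketch for the idle notifier API above (hypothetical
 * caller, not part of this file; all names below are made up):
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *data)
 *	{
 *		if (action == IDLE_START)
 *			start_accounting();	(CPU is entering idle)
 *		else if (action == IDLE_END)
 *			stop_accounting();	(CPU is leaving idle)
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *	...
 *	idle_notifier_register(&my_idle_nb);
 *
 * The chain is atomic and is called from enter_idle()/__exit_idle()
 * below with interrupts disabled, so callbacks must not sleep.
 */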

void enter_idle(void)
{
        percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}

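/*
 * With CONFIG_SMP the real play_dead() is provided by the CPU-hotplug
 * code and parks an offlined CPU; without SMP a CPU can never be taken
 * offline, so reaching this stub would be a bug.
 */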
#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        current_thread_info()->status |= TS_POLLING;

        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. CPU0 already has it initialized but no harm in
         * doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched()) {

                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_touch_nmi();
                        local_irq_disable();
                        enter_idle();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();

                        /* enter_idle() needs rcu for notifiers */
                        rcu_idle_enter();

                        if (cpuidle_idle_call())
                                pm_idle();

                        rcu_idle_exit();
                        start_critical_timings();

                        /*
                         * In many cases the interrupt that ended idle
                         * has already called exit_idle. But some idle
                         * loops can be woken up without interrupt.
                         */
                        __exit_idle();
                }

                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/* Also prints some state that isn't saved in pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        show_regs_common();
        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip, 1);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss,
                        regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();

        printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

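/*
 * Called late in task teardown.  The only x86-64 per-thread resource
 * that could still be live at this point is the LDT, and exec or exit
 * should already have released it, so finding one here is a bug.
 */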
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                        dead_task->comm,
                                        dead_task->mm->context.ldt,
                                        dead_task->mm->context.size);
                        BUG();
                }
        }
}

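/*
 * Helpers for stashing a 32-bit segment base in a GDT TLS slot.  A base
 * that fits in 32 bits can be switched by reloading a selector, which
 * is cheaper than the wrmsr needed for a full 64-bit base (see
 * do_arch_prctl() below).
 */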
static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

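/*
 * Set up the complete kernel-side state of a newly forked task: build
 * its child pt_regs at the top of the new kernel stack, copy the
 * parent's segment state, duplicate the I/O-permission bitmap if the
 * parent has one, and honour CLONE_SETTLS.
 */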
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;

        childregs->ax = 0;
        if (user_mode(regs))
                childregs->sp = sp;
        else
                childregs->sp = (unsigned long)childregs;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);
        p->thread.usersp = me->thread.usersp;

        set_tsk_thread_flag(p, TIF_FORK);

        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);

        err = -ENOMEM;
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
                                IO_BITMAP_BYTES);
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}

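/*
 * Reset the register and segment state for a freshly exec'ed image:
 * 64-bit tasks start on __USER_CS/__USER_DS with null %ds/%es
 * selectors, while the ia32 variant below uses the 32-bit selectors
 * throughout.
 */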
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip = new_ip;
        regs->sp = new_sp;
        percpu_write(old_rsp, new_sp);
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

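/*
 * Note that the low-level kernel-stack switch happens in the
 * switch_to() assembly (see asm/system.h); by the time __switch_to()
 * below runs we are already on next_p's kernel stack, and only the
 * remaining segment, FPU and per-CPU state is migrated here.
 */
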
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
        bool preload_fpu;

        /*
         * If the task has used the FPU in its last 5 timeslices, just do
         * a full restore of the math state immediately to avoid the trap;
         * the chances of needing the FPU soon are obviously high now
         */
        preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

        /* we're going to use this soon, after a few expensive things */
        if (preload_fpu)
                prefetch(next->fpu.state);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        load_sp0(tss, next);

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /*
         * We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, fsindex);
        savesegment(gs, gsindex);

        load_TLS(next, cpu);

        /* Must be after DS reload */
        __unlazy_fpu(prev_p);

        /* Make sure cpu is ready for new context */
        if (preload_fpu)
                clts();

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch FS and GS.
         *
         * Segment register != 0 always requires a reload. Also
         * reload when it has changed. When the prev process used a
         * 64-bit base, always reload to avoid an information leak.
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);
                /*
                 * Check if the user used a selector != 0; if yes
                 * clear the 64-bit base, since the overloaded base is
                 * always mapped to the NULL selector
                 */
                if (fsindex)
                        prev->fs = 0;
        }
        /* when the next process has a 64-bit base, use it */
        if (next->fs)
                wrmsrl(MSR_FS_BASE, next->fs);
        prev->fsindex = fsindex;

        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);
                if (gsindex)
                        prev->gs = 0;
        }
        if (next->gs)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;

        /*
         * Switch the PDA and FPU contexts.
         */
        prev->usersp = percpu_read(old_rsp);
        percpu_write(old_rsp, next->usersp);
        percpu_write(current_task, next_p);

        percpu_write(kernel_stack,
                     (unsigned long)task_stack_page(next_p) +
                     THREAD_SIZE - KERNEL_STACK_OFFSET);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

        /*
         * Preload the FPU context, now that we've determined that the
         * task is likely to be using it.
         */
        if (preload_fpu)
                __math_state_restore();

        return prev_p;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /*
         * TBD: overwrites user setup. Should have two bits.
         * But 64-bit processes have always behaved this way,
         * so it's not too bad. The main problem is just that
         * 32-bit children are affected again.
         */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_IA32);
        current->personality |= force_personality32;

        /* Mark the associated mm as containing 32-bit tasks. */
        if (current->mm)
                current->mm->context.ia32_compat = 1;

        /* Prepare the first "return" to user space */
        current_thread_info()->status |= TS_COMPAT;
}

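/*
 * get_wchan() walks the sleeping task's frame-pointer chain to find the
 * first return address outside the scheduler, i.e. the place where the
 * task is blocked.  The walk is bounded to 16 frames and every pointer
 * is range-checked against the task's stack, since the chain is read
 * without locking and may be stale.
 */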
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
                if (fp < (unsigned long)stack ||
                    fp >= (unsigned long)stack+THREAD_SIZE)
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}

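/*
 * Backend for the arch_prctl(2) syscall: get or set the %fs/%gs base
 * of "task".  Bases that fit in 32 bits go into a GDT TLS slot so that
 * a context switch only reloads a selector; larger bases need the
 * MSR_FS_BASE/MSR_KERNEL_GS_BASE MSRs.
 *
 * A minimal user-space sketch (hypothetical; libc typically provides
 * no wrapper, so it goes through syscall(2)):
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);	(read %fs base)
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, base);	(write it back)
 */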
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /*
                 * handle small bases via the GDT because that's faster
                 * to switch.
                 */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /*
                 * Not strictly needed for fs, but do it for symmetry
                 * with gs
                 */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /*
                 * handle small bases via the GDT because that's faster
                 * to switch.
                 */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /*
                                 * set the selector to 0 to not confuse
                                 * __switch_to
                                 */
                                loadsegment(fs, 0);
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

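/*
 * Report the user stack pointer.  For 32-bit tasks it sits in the saved
 * pt_regs; for 64-bit tasks the syscall fast path keeps it in the
 * per-CPU old_rsp (synced into thread.usersp at context switch) rather
 * than in the register frame, hence the two cases.
 */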
unsigned long KSTK_ESP(struct task_struct *task)
{
        return (test_tsk_thread_flag(task, TIF_IA32)) ?
                (task_pt_regs(task)->sp) : ((task)->thread.usersp);
}