/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

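/*
 * A minimal sketch of how a client could use the idle notifiers above
 * (illustrative only; "my_idle_cb" and "my_idle_nb" are made-up names,
 * not part of this file):
 *
 *	static int my_idle_cb(struct notifier_block *nb, unsigned long val,
 *			      void *data)
 *	{
 *		if (val == IDLE_START)
 *			;	// this CPU is entering idle
 *		else if (val == IDLE_END)
 *			;	// this CPU left idle
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */
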
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;

	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			if (cpuidle_idle_call())
				pm_idle();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_regs_common();
	printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
			regs->sp, regs->flags);
	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt,
				dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	if (user_mode(regs))
		childregs->sp = sp;
	else
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
	savesegment(fs, p->thread.fsindex);
	p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	err = -ENOMEM;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
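
/*
 * Note on the CLONE_SETTLS path above: a native 64-bit caller passes the
 * new TLS base as the fifth clone() syscall argument, which lands in %r8,
 * hence the read from childregs->r8; a 32-bit (ia32) caller instead passes
 * a user_desc pointer, picked up from %esi. A rough userspace sketch using
 * the raw syscall (illustrative only, arguments abbreviated):
 *
 *	syscall(SYS_clone, CLONE_VM | CLONE_SETTLS | ...,
 *		child_stack, NULL, NULL, tls_base);
 */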

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip		= new_ip;
	regs->sp		= new_sp;
	percpu_write(old_rsp, new_sp);
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
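
/*
 * For context (not part of this file): the binary loaders are the usual
 * callers of start_thread(); fs/binfmt_elf.c, for instance, finishes
 * load_elf_binary() with roughly
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * pointing the registers at the new image's entry point and initial user
 * stack before the first return to user space.
 */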

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;
	bool preload_fpu;

	/*
	 * If the task has used the FPU in the last 5 timeslices, just do a
	 * full restore of the math state immediately to avoid the trap; the
	 * chances of needing the FPU soon are obviously high now.
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->fpu.state);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/* Must be after DS reload */
	__unlazy_fpu(prev_p);

	/* Make sure cpu is ready for new context */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload.  Also
	 * reload when it has changed.  When prev process used 64bit
	 * base always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 *  clear 64bit base, since overloaded base is always
		 *  mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = percpu_read(old_rsp);
	percpu_write(old_rsp, next->usersp);
	percpu_write(current_task, next_p);

	percpu_write(kernel_stack,
		(unsigned long)task_stack_page(next_p) +
		THREAD_SIZE - KERNEL_STACK_OFFSET);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Preload the FPU context, now that we've determined that the
	 * task is likely to be using it.
	 */
	if (preload_fpu)
		__math_state_restore();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: this overwrites the user's setup. Should have two bits.
	 * But 64bit processes have always behaved this way, so it's
	 * not too bad. The main problem is just that 32bit children
	 * are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_IA32);
	current->personality |= force_personality32;

	/* Mark the associated mm as containing 32-bit tasks. */
	if (current->mm)
		current->mm->context.ia32_compat = 1;

	/* Prepare the first "return" to user space */
	current_thread_info()->status |= TS_COMPAT;
}

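/*
 * Illustrative note (an assumption about the conventional frame layout,
 * not anything this file defines): get_wchan() below walks saved frame
 * pointers, relying on each frame looking like
 *
 *	[fp + 8]  return address into the caller
 *	[fp + 0]  caller's saved %rbp (the next fp)
 *
 * so it only finds anything useful when the kernel is built with frame
 * pointers enabled.
 */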
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp >= (unsigned long)stack+THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp+8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
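
/*
 * For illustration (not part of this file): userspace typically reaches
 * do_arch_prctl() through the raw syscall, since libc has no dedicated
 * wrapper here. A rough sketch:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, &base);   // read the FS base
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000);  // set a new GS base
 */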

unsigned long KSTK_ESP(struct task_struct *task)
{
	return (test_tsk_thread_flag(task, TIF_IA32)) ?
			(task_pt_regs(task)->sp) : ((task)->thread.usersp);
}