#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
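
/*
 * Sketch of a hypothetical consumer (the names below are illustrative,
 * not from this file): the callback runs from the idle loop with action
 * IDLE_START or IDLE_END.
 *
 *	static int my_idle_cb(struct notifier_block *nb,
 *			      unsigned long action, void *unused)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *	idle_notifier_register(&my_idle_nb);
 */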
#endif

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

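/*
 * Copy architecture-specific task state: the thread fields are copied
 * wholesale, and a fresh FPU state area is allocated and duplicated only
 * when the source task actually has one.
 */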
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_ORDER);
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_CONT " %s %s", vendor, product);
	if (board)
		printk(KERN_CONT "/%s", board);
	printk(KERN_CONT "\n");
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

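/*
 * CR4.TSD makes RDTSC a privileged instruction: with the bit set, a
 * user-mode RDTSC raises #GP, which the task sees as a SIGSEGV.
 */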
static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

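/*
 * Back ends for the PR_GET_TSC/PR_SET_TSC prctls (<linux/prctl.h>):
 * PR_TSC_SIGSEGV makes user-mode RDTSC fault, PR_TSC_ENABLE allows it.
 */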
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

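/*
 * Handle the rarely-changed "extra" per-task state on a context switch:
 * block-step (DEBUGCTLMSR_BTF), TSC disable (CR4.TSD), the I/O
 * permission bitmap in the TSS, and user-return notifiers.
 */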
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
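
/*
 * Illustrative call (my_thread_fn and the flag choice are hypothetical):
 * spawn a kernel thread that runs my_thread_fn(NULL) and exits when the
 * function returns.
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		return 0;
 *	}
 *	kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 */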

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

static inline int hlt_use_halt(void)
{
	return 1;
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us.  CPU0 already has it initialized but no harm in
	 * doing it again.  This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/*
			 * In many cases the interrupt that ended idle
			 * has already called exit_idle. But some idle
			 * loops can be woken up without interrupt.
			 */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

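/*
 * Point pm_idle at default_idle and report whether some other idle
 * routine had been installed before the call.
 */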
bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value
 * of pm_idle and start using the new one. Required while changing the
 * pm_idle handler on SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
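
/*
 * Expected update sequence (my_idle is an illustrative name):
 *
 *	pm_idle = my_idle;
 *	cpu_idle_wait();
 *
 * Once cpu_idle_wait() returns, no CPU is still executing the old
 * pm_idle routine.
 */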

/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

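/*
 * CPUID leaf 0x05 describes MONITOR/MWAIT: ECX bit 0 indicates whether
 * the extension enumeration in EDX is valid, and EDX[7:4] gives the
 * number of C1 sub-states supported via MWAIT hints (hence the 0xf0
 * mask below).
 */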
#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT.
	 */
	return (edx & MWAIT_EDX_C1);
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the
 * interrupt pending message MSR. If we detect C1E, then we handle it the
 * same way as C3 power states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI cannot interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the idle=halt boot option is given, halt is
		 * forced to be used for CPU idle. In that case the CPU
		 * C2/C3 states won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the "idle=nomwait" boot option is given, mwait
		 * is disabled for the CPU C2/C3 states. In that case
		 * this doesn't touch boot_option_idle_override either.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

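/*
 * Stack and brk randomization: arch_align_stack() offsets the initial
 * user stack pointer by up to 8 KB (16-byte aligned), and
 * arch_randomize_brk() picks a brk base within 32 MB above mm->brk.
 */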
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}