#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif
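
/*
 * Example (sketch, not part of this file): a driver interested in idle
 * transitions on x86-64 could hook the chain above like this;
 * "my_idle_cb" and "my_idle_nb" are hypothetical names.
 *
 *	static int my_idle_cb(struct notifier_block *nb, unsigned long val,
 *			      void *data)
 *	{
 *		if (val == IDLE_START)
 *			;	// CPU is entering idle
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_cb,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 */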

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

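/*
 * Duplicate the task's FPU state at fork time. The xstate area is
 * allocated lazily, so it is only copied when the source task actually
 * has one.
 */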
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_ORDER);
}

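/*
 * Called early from fork_init(); the slab cache created here backs the
 * fpu_alloc() calls above.
 */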
void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs), 0);
}

void show_regs_common(void)
{
	const char *vendor, *product, *board;

	vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	if (!vendor)
		vendor = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	/* Board Name is optional */
	board = dmi_get_system_info(DMI_BOARD_NAME);

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk(KERN_CONT " %s %s", vendor, product);
	if (board)
		printk(KERN_CONT "/%s", board);
	printk(KERN_CONT "\n");
}
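
/*
 * Example of the banner printed above (all values hypothetical):
 *
 *	Pid: 1234, comm: bash Not tainted 3.4.0 #1 Dell Inc. OptiPlex 990/0VNP2H
 */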

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

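/*
 * Example (userspace sketch, not part of this file): the TSC modes above
 * are driven via prctl(2):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// rdtsc now raises SIGSEGV
 *	prctl(PR_GET_TSC, &tsc_mode);		// reads the current mode back
 */

/*
 * __switch_to_xtra() handles the slow-path pieces of a context switch:
 * block-step debugctl state, TSC visibility (TIF_NOTSC), the I/O
 * permission bitmap and user-return notifiers.
 */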
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
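
/*
 * Example (sketch, not part of this file): a typical caller would spawn
 * a kernel thread as
 *
 *	kernel_thread(my_thread_fn, my_data, CLONE_FS | CLONE_FILES);
 *
 * "my_thread_fn" and "my_data" are hypothetical; the function ends up
 * running via kernel_thread_helper() with fn in %si and the arg in %di,
 * as the comment above describes.
 */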

/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(pm_idle);
#endif

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	percpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
	current_thread_info()->status |= TS_POLLING;

	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();

			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_touch_nmi();
			local_irq_disable();

			enter_idle();

			/* Don't trace irqs off for idle */
			stop_critical_timings();

			/* enter_idle() needs rcu for notifiers */
			rcu_idle_enter();

			if (cpuidle_idle_call())
				pm_idle();

			rcu_idle_exit();
			start_critical_timings();

			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

bool set_pm_idle_to_default(void)
{
	bool ret = !!pm_idle;

	pm_idle = default_idle;

	return ret;
}
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

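/*
 * Example (sketch, not part of this file): switching the idle handler
 * safely, per the rule in the comment above; "my_new_idle" is
 * hypothetical.
 *
 *	pm_idle = my_new_idle;
 *	cpu_idle_wait();	// no CPU keeps running the old handler
 */
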
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_power_end_rcuidle(smp_processor_id());
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end_rcuidle(smp_processor_id());
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

int mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (boot_option_idle_override == IDLE_FORCE_MWAIT)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether ECX flags extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT
	 */
	return (edx & MWAIT_EDX_C1);
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (need_resched())
		return;

	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
			" performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => all CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using AMD E400 aware idle routine\n");
		pm_idle = amd_e400_idle;
	} else
		pm_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (pm_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
		boot_option_idle_override = IDLE_POLL;
	} else if (!strcmp(str, "mwait")) {
		boot_option_idle_override = IDLE_FORCE_MWAIT;
		WARN_ONCE(1, "\"idle=mwait\" will be removed in 2012\n");
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		pm_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
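
/*
 * Example: booting with "idle=poll" on the kernel command line selects
 * poll_idle() above; "idle=halt" forces default_idle(); "idle=nomwait"
 * keeps mwait out of the C2/C3 idle states.
 */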

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}